remove old API
it was dookie anyways
This commit is contained in:
parent
286a46ac1b
commit
822628d4e3
|
@ -1,16 +0,0 @@
|
|||
# Database Configuration
|
||||
DATABASE_URL=postgres://postgres:postgres@localhost:5432/litecloud
|
||||
|
||||
# JWT Authentication & Encryption
|
||||
JWT_SECRET=your_jwt_secret_key_here_make_it_long_and_random
|
||||
JWT_EXPIRATION=86400 # in seconds
|
||||
|
||||
# Generate a secure random key with: openssl rand -base64 32
|
||||
MASTER_KEY=YourBase64EncodedMasterKeyHere
|
||||
|
||||
STORAGE_PATH=./storage
|
||||
DEFAULT_USER_QUOTA=5368709120 # 5GB in bytes
|
||||
|
||||
RUST_LOG=info,tower_http=debug
|
||||
PORT=8080
|
||||
HOST=0.0.0.0
|
|
@ -1,30 +0,0 @@
|
|||
[package]
|
||||
name = "litecloud"
|
||||
version = "0.1.0"
|
||||
|
||||
[dependencies]
|
||||
axum = { version = "0.6", features = ["multipart"] }
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
tower = "0.4"
|
||||
tower-http = { version = "0.4", features = ["fs", "trace", "cors"] }
|
||||
sqlx = { version = "0.7", features = ["runtime-tokio", "tls-rustls", "postgres", "uuid", "time", "json"] }
|
||||
argon2 = "0.5"
|
||||
jsonwebtoken = "8"
|
||||
aes-gcm = "0.10"
|
||||
rand = "0.8"
|
||||
base64 = "0.21"
|
||||
uuid = { version = "1", features = ["v4", "serde"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
thiserror = "1"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
dotenvy = "0.15"
|
||||
once_cell = "1"
|
||||
time = { version = "0.3", features = ["serde"] }
|
||||
validator = { version = "0.16", features = ["derive"] }
|
||||
futures = "0.3"
|
||||
bytes = "1"
|
||||
tokio-util = { version = "0.7", features = ["io"] }
|
||||
tokio-stream = "0.1"
|
||||
async-trait = "0.1"
|
|
@ -1,60 +0,0 @@
|
|||
CREATE TABLE users (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
email VARCHAR(255) NOT NULL UNIQUE,
|
||||
username VARCHAR(255) NOT NULL,
|
||||
password_hash VARCHAR(255) NOT NULL,
|
||||
role VARCHAR(50) NOT NULL DEFAULT 'user',
|
||||
storage_used BIGINT NOT NULL DEFAULT 0,
|
||||
storage_quota BIGINT NOT NULL,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE TABLE files (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name VARCHAR(255) NOT NULL,
|
||||
file_type VARCHAR(50) NOT NULL,
|
||||
mime_type VARCHAR(255),
|
||||
size BIGINT NOT NULL DEFAULT 0,
|
||||
owner_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
parent_id UUID REFERENCES files(id) ON DELETE CASCADE,
|
||||
path_depth INT NOT NULL DEFAULT 0,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
CONSTRAINT valid_path_depth CHECK (path_depth >= 0 AND path_depth <= 20)
|
||||
);
|
||||
|
||||
CREATE INDEX idx_files_parent_id ON files(parent_id);
|
||||
CREATE INDEX idx_files_owner_id ON files(owner_id);
|
||||
|
||||
CREATE TABLE permissions (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name VARCHAR(50) NOT NULL UNIQUE,
|
||||
can_read BOOLEAN NOT NULL DEFAULT TRUE,
|
||||
can_write BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
can_share BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE TABLE shares (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
file_id UUID NOT NULL REFERENCES files(id) ON DELETE CASCADE,
|
||||
owner_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
||||
recipient_id UUID REFERENCES users(id) ON DELETE CASCADE,
|
||||
permission_id UUID NOT NULL REFERENCES permissions(id),
|
||||
access_key VARCHAR(255) NOT NULL UNIQUE,
|
||||
expires_at TIMESTAMPTZ,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_shares_file_id ON shares(file_id);
|
||||
CREATE INDEX idx_shares_owner_id ON shares(owner_id);
|
||||
CREATE INDEX idx_shares_recipient_id ON shares(recipient_id);
|
||||
CREATE INDEX idx_shares_access_key ON shares(access_key);
|
||||
|
||||
INSERT INTO permissions (name, can_read, can_write, can_share) VALUES
|
||||
('viewer', TRUE, FALSE, FALSE),
|
||||
('editor', TRUE, TRUE, FALSE),
|
||||
('owner', TRUE, TRUE, TRUE);
|
|
@ -1,51 +0,0 @@
|
|||
use std::path::PathBuf;
|
||||
|
||||
pub struct Config {
|
||||
pub database_url: String,
|
||||
pub jwt_secret: String,
|
||||
pub jwt_expiration: i64,
|
||||
pub master_key: String,
|
||||
pub storage_path: String,
|
||||
pub default_user_quota: i64,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn from_env() -> Self {
|
||||
let database_url = std::env::var("DATABASE_URL")
|
||||
.expect("DATABASE_URL must be set");
|
||||
|
||||
let jwt_secret = std::env::var("JWT_SECRET")
|
||||
.expect("JWT_SECRET must be set");
|
||||
|
||||
let jwt_expiration = std::env::var("JWT_EXPIRATION")
|
||||
.unwrap_or_else(|_| "86400".to_string()) // Default to 24 hours
|
||||
.parse::<i64>()
|
||||
.expect("JWT_EXPIRATION must be a valid number");
|
||||
|
||||
let master_key = std::env::var("MASTER_KEY")
|
||||
.expect("MASTER_KEY must be set");
|
||||
|
||||
let storage_path = std::env::var("STORAGE_PATH")
|
||||
.unwrap_or_else(|_| "./storage".to_string());
|
||||
|
||||
let default_user_quota = std::env::var("DEFAULT_USER_QUOTA")
|
||||
.unwrap_or_else(|_| "5368709120".to_string()) // Default to 5GB
|
||||
.parse::<i64>()
|
||||
.expect("DEFAULT_USER_QUOTA must be a valid number");
|
||||
|
||||
Self {
|
||||
database_url,
|
||||
jwt_secret,
|
||||
jwt_expiration,
|
||||
master_key,
|
||||
storage_path,
|
||||
default_user_quota,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_user_storage_path(&self, user_id: &str) -> PathBuf {
|
||||
let mut path = PathBuf::from(&self.storage_path);
|
||||
path.push(user_id);
|
||||
path
|
||||
}
|
||||
}
|
119
api/src/error.rs
119
api/src/error.rs
|
@ -1,119 +0,0 @@
|
|||
use axum::{
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
Json,
|
||||
};
|
||||
use serde_json::json;
|
||||
use thiserror::Error;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AppError {
|
||||
#[error("Authentication required")]
|
||||
Unauthorized,
|
||||
|
||||
#[error("Access denied")]
|
||||
AccessDenied,
|
||||
|
||||
#[error("User not found")]
|
||||
UserNotFound,
|
||||
|
||||
#[error("User already exists")]
|
||||
UserAlreadyExists,
|
||||
|
||||
#[error("Invalid credentials")]
|
||||
InvalidCredentials,
|
||||
|
||||
#[error("Invalid or expired token")]
|
||||
InvalidToken,
|
||||
|
||||
#[error("File not found")]
|
||||
FileNotFound,
|
||||
|
||||
#[error("File already exists")]
|
||||
FileAlreadyExists,
|
||||
|
||||
#[error("Directory already exists")]
|
||||
DirectoryAlreadyExists,
|
||||
|
||||
#[error("Not a directory")]
|
||||
NotADirectory,
|
||||
|
||||
#[error("Storage quota exceeded")]
|
||||
StorageQuotaExceeded,
|
||||
|
||||
#[error("Path too deep")]
|
||||
PathTooDeep,
|
||||
|
||||
#[error("Database error: {0}")]
|
||||
DatabaseError(#[from] sqlx::Error),
|
||||
|
||||
#[error("Password hashing error: {0}")]
|
||||
PasswordHashingError(#[from] argon2::password_hash::Error),
|
||||
|
||||
#[error("JWT error: {0}")]
|
||||
JwtError(#[from] jsonwebtoken::errors::Error),
|
||||
|
||||
#[error("Encryption error: {0}")]
|
||||
EncryptionError(String),
|
||||
|
||||
#[error("IO error: {0}")]
|
||||
IoError(#[from] std::io::Error),
|
||||
|
||||
#[error("Invalid input: {0}")]
|
||||
ValidationError(String),
|
||||
|
||||
#[error("Internal server error")]
|
||||
InternalServerError,
|
||||
}
|
||||
|
||||
impl IntoResponse for AppError {
|
||||
fn into_response(self) -> Response {
|
||||
let (status, error_message) = match self {
|
||||
Self::Unauthorized => (StatusCode::UNAUTHORIZED, self.to_string()),
|
||||
Self::AccessDenied => (StatusCode::FORBIDDEN, self.to_string()),
|
||||
Self::UserNotFound | Self::FileNotFound => (StatusCode::NOT_FOUND, self.to_string()),
|
||||
Self::UserAlreadyExists | Self::FileAlreadyExists | Self::DirectoryAlreadyExists => {
|
||||
(StatusCode::CONFLICT, self.to_string())
|
||||
}
|
||||
Self::InvalidCredentials | Self::InvalidToken => {
|
||||
(StatusCode::UNAUTHORIZED, self.to_string())
|
||||
}
|
||||
Self::NotADirectory | Self::ValidationError(_) => {
|
||||
(StatusCode::BAD_REQUEST, self.to_string())
|
||||
}
|
||||
Self::StorageQuotaExceeded => {
|
||||
(StatusCode::PAYLOAD_TOO_LARGE, self.to_string())
|
||||
}
|
||||
Self::PathTooDeep => (StatusCode::BAD_REQUEST, self.to_string()),
|
||||
Self::DatabaseError(e) => {
|
||||
tracing::error!("Database error: {:?}", e);
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
|
||||
}
|
||||
Self::PasswordHashingError(e) => {
|
||||
tracing::error!("Password hashing error: {:?}", e);
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
|
||||
}
|
||||
Self::JwtError(e) => {
|
||||
tracing::error!("JWT error: {:?}", e);
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
|
||||
}
|
||||
Self::EncryptionError(e) => {
|
||||
tracing::error!("Encryption error: {}", e);
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
|
||||
}
|
||||
Self::IoError(e) => {
|
||||
tracing::error!("IO error: {:?}", e);
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
|
||||
}
|
||||
Self::InternalServerError => {
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, "Internal server error".to_string())
|
||||
}
|
||||
};
|
||||
|
||||
let body = Json(json!({
|
||||
"error": error_message
|
||||
}));
|
||||
|
||||
(status, body).into_response()
|
||||
}
|
||||
}
|
|
@ -1,94 +0,0 @@
|
|||
use axum::{
|
||||
error_handling::HandleErrorLayer,
|
||||
extract::Extension,
|
||||
http::{HeaderValue, Method, StatusCode},
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
use dotenvy::dotenv;
|
||||
use sqlx::postgres::PgPoolOptions;
|
||||
use std::{env, net::SocketAddr, path::PathBuf, sync::Arc, time::Duration};
|
||||
use tower::ServiceBuilder;
|
||||
use tower_http::{cors::CorsLayer, services::ServeDir, trace::TraceLayer};
|
||||
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
|
||||
|
||||
mod config;
|
||||
mod error;
|
||||
mod models;
|
||||
mod routes;
|
||||
mod services;
|
||||
mod utils;
|
||||
|
||||
use config::Config;
|
||||
use error::AppError;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
dotenv().ok();
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(tracing_subscriber::EnvFilter::new(
|
||||
env::var("RUST_LOG").unwrap_or_else(|_| "info,tower_http=debug".into()),
|
||||
))
|
||||
.with(tracing_subscriber::fmt::layer())
|
||||
.init();
|
||||
|
||||
let config = Config::from_env();
|
||||
let config = Arc::new(config);
|
||||
|
||||
let storage_path = PathBuf::from(&config.storage_path);
|
||||
if !storage_path.exists() {
|
||||
std::fs::create_dir_all(&storage_path)?;
|
||||
}
|
||||
|
||||
let pool = PgPoolOptions::new()
|
||||
.max_connections(10)
|
||||
.connect(&config.database_url)
|
||||
.await?
|
||||
|
||||
sqlx::migrate!("./migrations")
|
||||
.run(&pool)
|
||||
.await?
|
||||
|
||||
tracing::info!("Database migrations applied successfully");
|
||||
|
||||
let encryption_service = services::encryption::EncryptionService::new(&config.master_key);
|
||||
|
||||
let cors = CorsLayer::new()
|
||||
.allow_origin("*".parse::<HeaderValue>().unwrap())
|
||||
.allow_methods([Method::GET, Method::POST, Method::PUT, Method::DELETE])
|
||||
.allow_headers([axum::http::header::CONTENT_TYPE, axum::http::header::AUTHORIZATION]);
|
||||
|
||||
let api_router = Router::new()
|
||||
.nest("/api", routes::api_routes())
|
||||
.layer(
|
||||
ServiceBuilder::new()
|
||||
.layer(HandleErrorLayer::new(|error| async move {
|
||||
(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
format!("Unhandled error: {}", error),
|
||||
)
|
||||
}))
|
||||
.layer(TraceLayer::new_for_http())
|
||||
.layer(Extension(pool.clone()))
|
||||
.layer(Extension(Arc::clone(&config)))
|
||||
.layer(Extension(encryption_service)),
|
||||
);
|
||||
|
||||
let static_files_service = ServeDir::new("static");
|
||||
let static_router = Router::new().nest_service("/", static_files_service.clone());
|
||||
|
||||
let app = Router::new()
|
||||
.merge(api_router)
|
||||
.fallback_service(static_router)
|
||||
.layer(cors);
|
||||
|
||||
let addr = SocketAddr::from(([0, 0, 0, 0], 8080));
|
||||
tracing::info!("Listening on {}", addr);
|
||||
|
||||
axum::Server::bind(&addr)
|
||||
.serve(app.into_make_service())
|
||||
.await?
|
||||
|
||||
Ok(())
|
||||
}
|
|
@ -1,239 +0,0 @@
|
|||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::{postgres::PgPool, FromRow};
|
||||
use time::OffsetDateTime;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::error::AppError;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, FromRow, Clone, PartialEq)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum FileType {
|
||||
File,
|
||||
Directory,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, FromRow)]
|
||||
pub struct File {
|
||||
pub id: Uuid,
|
||||
pub name: String,
|
||||
pub file_type: FileType,
|
||||
pub mime_type: Option<String>,
|
||||
pub size: i64,
|
||||
pub owner_id: Uuid,
|
||||
pub parent_id: Option<Uuid>,
|
||||
pub encryption_key: Option<String>, // Encrypted with master key
|
||||
pub encryption_iv: Option<String>, // Initialization vector for AES-GCM
|
||||
pub created_at: OffsetDateTime,
|
||||
pub updated_at: OffsetDateTime,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct CreateFileDto {
|
||||
pub name: String,
|
||||
pub file_type: FileType,
|
||||
pub mime_type: Option<String>,
|
||||
pub size: i64,
|
||||
pub parent_id: Option<Uuid>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct CreateDirectoryDto {
|
||||
pub name: String,
|
||||
pub parent_id: Option<Uuid>,
|
||||
}
|
||||
|
||||
impl File {
|
||||
pub async fn create_file(
|
||||
pool: &PgPool,
|
||||
dto: CreateFileDto,
|
||||
owner_id: Uuid,
|
||||
encryption_key: Option<String>,
|
||||
encryption_iv: Option<String>,
|
||||
) -> Result<Self, AppError> {
|
||||
if let Some(parent_id) = dto.parent_id {
|
||||
let parent = Self::find_by_id(pool, parent_id).await?;
|
||||
if parent.file_type != FileType::Directory {
|
||||
return Err(AppError::NotADirectory);
|
||||
}
|
||||
if parent.owner_id != owner_id {
|
||||
// TODO: Check if user has write permission through sharing
|
||||
return Err(AppError::AccessDenied);
|
||||
}
|
||||
}
|
||||
|
||||
let existing_file = sqlx::query!("SELECT id FROM files WHERE name = $1 AND parent_id IS NOT DISTINCT FROM $2 AND owner_id = $3",
|
||||
dto.name, dto.parent_id, owner_id)
|
||||
.fetch_optional(pool)
|
||||
.await?;
|
||||
|
||||
if existing_file.is_some() {
|
||||
return Err(AppError::FileAlreadyExists);
|
||||
}
|
||||
|
||||
let file = sqlx::query_as!(File,
|
||||
r#"INSERT INTO files (id, name, file_type, mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
|
||||
RETURNING id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at"#,
|
||||
Uuid::new_v4(),
|
||||
dto.name,
|
||||
dto.file_type as FileType,
|
||||
dto.mime_type,
|
||||
dto.size,
|
||||
owner_id,
|
||||
dto.parent_id,
|
||||
encryption_key,
|
||||
encryption_iv,
|
||||
OffsetDateTime::now_utc(),
|
||||
OffsetDateTime::now_utc()
|
||||
)
|
||||
.fetch_one(pool)
|
||||
.await?;
|
||||
|
||||
Ok(file)
|
||||
}
|
||||
|
||||
pub async fn create_directory(
|
||||
pool: &PgPool,
|
||||
dto: CreateDirectoryDto,
|
||||
owner_id: Uuid,
|
||||
) -> Result<Self, AppError> {
|
||||
if let Some(parent_id) = dto.parent_id {
|
||||
let parent = Self::find_by_id(pool, parent_id).await?;
|
||||
if parent.file_type != FileType::Directory {
|
||||
return Err(AppError::NotADirectory);
|
||||
}
|
||||
if parent.owner_id != owner_id {
|
||||
// TODO: Check if user has write permission through sharing
|
||||
return Err(AppError::AccessDenied);
|
||||
}
|
||||
}
|
||||
|
||||
let existing_dir = sqlx::query!("SELECT id FROM files WHERE name = $1 AND parent_id IS NOT DISTINCT FROM $2 AND owner_id = $3 AND file_type = 'directory'",
|
||||
dto.name, dto.parent_id, owner_id)
|
||||
.fetch_optional(pool)
|
||||
.await?;
|
||||
|
||||
if existing_dir.is_some() {
|
||||
return Err(AppError::DirectoryAlreadyExists);
|
||||
}
|
||||
|
||||
let directory = sqlx::query_as!(File,
|
||||
r#"INSERT INTO files (id, name, file_type, mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
|
||||
RETURNING id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at"#,
|
||||
Uuid::new_v4(),
|
||||
dto.name,
|
||||
FileType::Directory,
|
||||
None as Option<String>,
|
||||
0i64,
|
||||
owner_id,
|
||||
dto.parent_id,
|
||||
None as Option<String>,
|
||||
None as Option<String>,
|
||||
OffsetDateTime::now_utc(),
|
||||
OffsetDateTime::now_utc()
|
||||
)
|
||||
.fetch_one(pool)
|
||||
.await?;
|
||||
|
||||
Ok(directory)
|
||||
}
|
||||
|
||||
pub async fn find_by_id(pool: &PgPool, id: Uuid) -> Result<Self, AppError> {
|
||||
let file = sqlx::query_as!(File,
|
||||
r#"SELECT id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at
|
||||
FROM files WHERE id = $1"#,
|
||||
id
|
||||
)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
.ok_or(AppError::FileNotFound)?;
|
||||
|
||||
Ok(file)
|
||||
}
|
||||
|
||||
pub async fn list_directory(
|
||||
pool: &PgPool,
|
||||
directory_id: Option<Uuid>,
|
||||
user_id: Uuid,
|
||||
) -> Result<Vec<Self>, AppError> {
|
||||
let files = if let Some(dir_id) = directory_id {
|
||||
let directory = Self::find_by_id(pool, dir_id).await?;
|
||||
if directory.file_type != FileType::Directory {
|
||||
return Err(AppError::NotADirectory);
|
||||
}
|
||||
|
||||
if directory.owner_id != user_id {
|
||||
return Err(AppError::AccessDenied);
|
||||
}
|
||||
|
||||
sqlx::query_as!(File,
|
||||
r#"SELECT id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at
|
||||
FROM files WHERE parent_id = $1 ORDER BY file_type, name"#,
|
||||
dir_id
|
||||
)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as!(File,
|
||||
r#"SELECT id, name, file_type as "file_type: FileType", mime_type, size, owner_id, parent_id, encryption_key, encryption_iv, created_at, updated_at
|
||||
FROM files WHERE parent_id IS NULL AND owner_id = $1 ORDER BY file_type, name"#,
|
||||
user_id
|
||||
)
|
||||
.fetch_all(pool)
|
||||
.await?
|
||||
};
|
||||
|
||||
Ok(files)
|
||||
}
|
||||
|
||||
pub async fn delete(
|
||||
pool: &PgPool,
|
||||
id: Uuid,
|
||||
user_id: Uuid,
|
||||
) -> Result<(), AppError> {
|
||||
let file = Self::find_by_id(pool, id).await?;
|
||||
|
||||
if file.owner_id != user_id {
|
||||
return Err(AppError::AccessDenied);
|
||||
}
|
||||
|
||||
if file.file_type == FileType::Directory {
|
||||
let files_in_dir = Self::list_directory(pool, Some(id), user_id).await?;
|
||||
|
||||
for file in files_in_dir {
|
||||
Self::delete(pool, file.id, user_id).await?;
|
||||
}
|
||||
}
|
||||
|
||||
sqlx::query!("DELETE FROM files WHERE id = $1", id)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_file_path(pool: &PgPool, id: Uuid) -> Result<String, AppError> {
|
||||
let file = Self::find_by_id(pool, id).await?;
|
||||
|
||||
let mut path_parts = vec![file.name.clone()];
|
||||
let mut current_parent_id = file.parent_id;
|
||||
|
||||
let mut depth = 0;
|
||||
const MAX_DEPTH: usize = 100;
|
||||
|
||||
while let Some(parent_id) = current_parent_id {
|
||||
if depth >= MAX_DEPTH {
|
||||
return Err(AppError::PathTooDeep);
|
||||
}
|
||||
|
||||
let parent = Self::find_by_id(pool, parent_id).await?;
|
||||
path_parts.push(parent.name.clone());
|
||||
current_parent_id = parent.parent_id;
|
||||
depth += 1;
|
||||
}
|
||||
|
||||
path_parts.reverse();
|
||||
Ok(path_parts.join("/"))
|
||||
}
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
pub mod user;
|
||||
pub mod file;
|
||||
pub mod share;
|
||||
pub mod permission;
|
||||
|
||||
pub use user::User;
|
||||
pub use file::{File, FileType};
|
||||
pub use share::{Share, ShareType};
|
||||
pub use permission::{Permission, Role};
|
|
@ -1,93 +0,0 @@
|
|||
use crate::error::AppError;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::PgPool;
|
||||
use time::OffsetDateTime;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
|
||||
pub struct Permission {
|
||||
pub id: Uuid,
|
||||
pub name: String,
|
||||
pub can_read: bool,
|
||||
pub can_write: bool,
|
||||
pub can_share: bool,
|
||||
pub created_at: OffsetDateTime,
|
||||
pub updated_at: OffsetDateTime,
|
||||
}
|
||||
|
||||
impl Permission {
|
||||
pub async fn find_by_id(pool: &PgPool, id: Uuid) -> Result<Permission, AppError> {
|
||||
let permission = sqlx::query_as::<_, Permission>(
|
||||
r#"
|
||||
SELECT * FROM permissions WHERE id = $1
|
||||
"#,
|
||||
)
|
||||
.bind(id)
|
||||
.fetch_optional(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
permission.ok_or_else(|| AppError::NotFound("Permission not found".to_string()))
|
||||
}
|
||||
|
||||
pub async fn find_by_name(pool: &PgPool, name: &str) -> Result<Permission, AppError> {
|
||||
let permission = sqlx::query_as::<_, Permission>(
|
||||
r#"
|
||||
SELECT * FROM permissions WHERE name = $1
|
||||
"#,
|
||||
)
|
||||
.bind(name)
|
||||
.fetch_optional(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
permission.ok_or_else(|| AppError::NotFound(format!("Permission '{}' not found", name)))
|
||||
}
|
||||
|
||||
pub async fn list_all(pool: &PgPool) -> Result<Vec<Permission>, AppError> {
|
||||
let permissions = sqlx::query_as::<_, Permission>(
|
||||
r#"
|
||||
SELECT * FROM permissions
|
||||
"#,
|
||||
)
|
||||
.fetch_all(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
Ok(permissions)
|
||||
}
|
||||
|
||||
pub async fn create_default_permissions(pool: &PgPool) -> Result<(), AppError> {
|
||||
// Check if permissions already exist
|
||||
let existing = Self::list_all(pool).await?;
|
||||
if !existing.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Create default permission levels
|
||||
let permissions = [
|
||||
("viewer", true, false, false),
|
||||
("editor", true, true, false),
|
||||
("admin", true, true, true),
|
||||
];
|
||||
|
||||
for (name, can_read, can_write, can_share) in permissions {
|
||||
sqlx::query(
|
||||
r#"
|
||||
INSERT INTO permissions (name, can_read, can_write, can_share)
|
||||
VALUES ($1, $2, $3, $4)
|
||||
ON CONFLICT (name) DO NOTHING
|
||||
"#,
|
||||
)
|
||||
.bind(name)
|
||||
.bind(can_read)
|
||||
.bind(can_write)
|
||||
.bind(can_share)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -1,202 +0,0 @@
|
|||
use crate::error::AppError;
|
||||
use crate::models::file::File;
|
||||
use crate::models::permission::Permission;
|
||||
use crate::models::user::User;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::PgPool;
|
||||
use time::OffsetDateTime;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, sqlx::FromRow)]
|
||||
pub struct Share {
|
||||
pub id: Uuid,
|
||||
pub file_id: Uuid,
|
||||
pub owner_id: Uuid,
|
||||
pub recipient_id: Option<Uuid>,
|
||||
pub permission_id: Uuid,
|
||||
pub access_key: String,
|
||||
pub expires_at: Option<OffsetDateTime>,
|
||||
pub created_at: OffsetDateTime,
|
||||
pub updated_at: OffsetDateTime,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct CreateShareDto {
|
||||
pub file_id: Uuid,
|
||||
pub recipient_id: Option<Uuid>,
|
||||
pub permission_id: Uuid,
|
||||
pub expires_at: Option<OffsetDateTime>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ShareResponse {
|
||||
pub id: Uuid,
|
||||
pub file: File,
|
||||
pub owner: User,
|
||||
pub recipient: Option<User>,
|
||||
pub permission: Permission,
|
||||
pub access_key: String,
|
||||
pub expires_at: Option<OffsetDateTime>,
|
||||
pub created_at: OffsetDateTime,
|
||||
pub updated_at: OffsetDateTime,
|
||||
}
|
||||
|
||||
impl Share {
|
||||
pub async fn create(
|
||||
pool: &PgPool,
|
||||
owner_id: Uuid,
|
||||
dto: CreateShareDto,
|
||||
) -> Result<Share, AppError> {
|
||||
// Verify file exists and user has access to it, then create a random key for it
|
||||
let file = File::find_by_id(pool, dto.file_id).await?
|
||||
|
||||
if file.owner_id != owner_id {
|
||||
return Err(AppError::AccessDenied("You can only share files you own".to_string()));
|
||||
}
|
||||
let access_key = Uuid::new_v4().to_string();
|
||||
|
||||
let share = sqlx::query_as::<_, Share>(
|
||||
r#"
|
||||
INSERT INTO shares (file_id, owner_id, recipient_id, permission_id, access_key, expires_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6)
|
||||
RETURNING *
|
||||
"#,
|
||||
)
|
||||
.bind(dto.file_id)
|
||||
.bind(owner_id)
|
||||
.bind(dto.recipient_id)
|
||||
.bind(dto.permission_id)
|
||||
.bind(&access_key)
|
||||
.bind(dto.expires_at)
|
||||
.fetch_one(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
Ok(share)
|
||||
}
|
||||
|
||||
pub async fn find_by_id(pool: &PgPool, id: Uuid) -> Result<Share, AppError> {
|
||||
let share = sqlx::query_as::<_, Share>(
|
||||
r#"
|
||||
SELECT * FROM shares WHERE id = $1
|
||||
"#,
|
||||
)
|
||||
.bind(id)
|
||||
.fetch_optional(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
share.ok_or_else(|| AppError::NotFound("Share not found".to_string()))
|
||||
}
|
||||
|
||||
pub async fn find_by_access_key(pool: &PgPool, access_key: &str) -> Result<Share, AppError> {
|
||||
let share = sqlx::query_as::<_, Share>(
|
||||
r#"
|
||||
SELECT * FROM shares WHERE access_key = $1
|
||||
"#,
|
||||
)
|
||||
.bind(access_key)
|
||||
.fetch_optional(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
share.ok_or_else(|| AppError::NotFound("Share not found".to_string()))
|
||||
}
|
||||
|
||||
pub async fn list_by_owner(pool: &PgPool, owner_id: Uuid) -> Result<Vec<Share>, AppError> {
|
||||
let shares = sqlx::query_as::<_, Share>(
|
||||
r#"
|
||||
SELECT * FROM shares WHERE owner_id = $1
|
||||
"#,
|
||||
)
|
||||
.bind(owner_id)
|
||||
.fetch_all(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
Ok(shares)
|
||||
}
|
||||
|
||||
pub async fn list_by_recipient(pool: &PgPool, recipient_id: Uuid) -> Result<Vec<Share>, AppError> {
|
||||
let shares = sqlx::query_as::<_, Share>(
|
||||
r#"
|
||||
SELECT * FROM shares WHERE recipient_id = $1
|
||||
"#,
|
||||
)
|
||||
.bind(recipient_id)
|
||||
.fetch_all(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
Ok(shares)
|
||||
}
|
||||
|
||||
pub async fn delete(pool: &PgPool, id: Uuid, user_id: Uuid) -> Result<(), AppError> {
|
||||
let share = Self::find_by_id(pool, id).await?
|
||||
|
||||
if share.owner_id != user_id {
|
||||
return Err(AppError::AccessDenied("You can only delete shares you own".to_string()));
|
||||
}
|
||||
|
||||
sqlx::query(
|
||||
r#"
|
||||
DELETE FROM shares WHERE id = $1
|
||||
"#,
|
||||
)
|
||||
.bind(id)
|
||||
.execute(pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_full_share_details(pool: &PgPool, id: Uuid) -> Result<ShareResponse, AppError> {
|
||||
let share = Self::find_by_id(pool, id).await?
|
||||
|
||||
let file = File::find_by_id(pool, share.file_id).await?
|
||||
let owner = User::find_by_id(pool, share.owner_id).await?
|
||||
|
||||
let recipient = match share.recipient_id {
|
||||
Some(recipient_id) => Some(User::find_by_id(pool, recipient_id).await?),
|
||||
None => None,
|
||||
};
|
||||
|
||||
let permission = Permission::find_by_id(pool, share.permission_id).await?
|
||||
|
||||
Ok(ShareResponse {
|
||||
id: share.id,
|
||||
file,
|
||||
owner,
|
||||
recipient,
|
||||
permission,
|
||||
access_key: share.access_key,
|
||||
expires_at: share.expires_at,
|
||||
created_at: share.created_at,
|
||||
updated_at: share.updated_at,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn is_valid_share(
|
||||
pool: &PgPool,
|
||||
share_id: Uuid,
|
||||
user_id: Option<Uuid>,
|
||||
) -> Result<bool, AppError> {
|
||||
let share = Self::find_by_id(pool, share_id).await?
|
||||
|
||||
if let Some(expires_at) = share.expires_at {
|
||||
if expires_at < OffsetDateTime::now_utc() {
|
||||
return Ok(false);
|
||||
}
|
||||
}
|
||||
|
||||
// If share has a specific recipient, check if the user is that recipient - otherwise return a valid share for everyone with the link
|
||||
if let Some(recipient_id) = share.recipient_id {
|
||||
if let Some(user_id) = user_id {
|
||||
return Ok(recipient_id == user_id);
|
||||
}
|
||||
return Ok(false);
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
}
|
|
@ -1,137 +0,0 @@
|
|||
use argon2::{password_hash::SaltString, Argon2, PasswordHash, PasswordHasher, PasswordVerifier};
|
||||
use rand::rngs::OsRng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::{postgres::PgPool, FromRow};
|
||||
use time::OffsetDateTime;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::error::AppError;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, FromRow)]
|
||||
pub struct User {
|
||||
pub id: Uuid,
|
||||
pub email: String,
|
||||
#[serde(skip_serializing)]
|
||||
pub password_hash: String,
|
||||
pub display_name: Option<String>,
|
||||
pub storage_used: i64,
|
||||
pub storage_quota: i64,
|
||||
pub created_at: OffsetDateTime,
|
||||
pub updated_at: OffsetDateTime,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct CreateUserDto {
|
||||
pub email: String,
|
||||
pub password: String,
|
||||
pub display_name: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct LoginUserDto {
|
||||
pub email: String,
|
||||
pub password: String,
|
||||
}
|
||||
|
||||
impl User {
|
||||
pub async fn create(pool: &PgPool, dto: CreateUserDto) -> Result<Self, AppError> {
|
||||
let existing_user = sqlx::query!("SELECT id FROM users WHERE email = $1", dto.email)
|
||||
.fetch_optional(pool)
|
||||
.await?;
|
||||
|
||||
if existing_user.is_some() {
|
||||
return Err(AppError::UserAlreadyExists);
|
||||
}
|
||||
let password_hash = Self::hash_password(&dto.password)?;
|
||||
|
||||
let storage_quota = 1_073_741_824;
|
||||
|
||||
let user = sqlx::query_as!(User,
|
||||
r#"INSERT INTO users (id, email, password_hash, display_name, storage_used, storage_quota, created_at, updated_at)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
|
||||
RETURNING id, email, password_hash, display_name, storage_used, storage_quota, created_at, updated_at"#,
|
||||
Uuid::new_v4(),
|
||||
dto.email,
|
||||
password_hash,
|
||||
dto.display_name,
|
||||
0i64,
|
||||
storage_quota,
|
||||
OffsetDateTime::now_utc(),
|
||||
OffsetDateTime::now_utc()
|
||||
)
|
||||
.fetch_one(pool)
|
||||
.await?;
|
||||
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
pub async fn find_by_id(pool: &PgPool, id: Uuid) -> Result<Self, AppError> {
|
||||
let user = sqlx::query_as!(User,
|
||||
r#"SELECT id, email, password_hash, display_name, storage_used, storage_quota, created_at, updated_at
|
||||
FROM users WHERE id = $1"#,
|
||||
id
|
||||
)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
.ok_or(AppError::UserNotFound)?;
|
||||
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
pub async fn find_by_email(pool: &PgPool, email: &str) -> Result<Self, AppError> {
|
||||
let user = sqlx::query_as!(User,
|
||||
r#"SELECT id, email, password_hash, display_name, storage_used, storage_quota, created_at, updated_at
|
||||
FROM users WHERE email = $1"#,
|
||||
email
|
||||
)
|
||||
.fetch_optional(pool)
|
||||
.await?
|
||||
.ok_or(AppError::UserNotFound)?;
|
||||
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
pub async fn authenticate(pool: &PgPool, dto: LoginUserDto) -> Result<Self, AppError> {
|
||||
let user = Self::find_by_email(pool, &dto.email).await?;
|
||||
|
||||
if !Self::verify_password(&dto.password, &user.password_hash)? {
|
||||
return Err(AppError::InvalidCredentials);
|
||||
}
|
||||
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
pub async fn update_storage_used(pool: &PgPool, user_id: Uuid, bytes_added: i64) -> Result<(), AppError> {
|
||||
let mut user = Self::find_by_id(pool, user_id).await?;
|
||||
|
||||
let new_storage_used = user.storage_used + bytes_added;
|
||||
|
||||
if new_storage_used > user.storage_quota {
|
||||
return Err(AppError::StorageQuotaExceeded);
|
||||
}
|
||||
sqlx::query!("UPDATE users SET storage_used = $1, updated_at = $2 WHERE id = $3",
|
||||
new_storage_used,
|
||||
OffsetDateTime::now_utc(),
|
||||
user_id
|
||||
)
|
||||
.execute(pool)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn hash_password(password: &str) -> Result<String, AppError> {
|
||||
let salt = SaltString::generate(&mut OsRng);
|
||||
let argon2 = Argon2::default();
|
||||
let password_hash = argon2
|
||||
.hash_password(password.as_bytes(), &salt)?
|
||||
.to_string();
|
||||
Ok(password_hash)
|
||||
}
|
||||
|
||||
fn verify_password(password: &str, hash: &str) -> Result<bool, AppError> {
|
||||
let parsed_hash = PasswordHash::new(hash)?;
|
||||
let argon2 = Argon2::default();
|
||||
Ok(argon2.verify_password(password.as_bytes(), &parsed_hash).is_ok())
|
||||
}
|
||||
}
|
|
@ -1,58 +0,0 @@
|
|||
use axum::{
|
||||
extract::Extension,
|
||||
routing::post,
|
||||
Json, Router,
|
||||
};
|
||||
use sqlx::PgPool;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::config::Config;
|
||||
use crate::error::AppError;
|
||||
use crate::models::user::{CreateUserDto, LoginUserDto, User};
|
||||
use crate::utils::jwt;
|
||||
|
||||
pub fn routes() -> Router {
|
||||
Router::new()
|
||||
.route("/auth/register", post(register))
|
||||
.route("/auth/login", post(login))
|
||||
}
|
||||
|
||||
async fn register(
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Extension(config): Extension<Arc<Config>>,
|
||||
Json(create_user_dto): Json<CreateUserDto>,
|
||||
) -> Result<Json<serde_json::Value>, AppError> {
|
||||
let user = User::create(&pool, create_user_dto, &config).await?;
|
||||
|
||||
let token = jwt::generate_token(
|
||||
&config,
|
||||
user.id,
|
||||
&user.email,
|
||||
&user.role,
|
||||
)?;
|
||||
|
||||
Ok(Json(serde_json::json!({
|
||||
"user": user,
|
||||
"token": token
|
||||
})))
|
||||
}
|
||||
|
||||
async fn login(
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Extension(config): Extension<Arc<Config>>,
|
||||
Json(login_dto): Json<LoginUserDto>,
|
||||
) -> Result<Json<serde_json::Value>, AppError> {
|
||||
let user = User::authenticate(&pool, &login_dto.email, &login_dto.password).await?;
|
||||
|
||||
let token = jwt::generate_token(
|
||||
&config,
|
||||
user.id,
|
||||
&user.email,
|
||||
&user.role,
|
||||
)?;
|
||||
|
||||
Ok(Json(serde_json::json!({
|
||||
"user": user,
|
||||
"token": token
|
||||
})))
|
||||
}
|
|
@ -1,185 +0,0 @@
|
|||
use axum::{
|
||||
body::StreamBody,
|
||||
extract::{Extension, Multipart, Path},
|
||||
http::{HeaderMap, StatusCode},
|
||||
response::IntoResponse,
|
||||
routing::{delete, get, post},
|
||||
Json, Router,
|
||||
};
|
||||
use sqlx::PgPool;
|
||||
use std::{path::PathBuf, sync::Arc};
|
||||
use tokio::fs::File;
|
||||
use tokio_util::io::ReaderStream;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::config::Config;
|
||||
use crate::error::AppError;
|
||||
use crate::models::file::{CreateDirectoryDto, CreateFileDto, File as FileModel, FileType};
|
||||
use crate::services::auth::AuthUser;
|
||||
use crate::services::encryption::EncryptionService;
|
||||
use crate::services::storage::StorageService;
|
||||
|
||||
pub fn routes() -> Router {
|
||||
Router::new()
|
||||
.route("/files", get(list_files))
|
||||
.route("/files/:id", get(get_file))
|
||||
.route("/files/:id/download", get(download_file))
|
||||
.route("/files/upload", post(upload_file))
|
||||
.route("/files/directory", post(create_directory))
|
||||
.route("/files/:id", delete(delete_file))
|
||||
}
|
||||
|
||||
async fn list_files(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Path(parent_id): Option<Path<Uuid>>,
|
||||
) -> Result<Json<Vec<FileModel>>, AppError> {
|
||||
let files = match parent_id {
|
||||
Some(parent_id) => FileModel::list_by_parent(&pool, parent_id, auth_user.id).await?,
|
||||
None => FileModel::list_root_directory(&pool, auth_user.id).await?,
|
||||
};
|
||||
|
||||
Ok(Json(files))
|
||||
}
|
||||
|
||||
async fn get_file(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Path(id): Path<Uuid>,
|
||||
) -> Result<Json<FileModel>, AppError> {
|
||||
let file = FileModel::find_by_id(&pool, id).await?;
|
||||
|
||||
// Check if user has access to this file
|
||||
if file.owner_id != auth_user.id {
|
||||
return Err(AppError::AccessDenied("You don't have access to this file".to_string()));
|
||||
}
|
||||
|
||||
Ok(Json(file))
|
||||
}
|
||||
|
||||
async fn upload_file(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Extension(config): Extension<Arc<Config>>,
|
||||
Extension(encryption_service): Extension<EncryptionService>,
|
||||
mut multipart: Multipart,
|
||||
) -> Result<Json<FileModel>, AppError> {
|
||||
// Extract file data from multipart form
|
||||
let mut file_name = None;
|
||||
let mut file_data = None;
|
||||
let mut parent_id = None;
|
||||
|
||||
while let Some(field) = multipart.next_field().await.map_err(|e| AppError::InvalidInput(e.to_string()))? {
|
||||
let name = field.name().unwrap_or("").to_string();
|
||||
|
||||
if name == "file" {
|
||||
file_name = field.file_name().map(|s| s.to_string());
|
||||
file_data = Some(field.bytes().await.map_err(|e| AppError::InvalidInput(e.to_string()))?);
|
||||
} else if name == "parent_id" {
|
||||
let parent_id_str = field.text().await.map_err(|e| AppError::InvalidInput(e.to_string()))?;
|
||||
if !parent_id_str.is_empty() {
|
||||
parent_id = Some(Uuid::parse_str(&parent_id_str).map_err(|e| AppError::InvalidInput(e.to_string()))?);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let file_name = file_name.ok_or_else(|| AppError::InvalidInput("File name is required".to_string()))?;
|
||||
let file_data = file_data.ok_or_else(|| AppError::InvalidInput("File data is required".to_string()))?;
|
||||
|
||||
// Check user storage quota
|
||||
let user = crate::models::user::User::find_by_id(&pool, auth_user.id).await?;
|
||||
if user.storage_used + file_data.len() as i64 > user.storage_quota {
|
||||
return Err(AppError::StorageQuotaExceeded("Storage quota exceeded".to_string()));
|
||||
}
|
||||
|
||||
let storage_service = StorageService::new(&config.storage_path);
|
||||
|
||||
// Create file record in database
|
||||
let create_file_dto = CreateFileDto {
|
||||
name: file_name,
|
||||
parent_id,
|
||||
size: file_data.len() as i64,
|
||||
mime_type: mime_guess::from_path(&file_name).first_or_octet_stream().to_string(),
|
||||
};
|
||||
|
||||
let file = FileModel::create(&pool, auth_user.id, create_file_dto).await?;
|
||||
|
||||
// Encrypt and save file to disk
|
||||
let encrypted_data = encryption_service.encrypt(&file_data)?;
|
||||
storage_service.save_file(auth_user.id, file.id, &encrypted_data).await?;
|
||||
|
||||
// Update user storage used
|
||||
crate::models::user::User::update_storage_used(&pool, auth_user.id, file_data.len() as i64).await?;
|
||||
|
||||
Ok(Json(file))
|
||||
}
|
||||
|
||||
/// GET /files/:id/download — streams the decrypted contents of a file to
/// its owner with Content-Type and Content-Disposition headers set.
async fn download_file(
    auth_user: AuthUser,
    Extension(pool): Extension<PgPool>,
    Extension(config): Extension<Arc<Config>>,
    Extension(encryption_service): Extension<EncryptionService>,
    Path(id): Path<Uuid>,
) -> Result<impl IntoResponse, AppError> {
    let file = FileModel::find_by_id(&pool, id).await?;

    // Only the owner may download.
    if file.owner_id != auth_user.id {
        return Err(AppError::AccessDenied("You don't have access to this file".to_string()));
    }

    // Directories have no blob to stream.
    if file.file_type != FileType::File {
        return Err(AppError::InvalidInput("Cannot download a directory".to_string()));
    }

    let storage_service = StorageService::new(&config.storage_path);

    // NOTE(review): the whole encrypted blob is read and decrypted in
    // memory before streaming — fine for small files, costly for large ones.
    let encrypted_data = storage_service.read_file(auth_user.id, file.id).await?;

    let decrypted_data = encryption_service.decrypt(&encrypted_data)?;

    let mut headers = HeaderMap::new();
    // Fall back to a generic binary type when the stored MIME string is not
    // a valid header value.
    headers.insert(
        axum::http::header::CONTENT_TYPE,
        file.mime_type.parse().unwrap_or_else(|_| "application/octet-stream".parse().unwrap()),
    );
    headers.insert(
        axum::http::header::CONTENT_DISPOSITION,
        format!("attachment; filename=\"{}\"", file.name).parse().unwrap(),
    );

    // Wrap the in-memory bytes in a reader so axum can stream the body.
    let stream = tokio_util::io::ReaderStream::new(std::io::Cursor::new(decrypted_data));
    let body = StreamBody::new(stream);

    Ok((StatusCode::OK, headers, body))
}
|
||||
|
||||
async fn create_directory(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Json(create_dir_dto): Json<CreateDirectoryDto>,
|
||||
) -> Result<Json<FileModel>, AppError> {
|
||||
let directory = FileModel::create_directory(&pool, auth_user.id, create_dir_dto).await?;
|
||||
Ok(Json(directory))
|
||||
}
|
||||
|
||||
async fn delete_file(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Extension(config): Extension<Arc<Config>>,
|
||||
Path(id): Path<Uuid>,
|
||||
) -> Result<StatusCode, AppError> {
|
||||
let file = FileModel::find_by_id(&pool, id).await?;
|
||||
if file.owner_id != auth_user.id {
|
||||
return Err(AppError::AccessDenied("You don't have access to this file".to_string()));
|
||||
}
|
||||
let storage_service = StorageService::new(&config.storage_path);
|
||||
if file.file_type == FileType::Directory {
|
||||
FileModel::delete_directory_recursive(&pool, id, auth_user.id, &storage_service).await?;
|
||||
} else {
|
||||
storage_service.delete_file(auth_user.id, file.id).await?;
|
||||
FileModel::delete(&pool, id).await?;
|
||||
crate::models::user::User::update_storage_used(&pool, auth_user.id, -file.size).await?;
|
||||
}
|
||||
|
||||
Ok(StatusCode::NO_CONTENT)
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
mod auth;
|
||||
mod files;
|
||||
mod shares;
|
||||
mod users;
|
||||
|
||||
use axum::Router;
|
||||
|
||||
pub fn api_routes() -> Router {
|
||||
Router::new()
|
||||
.merge(auth::routes())
|
||||
.merge(files::routes())
|
||||
.merge(shares::routes())
|
||||
.merge(users::routes())
|
||||
}
|
|
@ -1,89 +0,0 @@
|
|||
use axum::{
|
||||
extract::{Extension, Path},
|
||||
http::StatusCode,
|
||||
routing::{delete, get, post},
|
||||
Json, Router,
|
||||
};
|
||||
use sqlx::PgPool;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::error::AppError;
|
||||
use crate::models::share::{CreateShareDto, Share, ShareResponse};
|
||||
use crate::services::auth::AuthUser;
|
||||
|
||||
pub fn routes() -> Router {
|
||||
Router::new()
|
||||
.route("/shares", get(list_shares))
|
||||
.route("/shares", post(create_share))
|
||||
.route("/shares/:id", get(get_share))
|
||||
.route("/shares/:id", delete(delete_share))
|
||||
.route("/shares/access/:access_key", get(access_shared_file))
|
||||
}
|
||||
|
||||
async fn list_shares(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
) -> Result<Json<Vec<ShareResponse>>, AppError> {
|
||||
let owned_shares = Share::list_by_owner(&pool, auth_user.id).await?;
|
||||
let received_shares = Share::list_by_recipient(&pool, auth_user.id).await?;
|
||||
let mut share_responses = Vec::new();
|
||||
|
||||
for share in owned_shares {
|
||||
let share_response = Share::get_full_share_details(&pool, share.id).await?;
|
||||
share_responses.push(share_response);
|
||||
}
|
||||
|
||||
for share in received_shares {
|
||||
let share_response = Share::get_full_share_details(&pool, share.id).await?;
|
||||
share_responses.push(share_response);
|
||||
}
|
||||
|
||||
Ok(Json(share_responses))
|
||||
}
|
||||
|
||||
async fn create_share(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Json(create_share_dto): Json<CreateShareDto>,
|
||||
) -> Result<Json<Share>, AppError> {
|
||||
let share = Share::create(&pool, auth_user.id, create_share_dto).await?;
|
||||
Ok(Json(share))
|
||||
}
|
||||
|
||||
async fn get_share(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Path(id): Path<Uuid>,
|
||||
) -> Result<Json<ShareResponse>, AppError> {
|
||||
let share = Share::find_by_id(&pool, id).await?;
|
||||
if share.owner_id != auth_user.id && share.recipient_id != Some(auth_user.id) {
|
||||
return Err(AppError::AccessDenied("You don't have access to this share".to_string()));
|
||||
}
|
||||
|
||||
let share_response = Share::get_full_share_details(&pool, id).await?;
|
||||
Ok(Json(share_response))
|
||||
}
|
||||
|
||||
async fn delete_share(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Path(id): Path<Uuid>,
|
||||
) -> Result<StatusCode, AppError> {
|
||||
Share::delete(&pool, id, auth_user.id).await?;
|
||||
Ok(StatusCode::NO_CONTENT)
|
||||
}
|
||||
|
||||
async fn access_shared_file(
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Path(access_key): Path<String>,
|
||||
) -> Result<Json<ShareResponse>, AppError> {
|
||||
let share = Share::find_by_access_key(&pool, &access_key).await?;
|
||||
if let Some(expires_at) = share.expires_at {
|
||||
if expires_at < time::OffsetDateTime::now_utc() {
|
||||
return Err(AppError::AccessDenied("This share has expired".to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
let share_response = Share::get_full_share_details(&pool, share.id).await?;
|
||||
Ok(Json(share_response))
|
||||
}
|
|
@ -1,65 +0,0 @@
|
|||
use axum::{
|
||||
extract::Extension,
|
||||
http::StatusCode,
|
||||
routing::{get, put},
|
||||
Json, Router,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::PgPool;
|
||||
|
||||
use crate::error::AppError;
|
||||
use crate::models::user::User;
|
||||
use crate::services::auth::AuthUser;
|
||||
|
||||
pub fn routes() -> Router {
|
||||
Router::new()
|
||||
.route("/users/me", get(get_current_user))
|
||||
.route("/users/me/password", put(update_password))
|
||||
}
|
||||
|
||||
async fn get_current_user(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
) -> Result<Json<User>, AppError> {
|
||||
let user = User::find_by_id(&pool, auth_user.id).await?;
|
||||
Ok(Json(user))
|
||||
}
|
||||
|
||||
/// Request body for `PUT /users/me/password`.
#[derive(Debug, Serialize, Deserialize)]
pub struct UpdatePasswordDto {
    /// Must match the stored hash before any change is made.
    pub current_password: String,
    /// Replacement password; stored as a fresh Argon2 hash.
    pub new_password: String,
}
|
||||
|
||||
async fn update_password(
|
||||
auth_user: AuthUser,
|
||||
Extension(pool): Extension<PgPool>,
|
||||
Json(update_dto): Json<UpdatePasswordDto>,
|
||||
) -> Result<StatusCode, AppError> {
|
||||
let user = User::find_by_id(&pool, auth_user.id).await?;
|
||||
let is_valid = crate::utils::password::verify_password(
|
||||
&update_dto.current_password,
|
||||
&user.password_hash,
|
||||
)?;
|
||||
|
||||
if !is_valid {
|
||||
return Err(AppError::AuthenticationError("Current password is incorrect".to_string()));
|
||||
}
|
||||
|
||||
let new_password_hash = crate::utils::password::hash_password(&update_dto.new_password)?;
|
||||
|
||||
sqlx::query!(
|
||||
r#"
|
||||
UPDATE users
|
||||
SET password_hash = $1, updated_at = NOW()
|
||||
WHERE id = $2
|
||||
"#,
|
||||
new_password_hash,
|
||||
auth_user.id
|
||||
)
|
||||
.execute(&pool)
|
||||
.await
|
||||
.map_err(AppError::from)?;
|
||||
|
||||
Ok(StatusCode::OK)
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
use axum::{
|
||||
async_trait,
|
||||
extract::{FromRequest, RequestParts, TypedHeader},
|
||||
headers::{authorization::Bearer, Authorization},
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
Json,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::config::Config;
|
||||
use crate::error::AppError;
|
||||
use crate::utils::jwt;
|
||||
|
||||
/// Authenticated identity extracted from a verified JWT; available to
/// handlers as an axum extractor argument.
#[derive(Debug, Serialize, Deserialize)]
pub struct AuthUser {
    /// User id parsed from the token's `sub` claim.
    pub id: Uuid,
    /// Email copied from the token claims.
    pub email: String,
    /// Role string copied from the token claims.
    pub role: String,
}
|
||||
|
||||
// Extractor that authenticates a request from its `Authorization: Bearer`
// header; every failure is rejected as a ready-made JSON error response.
//
// NOTE(review): `RequestParts`/`FromRequest<B>` is the axum 0.5 extractor
// API; the manifest pins axum 0.6, where this shape was replaced by
// `FromRequestParts`. Confirm which axum version this file targeted.
#[async_trait]
impl<B> FromRequest<B> for AuthUser
where
    B: Send,
{
    type Rejection = Response;

    async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
        // 1. Pull the bearer token out of the Authorization header.
        let TypedHeader(Authorization(bearer)) =
            TypedHeader::<Authorization<Bearer>>::from_request(req)
                .await
                .map_err(|_| {
                    let json = Json(serde_json::json!({
                        "error": "Missing or invalid authorization header"
                    }));
                    (StatusCode::UNAUTHORIZED, json).into_response()
                })?;

        // 2. The JWT secret lives in the shared Config extension.
        let config = req
            .extensions()
            .get::<Arc<Config>>()
            .ok_or_else(|| {
                let json = Json(serde_json::json!({
                    "error": "Server configuration error"
                }));
                (StatusCode::INTERNAL_SERVER_ERROR, json).into_response()
            })?;

        // 3. Validate the signature/expiry and decode the claims.
        let claims = jwt::verify_token(config, bearer.token()).map_err(|e| {
            let json = Json(serde_json::json!({
                "error": format!("Invalid token: {}", e)
            }));
            (StatusCode::UNAUTHORIZED, json).into_response()
        })?;

        // 4. `sub` carries the user id as a UUID string.
        let user_id = Uuid::parse_str(&claims.sub).map_err(|_| {
            let json = Json(serde_json::json!({
                "error": "Invalid user ID in token"
            }));
            (StatusCode::UNAUTHORIZED, json).into_response()
        })?;

        Ok(AuthUser {
            id: user_id,
            email: claims.email,
            role: claims.role,
        })
    }
}
|
|
@ -1,56 +0,0 @@
|
|||
use aes_gcm::{aead::{Aead, KeyInit}, Aes256Gcm, Nonce};
|
||||
use base64::{engine::general_purpose, Engine as _};
|
||||
use rand::{rngs::OsRng, RngCore};
|
||||
|
||||
use crate::error::AppError;
|
||||
|
||||
/// AES-256-GCM encryption backend used for file blobs at rest.
pub struct EncryptionService {
    // Cipher initialized from the base64-encoded master key (see `new`).
    cipher: Aes256Gcm,
}
|
||||
|
||||
impl EncryptionService {
|
||||
pub fn new(master_key: &str) -> Self {
|
||||
let key_bytes = general_purpose::STANDARD
|
||||
.decode(master_key)
|
||||
.expect("Invalid master key format");
|
||||
let cipher = Aes256Gcm::new_from_slice(&key_bytes)
|
||||
.expect("Invalid key length");
|
||||
|
||||
Self { cipher }
|
||||
}
|
||||
|
||||
pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>, AppError> {
|
||||
let mut nonce_bytes = [0u8; 12];
|
||||
OsRng.fill_bytes(&mut nonce_bytes);
|
||||
let nonce = Nonce::from_slice(&nonce_bytes);
|
||||
let ciphertext = self.cipher
|
||||
.encrypt(nonce, data)
|
||||
.map_err(|e| AppError::EncryptionError(e.to_string()))?;
|
||||
let mut result = Vec::with_capacity(nonce_bytes.len() + ciphertext.len());
|
||||
result.extend_from_slice(&nonce_bytes);
|
||||
result.extend_from_slice(&ciphertext);
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>, AppError> {
|
||||
if data.len() < 12 {
|
||||
return Err(AppError::EncryptionError("Invalid encrypted data format".to_string()));
|
||||
}
|
||||
|
||||
let nonce = Nonce::from_slice(&data[..12]);
|
||||
let ciphertext = &data[12..];
|
||||
let plaintext = self.cipher
|
||||
.decrypt(nonce, ciphertext)
|
||||
.map_err(|e| AppError::EncryptionError(e.to_string()))?;
|
||||
|
||||
Ok(plaintext)
|
||||
}
|
||||
|
||||
pub fn generate_master_key() -> String {
|
||||
let mut key = [0u8; 32];
|
||||
OsRng.fill_bytes(&mut key);
|
||||
|
||||
general_purpose::STANDARD.encode(key)
|
||||
}
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
pub mod auth;
|
||||
pub mod encryption;
|
||||
pub mod storage;
|
|
@ -1,77 +0,0 @@
|
|||
use std::path::{Path, PathBuf};
|
||||
use tokio::fs;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::error::AppError;
|
||||
|
||||
/// Filesystem-backed blob store; blobs live at `<base_path>/<user_id>/<file_id>`.
pub struct StorageService {
    // Root directory under which all per-user directories are created.
    base_path: PathBuf,
}
|
||||
|
||||
impl StorageService {
|
||||
pub fn new<P: AsRef<Path>>(base_path: P) -> Self {
|
||||
Self {
|
||||
base_path: PathBuf::from(base_path.as_ref()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn save_file(&self, user_id: Uuid, file_id: Uuid, data: &[u8]) -> Result<(), AppError> {
|
||||
let file_path = self.get_file_path(user_id, file_id);
|
||||
|
||||
|
||||
if let Some(parent) = file_path.parent() {
|
||||
fs::create_dir_all(parent).await
|
||||
.map_err(|e| AppError::IoError(e.to_string()))?;
|
||||
}
|
||||
|
||||
fs::write(&file_path, data).await
|
||||
.map_err(|e| AppError::IoError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn read_file(&self, user_id: Uuid, file_id: Uuid) -> Result<Vec<u8>, AppError> {
|
||||
let file_path = self.get_file_path(user_id, file_id);
|
||||
|
||||
let data = fs::read(&file_path).await
|
||||
.map_err(|e| AppError::IoError(e.to_string()))?;
|
||||
|
||||
Ok(data)
|
||||
}
|
||||
|
||||
pub async fn delete_file(&self, user_id: Uuid, file_id: Uuid) -> Result<(), AppError> {
|
||||
let file_path = self.get_file_path(user_id, file_id);
|
||||
|
||||
if !file_path.exists() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
fs::remove_file(&file_path).await
|
||||
.map_err(|e| AppError::IoError(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn create_user_directory(&self, user_id: Uuid) -> Result<(), AppError> {
|
||||
let dir_path = self.get_user_directory(user_id);
|
||||
|
||||
if !dir_path.exists() {
|
||||
fs::create_dir_all(&dir_path).await
|
||||
.map_err(|e| AppError::IoError(e.to_string()))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_file_path(&self, user_id: Uuid, file_id: Uuid) -> PathBuf {
|
||||
let mut path = self.get_user_directory(user_id);
|
||||
path.push(file_id.to_string());
|
||||
path
|
||||
}
|
||||
|
||||
pub fn get_user_directory(&self, user_id: Uuid) -> PathBuf {
|
||||
let mut path = self.base_path.clone();
|
||||
path.push(user_id.to_string());
|
||||
path
|
||||
}
|
||||
}
|
|
@ -1,54 +0,0 @@
|
|||
use crate::config::Config;
|
||||
use crate::error::AppError;
|
||||
use jsonwebtoken::{decode, encode, DecodingKey, EncodingKey, Header, Validation};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use time::{Duration, OffsetDateTime};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// JWT claim set carried in access tokens.
#[derive(Debug, Serialize, Deserialize)]
pub struct Claims {
    /// Subject: the user's UUID rendered as a string.
    pub sub: String,
    /// Expiry, seconds since the Unix epoch.
    pub exp: i64,
    /// Issued-at, seconds since the Unix epoch.
    pub iat: i64,
    pub email: String,
    pub role: String,
}
|
||||
|
||||
pub fn generate_token(
|
||||
config: &Arc<Config>,
|
||||
user_id: Uuid,
|
||||
email: &str,
|
||||
role: &str,
|
||||
) -> Result<String, AppError> {
|
||||
let now = OffsetDateTime::now_utc();
|
||||
let expires_at = now + Duration::seconds(config.jwt_expiration);
|
||||
|
||||
let claims = Claims {
|
||||
sub: user_id.to_string(),
|
||||
exp: expires_at.unix_timestamp(),
|
||||
iat: now.unix_timestamp(),
|
||||
email: email.to_string(),
|
||||
role: role.to_string(),
|
||||
};
|
||||
|
||||
let token = encode(
|
||||
&Header::default(),
|
||||
&claims,
|
||||
&EncodingKey::from_secret(config.jwt_secret.as_bytes()),
|
||||
)
|
||||
.map_err(|e| AppError::JwtError(e.to_string()))?;
|
||||
|
||||
Ok(token)
|
||||
}
|
||||
|
||||
pub fn verify_token(config: &Arc<Config>, token: &str) -> Result<Claims, AppError> {
|
||||
let token_data = decode::<Claims>(
|
||||
token,
|
||||
&DecodingKey::from_secret(config.jwt_secret.as_bytes()),
|
||||
&Validation::default(),
|
||||
)
|
||||
.map_err(|e| AppError::JwtError(e.to_string()))?;
|
||||
|
||||
Ok(token_data.claims)
|
||||
}
|
|
@ -1,2 +0,0 @@
|
|||
pub mod jwt;
|
||||
pub mod password;
|
|
@ -1,24 +0,0 @@
|
|||
use argon2::{password_hash::SaltString, Argon2, PasswordHash, PasswordHasher, PasswordVerifier};
|
||||
use rand::rngs::OsRng;
|
||||
|
||||
use crate::error::AppError;
|
||||
|
||||
pub fn hash_password(password: &str) -> Result<String, AppError> {
|
||||
let salt = SaltString::generate(&mut OsRng);
|
||||
let argon2 = Argon2::default();
|
||||
|
||||
let password_hash = argon2
|
||||
.hash_password(password.as_bytes(), &salt)
|
||||
.map_err(|e| AppError::PasswordHashError(e.to_string()))?;
|
||||
|
||||
Ok(password_hash.to_string())
|
||||
}
|
||||
|
||||
pub fn verify_password(password: &str, password_hash: &str) -> Result<bool, AppError> {
|
||||
let parsed_hash = PasswordHash::new(password_hash)
|
||||
.map_err(|e| AppError::PasswordHashError(e.to_string()))?;
|
||||
|
||||
let result = Argon2::default().verify_password(password.as_bytes(), &parsed_hash);
|
||||
|
||||
Ok(result.is_ok())
|
||||
}
|
Loading…
Reference in a new issue