Mirror of https://github.com/dani-garcia/vaultwarden.git (synced 2025-06-09 04:23:55 +00:00)

Merge remote-tracking branch 'dani/main' into sso-support

Commit a79d6682d4: 23 changed files with 1370 additions and 307 deletions
@@ -15,6 +15,14 @@
 ####################
 
 ## Main data folder
+## This can be a path to a local folder or a path to an external location,
+## depending on features enabled at build time. Possible external locations:
+##
+## - AWS S3 Bucket (via `s3` feature): s3://bucket-name/path/to/folder
+##
+## When using an external location, make sure to set TMP_FOLDER,
+## TEMPLATES_FOLDER, and DATABASE_URL to local paths and/or a remote database
+## location.
 # DATA_FOLDER=data
 
 ## Individual folders, these override %DATA_FOLDER%
@@ -22,10 +30,13 @@
 # ICON_CACHE_FOLDER=data/icon_cache
 # ATTACHMENTS_FOLDER=data/attachments
 # SENDS_FOLDER=data/sends
-# TMP_FOLDER=data/tmp
 
-## Templates data folder, by default uses embedded templates
-## Check source code to see the format
+## Temporary folder used for storing temporary file uploads
+## Must be a local path.
+# TMP_FOLDER=data/tmp
+
+## HTML template overrides data folder
+## Must be a local path.
 # TEMPLATES_FOLDER=data/templates
 ## Automatically reload the templates for every request, slow, use only for development
 # RELOAD_TEMPLATES=false
@@ -39,7 +50,9 @@
 #########################
 
 ## Database URL
-## When using SQLite, this is the path to the DB file, default to %DATA_FOLDER%/db.sqlite3
+## When using SQLite, this is the path to the DB file, and it defaults to
+## %DATA_FOLDER%/db.sqlite3. If DATA_FOLDER is set to an external location, this
+## must be set to a local sqlite3 file path.
 # DATABASE_URL=data/db.sqlite3
 ## When using MySQL, specify an appropriate connection URI.
 ## Details: https://docs.diesel.rs/2.1.x/diesel/mysql/struct.MysqlConnection.html
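Putting those comments together, a minimal sketch of an S3-backed setup (bucket name, paths, and database URI below are placeholders, not taken from this diff) keeps the temp folder and templates local and points the database at a remote server:

    DATA_FOLDER=s3://my-vaultwarden-bucket/vaultwarden
    TMP_FOLDER=/var/lib/vaultwarden/tmp
    TEMPLATES_FOLDER=/var/lib/vaultwarden/templates
    DATABASE_URL=postgresql://user:pass@db-host:5432/vaultwarden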
Cargo.lock (1012 lines changed, generated): file diff suppressed because it is too large.

Cargo.toml (15 lines changed)
@@ -32,6 +32,7 @@ enable_mimalloc = ["dep:mimalloc"]
 # You also need to set an env variable `QUERY_LOGGER=1` to fully activate this so you do not have to re-compile
 # if you want to turn off the logging for a specific run.
 query_logger = ["dep:diesel_logger"]
+s3 = ["opendal/services-s3", "dep:aws-config", "dep:aws-credential-types", "dep:anyhow", "dep:reqsign"]
 
 # OIDC specific features
 oidc-accept-rfc3339-timestamps = ["openidconnect/accept-rfc3339-timestamps"]
@@ -77,6 +78,7 @@ dashmap = "6.1.0"
 # Async futures
 futures = "0.3.31"
 tokio = { version = "1.45.1", features = ["rt-multi-thread", "fs", "io-util", "parking_lot", "time", "signal", "net"] }
+tokio-util = { version = "0.7.15", features = ["compat"] }
 
 # A generic serialization/deserialization framework
 serde = { version = "1.0.219", features = ["derive"] }
@@ -136,7 +138,7 @@ email_address = "0.2.9"
 handlebars = { version = "6.3.2", features = ["dir_source"] }
 
 # HTTP client (Used for favicons, version check, DUO and HIBP API)
-reqwest = { version = "0.12.15", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
+reqwest = { version = "0.12.18", features = ["native-tls-alpn", "stream", "json", "gzip", "brotli", "socks", "cookies"] }
 hickory-resolver = "0.25.2"
 
 # Favicon extraction libraries
@@ -153,7 +155,7 @@ cookie = "0.18.1"
 cookie_store = "0.21.1"
 
 # Used by U2F, JWT and PostgreSQL
-openssl = "0.10.72"
+openssl = "0.10.73"
 
 # CLI argument parsing
 pico-args = "0.5.0"
@@ -184,6 +186,15 @@ rpassword = "7.4.0"
 # Loading a dynamic CSS Stylesheet
 grass_compiler = { version = "0.13.4", default-features = false }
 
+# Files are accessed through Apache OpenDAL
+opendal = { version = "0.53.3", features = ["services-fs"] }
+
+# For retrieving AWS credentials, including temporary SSO credentials
+anyhow = { version = "1.0.98", optional = true }
+aws-config = { version = "1.6.3", features = ["behavior-version-latest"], optional = true }
+aws-credential-types = { version = "1.2.3", optional = true }
+reqsign = { version = "0.16.3", optional = true }
+
 # Strip debuginfo from the release builds
 # The debug symbols are to provide better panic traces
 # Also enable fat LTO and use 1 codegen unit for optimizations
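Because every new AWS dependency is optional and wired to the `s3` feature, S3 support stays out of default builds and is opted into at compile time. A sketch of the two build modes (the exact feature set depends on your database choice; `sqlite` is used here as an example):

    # Local-filesystem-only build (default):
    cargo build --release --features sqlite
    # Build with S3 external storage support:
    cargo build --release --features sqlite,s3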
build.rs (3 lines changed)
@@ -11,6 +11,8 @@ fn main() {
     println!("cargo:rustc-cfg=postgresql");
     #[cfg(feature = "query_logger")]
     println!("cargo:rustc-cfg=query_logger");
+    #[cfg(feature = "s3")]
+    println!("cargo:rustc-cfg=s3");
 
     #[cfg(not(any(feature = "sqlite", feature = "mysql", feature = "postgresql")))]
     compile_error!(
@@ -23,6 +25,7 @@ fn main() {
     println!("cargo::rustc-check-cfg=cfg(mysql)");
     println!("cargo::rustc-check-cfg=cfg(postgresql)");
    println!("cargo::rustc-check-cfg=cfg(query_logger)");
+    println!("cargo::rustc-check-cfg=cfg(s3)");
 
     // Rerun when these paths are changed.
     // Someone could have checked-out a tag or specific commit, but no other files changed.
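The build script mirrors the Cargo feature into a custom `s3` cfg flag and registers it with rustc's cfg checker, so source files can gate S3-only code paths; src/config.rs later in this diff relies on exactly this. A minimal sketch of how such a flag is consumed (function name is illustrative only):

    // Compiled only when build.rs emitted `cargo:rustc-cfg=s3`.
    #[cfg(s3)]
    fn s3_only_setup() { /* S3-specific initialization */ }

    #[cfg(not(s3))]
    fn s3_only_setup() { /* no-op when built without the `s3` feature */ }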
@@ -641,7 +641,7 @@ use cached::proc_macro::cached;
 /// It will cache this function for 600 seconds (10 minutes) which should prevent the exhaustion of the rate limit
 /// Any cache will be lost if Vaultwarden is restarted
 #[cached(time = 600, sync_writes = "default")]
-async fn get_release_info(has_http_access: bool, running_within_container: bool) -> (String, String, String) {
+async fn get_release_info(has_http_access: bool) -> (String, String, String) {
     // If the HTTP Check failed, do not even attempt to check for new versions since we were not able to connect with github.com anyway.
     if has_http_access {
         (
@@ -660,17 +660,11 @@ async fn get_release_info(has_http_access: bool)
             },
-            // Do not fetch the web-vault version when running within a container
-            // The web-vault version is embedded within the container itself, and should not be updated manually
-            if running_within_container {
-                "-".to_string()
-            } else {
-                match get_json_api::<GitRelease>(
-                    "https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest",
-                )
-                .await
-                {
-                    Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
-                    _ => "-".to_string(),
-                }
-            }
+            match get_json_api::<GitRelease>("https://api.github.com/repos/dani-garcia/bw_web_builds/releases/latest")
+                .await
+            {
+                Ok(r) => r.tag_name.trim_start_matches('v').to_string(),
+                _ => "-".to_string(),
+            },
         )
     } else {
@@ -716,8 +710,7 @@ async fn diagnostics(_token: AdminToken, ip_header: IpHeader, mut conn: DbConn)
         _ => "Unable to resolve domain name.".to_string(),
     };
 
-    let (latest_release, latest_commit, latest_web_build) =
-        get_release_info(has_http_access, running_within_container).await;
+    let (latest_release, latest_commit, latest_web_build) = get_release_info(has_http_access).await;
 
     let ip_header_name = &ip_header.0.unwrap_or_default();
 
@@ -780,17 +773,17 @@ fn get_diagnostics_http(code: u16, _token: AdminToken) -> EmptyResult {
 }
 
 #[post("/config", format = "application/json", data = "<data>")]
-fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
+async fn post_config(data: Json<ConfigBuilder>, _token: AdminToken) -> EmptyResult {
     let data: ConfigBuilder = data.into_inner();
-    if let Err(e) = CONFIG.update_config(data, true) {
+    if let Err(e) = CONFIG.update_config(data, true).await {
         err!(format!("Unable to save config: {e:?}"))
     }
     Ok(())
 }
 
 #[post("/config/delete", format = "application/json")]
-fn delete_config(_token: AdminToken) -> EmptyResult {
-    if let Err(e) = CONFIG.delete_user_config() {
+async fn delete_config(_token: AdminToken) -> EmptyResult {
+    if let Err(e) = CONFIG.delete_user_config().await {
         err!(format!("Unable to delete config: {e:?}"))
     }
     Ok(())
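The `#[cached(...)]` attribute above comes from the `cached` crate's proc-macro API: results are memoized per argument value for the given number of seconds, which is why dropping the `running_within_container` parameter also changes the cache key. A minimal sketch of the same pattern (function name and body are illustrative only):

    use cached::proc_macro::cached;

    // Memoizes by argument for 600 seconds; "sync_writes" makes concurrent
    // callers with the same key wait for the first computation instead of
    // racing to fill the cache.
    #[cached(time = 600, sync_writes = "default")]
    fn expensive_lookup(key: String) -> usize {
        key.len() // stand-in for an expensive network call
    }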
@@ -11,10 +11,11 @@ use rocket::{
 use serde_json::Value;
 
 use crate::auth::ClientVersion;
-use crate::util::NumberOrString;
+use crate::util::{save_temp_file, NumberOrString};
 use crate::{
     api::{self, core::log_event, EmptyResult, JsonResult, Notify, PasswordOrOtpData, UpdateType},
     auth::Headers,
+    config::PathType,
     crypto,
     db::{models::*, DbConn, DbPool},
     CONFIG,
@@ -105,12 +106,7 @@ struct SyncData {
 }
 
 #[get("/sync?<data..>")]
-async fn sync(
-    data: SyncData,
-    headers: Headers,
-    client_version: Option<ClientVersion>,
-    mut conn: DbConn,
-) -> Json<Value> {
+async fn sync(data: SyncData, headers: Headers, client_version: Option<ClientVersion>, mut conn: DbConn) -> JsonResult {
     let user_json = headers.user.to_json(&mut conn).await;
 
     // Get all ciphers which are visible by the user
@@ -134,7 +130,7 @@ async fn sync(
     for c in ciphers {
         ciphers_json.push(
             c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
-                .await,
+                .await?,
         );
     }
 
@@ -159,7 +155,7 @@ async fn sync(
         api::core::_get_eq_domains(headers, true).into_inner()
     };
 
-    Json(json!({
+    Ok(Json(json!({
         "profile": user_json,
         "folders": folders_json,
         "collections": collections_json,
@@ -168,11 +164,11 @@ async fn sync(
         "domains": domains_json,
         "sends": sends_json,
         "object": "sync"
-    }))
+    })))
 }
 
 #[get("/ciphers")]
-async fn get_ciphers(headers: Headers, mut conn: DbConn) -> Json<Value> {
+async fn get_ciphers(headers: Headers, mut conn: DbConn) -> JsonResult {
     let ciphers = Cipher::find_by_user_visible(&headers.user.uuid, &mut conn).await;
     let cipher_sync_data = CipherSyncData::new(&headers.user.uuid, CipherSyncType::User, &mut conn).await;
 
@@ -180,15 +176,15 @@ async fn get_ciphers(headers: Headers, mut conn: DbConn) -> JsonResult {
     for c in ciphers {
         ciphers_json.push(
             c.to_json(&headers.host, &headers.user.uuid, Some(&cipher_sync_data), CipherSyncType::User, &mut conn)
-                .await,
+                .await?,
         );
     }
 
-    Json(json!({
+    Ok(Json(json!({
         "data": ciphers_json,
         "object": "list",
         "continuationToken": null
-    }))
+    })))
 }
 
 #[get("/ciphers/<cipher_id>")]
@@ -201,7 +197,7 @@ async fn get_cipher(cipher_id: CipherId, headers: Headers, mut conn: DbConn) ->
         err!("Cipher is not owned by user")
     }
 
-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }
 
 #[get("/ciphers/<cipher_id>/admin")]
@@ -339,7 +335,7 @@ async fn post_ciphers(data: Json<CipherData>, headers: Headers, mut conn: DbConn
     let mut cipher = Cipher::new(data.r#type, data.name.clone());
     update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherCreate).await?;
 
-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }
 
 /// Enforces the personal ownership policy on user-owned ciphers, if applicable.
@@ -676,7 +672,7 @@ async fn put_cipher(
 
     update_cipher_from_data(&mut cipher, data, &headers, None, &mut conn, &nt, UpdateType::SyncCipherUpdate).await?;
 
-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }
 
 #[post("/ciphers/<cipher_id>/partial", data = "<data>")]
@@ -714,7 +710,7 @@ async fn put_cipher_partial(
     // Update favorite
     cipher.set_favorite(Some(data.favorite), &headers.user.uuid, &mut conn).await?;
 
-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }
 
 #[derive(Deserialize)]
@@ -825,7 +821,7 @@ async fn post_collections_update(
     )
     .await;
 
-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
 }
 
 #[put("/ciphers/<cipher_id>/collections-admin", data = "<data>")]
@@ -1030,7 +1026,7 @@ async fn share_cipher_by_uuid(
 
     update_cipher_from_data(&mut cipher, data.cipher, headers, Some(shared_to_collections), conn, nt, ut).await?;
 
-    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
+    Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
 }
 
 /// v2 API for downloading an attachment. This just redirects the client to
@@ -1055,7 +1051,7 @@ async fn get_attachment(
     }
 
     match Attachment::find_by_id(&attachment_id, &mut conn).await {
-        Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host))),
+        Some(attachment) if cipher_id == attachment.cipher_uuid => Ok(Json(attachment.to_json(&headers.host).await?)),
         Some(_) => err!("Attachment doesn't belong to cipher"),
         None => err!("Attachment doesn't exist"),
     }
@@ -1116,7 +1112,7 @@ async fn post_attachment_v2(
         "attachmentId": attachment_id,
         "url": url,
         "fileUploadType": FileUploadType::Direct as i32,
-        response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await,
+        response_key: cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?,
     })))
 }
 
@@ -1142,7 +1138,7 @@ async fn save_attachment(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> Result<(Cipher, DbConn), crate::error::Error> {
-    let mut data = data.into_inner();
+    let data = data.into_inner();
 
     let Some(size) = data.data.len().to_i64() else {
         err!("Attachment data size overflow");
@@ -1269,13 +1265,7 @@ async fn save_attachment(
         attachment.save(&mut conn).await.expect("Error saving attachment");
     }
 
-    let folder_path = tokio::fs::canonicalize(&CONFIG.attachments_folder()).await?.join(cipher_id.as_ref());
-    let file_path = folder_path.join(file_id.as_ref());
-    tokio::fs::create_dir_all(&folder_path).await?;
-
-    if let Err(_err) = data.data.persist_to(&file_path).await {
-        data.data.move_copy_to(file_path).await?
-    }
+    save_temp_file(PathType::Attachments, &format!("{cipher_id}/{file_id}"), data.data, true).await?;
 
     nt.send_cipher_update(
         UpdateType::SyncCipherUpdate,
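The new `save_temp_file` helper lives in src/util.rs, which is not shown in this excerpt; at its call sites it takes a PathType, a relative object path, Rocket's uploaded TempFile, and an overwrite flag, replacing the hand-rolled persist_to/move_copy_to dance. A hypothetical reconstruction, simplified to take bytes instead of a TempFile, under those assumptions:

    use crate::{config::PathType, CONFIG, Error};

    // Hypothetical sketch, not the actual src/util.rs code.
    pub async fn save_temp_file(
        path_type: PathType,
        path: &str,
        contents: Vec<u8>,
        overwrite: bool,
    ) -> Result<(), Error> {
        // Resolve the OpenDAL backend (local Fs or S3) for this folder type.
        let operator = CONFIG.opendal_operator_for_path_type(path_type)?;
        if !overwrite && operator.exists(path).await? {
            return Err(Error::new("File already exists", "refusing to overwrite"));
        }
        operator.write(path, contents).await?;
        Ok(())
    }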
|
@ -1342,7 +1332,7 @@ async fn post_attachment(
|
|||
|
||||
let (cipher, mut conn) = save_attachment(attachment, cipher_id, data, &headers, conn, nt).await?;
|
||||
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await))
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, &mut conn).await?))
|
||||
}
|
||||
|
||||
#[post("/ciphers/<cipher_id>/attachment-admin", format = "multipart/form-data", data = "<data>")]
|
||||
|
@ -1786,7 +1776,7 @@ async fn _restore_cipher_by_uuid(
|
|||
.await;
|
||||
}
|
||||
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await))
|
||||
Ok(Json(cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?))
|
||||
}
|
||||
|
||||
async fn _restore_multiple_ciphers(
|
||||
|
@ -1859,7 +1849,7 @@ async fn _delete_cipher_attachment_by_id(
|
|||
)
|
||||
.await;
|
||||
}
|
||||
let cipher_json = cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await;
|
||||
let cipher_json = cipher.to_json(&headers.host, &headers.user.uuid, None, CipherSyncType::User, conn).await?;
|
||||
Ok(Json(json!({"cipher":cipher_json})))
|
||||
}
|
||||
|
||||
|
|
|
@@ -582,7 +582,7 @@ async fn view_emergency_access(emer_id: EmergencyAccessId, headers: Headers, mut
                 CipherSyncType::User,
                 &mut conn,
             )
-            .await,
+            .await?,
         );
     }
@@ -948,21 +948,26 @@ async fn get_org_details(data: OrgIdData, headers: OrgMemberHeaders, mut conn: D
     }
 
     Ok(Json(json!({
-        "data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await,
+        "data": _get_org_details(&data.organization_id, &headers.host, &headers.user.uuid, &mut conn).await?,
         "object": "list",
         "continuationToken": null,
     })))
 }
 
-async fn _get_org_details(org_id: &OrganizationId, host: &str, user_id: &UserId, conn: &mut DbConn) -> Value {
+async fn _get_org_details(
+    org_id: &OrganizationId,
+    host: &str,
+    user_id: &UserId,
+    conn: &mut DbConn,
+) -> Result<Value, crate::Error> {
     let ciphers = Cipher::find_by_org(org_id, conn).await;
     let cipher_sync_data = CipherSyncData::new(user_id, CipherSyncType::Organization, conn).await;
 
     let mut ciphers_json = Vec::with_capacity(ciphers.len());
     for c in ciphers {
-        ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await);
+        ciphers_json.push(c.to_json(host, user_id, Some(&cipher_sync_data), CipherSyncType::Organization, conn).await?);
     }
-    json!(ciphers_json)
+    Ok(json!(ciphers_json))
 }
 
 #[derive(Deserialize)]
@@ -3415,7 +3420,7 @@ async fn get_org_export(org_id: OrganizationId, headers: AdminHeaders, mut conn:
 
     Ok(Json(json!({
         "collections": convert_json_key_lcase_first(_get_org_collections(&org_id, &mut conn).await),
-        "ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await),
+        "ciphers": convert_json_key_lcase_first(_get_org_details(&org_id, &headers.host, &headers.user.uuid, &mut conn).await?),
    })))
 }
@@ -1,4 +1,5 @@
 use std::path::Path;
+use std::time::Duration;
 
 use chrono::{DateTime, TimeDelta, Utc};
 use num_traits::ToPrimitive;
@@ -12,8 +13,9 @@ use serde_json::Value;
 use crate::{
     api::{ApiResult, EmptyResult, JsonResult, Notify, UpdateType},
     auth::{ClientIp, Headers, Host},
+    config::PathType,
     db::{models::*, DbConn, DbPool},
-    util::NumberOrString,
+    util::{save_temp_file, NumberOrString},
     CONFIG,
 };
 
@@ -228,7 +230,7 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
 
     let UploadData {
         model,
-        mut data,
+        data,
     } = data.into_inner();
     let model = model.into_inner();
 
@@ -268,13 +270,8 @@ async fn post_send_file(data: Form<UploadData<'_>>, headers: Headers, mut conn:
     }
 
     let file_id = crate::crypto::generate_send_file_id();
-    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(&send.uuid);
-    let file_path = folder_path.join(&file_id);
-    tokio::fs::create_dir_all(&folder_path).await?;
-
-    if let Err(_err) = data.persist_to(&file_path).await {
-        data.move_copy_to(file_path).await?
-    }
+
+    save_temp_file(PathType::Sends, &format!("{}/{file_id}", send.uuid), data, true).await?;
 
     let mut data_value: Value = serde_json::from_str(&send.data)?;
     if let Some(o) = data_value.as_object_mut() {
@@ -381,7 +378,7 @@ async fn post_send_file_v2_data(
 ) -> EmptyResult {
     enforce_disable_send_policy(&headers, &mut conn).await?;
 
-    let mut data = data.into_inner();
+    let data = data.into_inner();
 
     let Some(send) = Send::find_by_uuid_and_user(&send_id, &headers.user.uuid, &mut conn).await else {
         err!("Send not found. Unable to save the file.", "Invalid send uuid or does not belong to user.")
@@ -424,19 +421,9 @@ async fn post_send_file_v2_data(
         err!("Send file size does not match.", format!("Expected a file size of {} got {size}", send_data.size));
     }
 
-    let folder_path = tokio::fs::canonicalize(&CONFIG.sends_folder()).await?.join(send_id);
-    let file_path = folder_path.join(file_id);
-
-    // Check if the file already exists, if that is the case do not overwrite it
-    if tokio::fs::metadata(&file_path).await.is_ok() {
-        err!("Send file has already been uploaded.", format!("File {file_path:?} already exists"))
-    }
-
-    tokio::fs::create_dir_all(&folder_path).await?;
-
-    if let Err(_err) = data.data.persist_to(&file_path).await {
-        data.data.move_copy_to(file_path).await?
-    }
+    let file_path = format!("{send_id}/{file_id}");
+
+    save_temp_file(PathType::Sends, &file_path, data.data, false).await?;
 
     nt.send_send_update(
         UpdateType::SyncSendCreate,
@@ -569,15 +556,26 @@ async fn post_access_file(
     )
     .await;
 
-    let token_claims = crate::auth::generate_send_claims(&send_id, &file_id);
-    let token = crate::auth::encode_jwt(&token_claims);
     Ok(Json(json!({
         "object": "send-fileDownload",
         "id": file_id,
-        "url": format!("{}/api/sends/{send_id}/{file_id}?t={token}", &host.host)
+        "url": download_url(&host, &send_id, &file_id).await?,
     })))
 }
 
+async fn download_url(host: &Host, send_id: &SendId, file_id: &SendFileId) -> Result<String, crate::Error> {
+    let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
+
+    if operator.info().scheme() == opendal::Scheme::Fs {
+        let token_claims = crate::auth::generate_send_claims(send_id, file_id);
+        let token = crate::auth::encode_jwt(&token_claims);
+
+        Ok(format!("{}/api/sends/{send_id}/{file_id}?t={token}", &host.host))
+    } else {
+        Ok(operator.presign_read(&format!("{send_id}/{file_id}"), Duration::from_secs(5 * 60)).await?.uri().to_string())
+    }
+}
+
 #[get("/sends/<send_id>/<file_id>?<t>")]
 async fn download_send(send_id: SendId, file_id: SendFileId, t: &str) -> Option<NamedFile> {
     if let Ok(claims) = crate::auth::decode_send(t) {
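The design point in download_url is that local filesystem storage keeps the old JWT-protected download route, while any non-Fs backend (e.g. S3) hands the client a presigned URL that expires after five minutes, so file bytes never stream through Vaultwarden itself. A standalone sketch of just the presigning call:

    use std::time::Duration;
    use opendal::Operator;

    // Ask the backend to sign a GET for this object, valid for five
    // minutes, and return the resulting URL for the client to fetch.
    async fn presigned_get(op: &Operator, path: &str) -> opendal::Result<String> {
        let req = op.presign_read(path, Duration::from_secs(5 * 60)).await?;
        Ok(req.uri().to_string())
    }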
@@ -261,7 +261,7 @@ pub(crate) async fn get_duo_keys_email(email: &str, conn: &mut DbConn) -> ApiRes
     }
     .map_res("Can't fetch Duo Keys")?;
 
-    Ok((data.ik, data.sk, CONFIG.get_duo_akey(), data.host))
+    Ok((data.ik, data.sk, CONFIG.get_duo_akey().await, data.host))
 }
 
 pub async fn generate_duo_signature(email: &str, conn: &mut DbConn) -> ApiResult<(String, String)> {
@@ -14,14 +14,11 @@ use reqwest::{
     Client, Response,
 };
 use rocket::{http::ContentType, response::Redirect, Route};
-use tokio::{
-    fs::{create_dir_all, remove_file, symlink_metadata, File},
-    io::{AsyncReadExt, AsyncWriteExt},
-};
 
 use html5gum::{Emitter, HtmlString, Readable, StringReader, Tokenizer};
 
 use crate::{
+    config::PathType,
     error::Error,
     http_client::{get_reqwest_client_builder, should_block_address, CustomHttpClientError},
     util::Cached,
@@ -158,7 +155,7 @@ fn is_valid_domain(domain: &str) -> bool {
 }
 
 async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
-    let path = format!("{}/{domain}.png", CONFIG.icon_cache_folder());
+    let path = format!("{domain}.png");
 
     // Check for expiration of negatively cached copy
     if icon_is_negcached(&path).await {
@@ -177,7 +174,7 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
     // Get the icon, or None in case of error
     match download_icon(domain).await {
         Ok((icon, icon_type)) => {
-            save_icon(&path, &icon).await;
+            save_icon(&path, icon.to_vec()).await;
             Some((icon.to_vec(), icon_type.unwrap_or("x-icon").to_string()))
         }
         Err(e) => {
@@ -190,7 +187,7 @@ async fn get_icon(domain: &str) -> Option<(Vec<u8>, String)> {
 
             warn!("Unable to download icon: {e:?}");
             let miss_indicator = path + ".miss";
-            save_icon(&miss_indicator, &[]).await;
+            save_icon(&miss_indicator, vec![]).await;
             None
         }
     }
@@ -203,11 +200,9 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
     }
 
     // Try to read the cached icon, and return it if it exists
-    if let Ok(mut f) = File::open(path).await {
-        let mut buffer = Vec::new();
-
-        if f.read_to_end(&mut buffer).await.is_ok() {
-            return Some(buffer);
+    if let Ok(operator) = CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
+        if let Ok(buf) = operator.read(path).await {
+            return Some(buf.to_vec());
         }
     }
 
@@ -215,9 +210,11 @@ async fn get_cached_icon(path: &str) -> Option<Vec<u8>> {
 }
 
 async fn file_is_expired(path: &str, ttl: u64) -> Result<bool, Error> {
-    let meta = symlink_metadata(path).await?;
-    let modified = meta.modified()?;
-    let age = SystemTime::now().duration_since(modified)?;
+    let operator = CONFIG.opendal_operator_for_path_type(PathType::IconCache)?;
+    let meta = operator.stat(path).await?;
+    let modified =
+        meta.last_modified().ok_or_else(|| std::io::Error::other(format!("No last modified time for `{path}`")))?;
+    let age = SystemTime::now().duration_since(modified.into())?;
 
     Ok(ttl > 0 && ttl <= age.as_secs())
 }
@@ -229,8 +226,13 @@ async fn icon_is_negcached(path: &str) -> bool {
     match expired {
         // No longer negatively cached, drop the marker
         Ok(true) => {
-            if let Err(e) = remove_file(&miss_indicator).await {
-                error!("Could not remove negative cache indicator for icon {path:?}: {e:?}");
+            match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
+                Ok(operator) => {
+                    if let Err(e) = operator.delete(&miss_indicator).await {
+                        error!("Could not remove negative cache indicator for icon {path:?}: {e:?}");
+                    }
+                }
+                Err(e) => error!("Could not remove negative cache indicator for icon {path:?}: {e:?}"),
             }
             false
         }
@@ -564,17 +566,17 @@ async fn download_icon(domain: &str) -> Result<(Bytes, Option<&str>), Error> {
     Ok((buffer, icon_type))
 }
 
-async fn save_icon(path: &str, icon: &[u8]) {
-    match File::create(path).await {
-        Ok(mut f) => {
-            f.write_all(icon).await.expect("Error writing icon file");
-        }
-        Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
-            create_dir_all(&CONFIG.icon_cache_folder()).await.expect("Error creating icon cache folder");
-        }
+async fn save_icon(path: &str, icon: Vec<u8>) {
+    let operator = match CONFIG.opendal_operator_for_path_type(PathType::IconCache) {
+        Ok(operator) => operator,
         Err(e) => {
-            warn!("Unable to save icon: {e:?}");
+            warn!("Failed to get OpenDAL operator while saving icon: {e}");
+            return;
         }
+    };
+
+    if let Err(e) = operator.write(path, icon).await {
+        warn!("Unable to save icon: {e:?}");
    }
 }
src/auth.rs (58 lines changed)
@@ -6,15 +6,11 @@ use once_cell::sync::{Lazy, OnceCell};
 use openssl::rsa::Rsa;
 use serde::de::DeserializeOwned;
 use serde::ser::Serialize;
-use std::{
-    env,
-    fs::File,
-    io::{Read, Write},
-    net::IpAddr,
-};
+use std::{env, net::IpAddr};
 
 use crate::{
     api::ApiResult,
+    config::PathType,
     db::models::{
         AttachmentId, CipherId, CollectionId, DeviceId, DeviceType, EmergencyAccessId, MembershipId, OrgApiKeyId,
         OrganizationId, SendFileId, SendId, UserId,
@@ -48,37 +44,33 @@ static JWT_REGISTER_VERIFY_ISSUER: Lazy<String> = Lazy::new(|| format!("{}|regis
 static PRIVATE_RSA_KEY: OnceCell<EncodingKey> = OnceCell::new();
 static PUBLIC_RSA_KEY: OnceCell<DecodingKey> = OnceCell::new();
 
-pub fn initialize_keys() -> Result<(), Error> {
-    fn read_key(create_if_missing: bool) -> Result<(Rsa<openssl::pkey::Private>, Vec<u8>), Error> {
-        let mut priv_key_buffer = Vec::with_capacity(2048);
+pub async fn initialize_keys() -> Result<(), Error> {
+    use std::io::Error;
 
-        let mut priv_key_file = File::options()
-            .create(create_if_missing)
-            .truncate(false)
-            .read(true)
-            .write(create_if_missing)
-            .open(CONFIG.private_rsa_key())?;
+    let rsa_key_filename = std::path::PathBuf::from(CONFIG.private_rsa_key())
+        .file_name()
+        .ok_or_else(|| Error::other("Private RSA key path missing filename"))?
+        .to_str()
+        .ok_or_else(|| Error::other("Private RSA key path filename is not valid UTF-8"))?
+        .to_string();
 
-        #[allow(clippy::verbose_file_reads)]
-        let bytes_read = priv_key_file.read_to_end(&mut priv_key_buffer)?;
+    let operator = CONFIG.opendal_operator_for_path_type(PathType::RsaKey).map_err(Error::other)?;
 
-        let rsa_key = if bytes_read > 0 {
-            Rsa::private_key_from_pem(&priv_key_buffer[..bytes_read])?
-        } else if create_if_missing {
-            // Only create the key if the file doesn't exist or is empty
-            let rsa_key = Rsa::generate(2048)?;
-            priv_key_buffer = rsa_key.private_key_to_pem()?;
-            priv_key_file.write_all(&priv_key_buffer)?;
-            info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
-            rsa_key
-        } else {
-            err!("Private key does not exist or invalid format", CONFIG.private_rsa_key());
-        };
+    let priv_key_buffer = match operator.read(&rsa_key_filename).await {
+        Ok(buffer) => Some(buffer),
+        Err(e) if e.kind() == opendal::ErrorKind::NotFound => None,
+        Err(e) => return Err(e.into()),
+    };
 
-        Ok((rsa_key, priv_key_buffer))
-    }
-
-    let (priv_key, priv_key_buffer) = read_key(true).or_else(|_| read_key(false))?;
+    let (priv_key, priv_key_buffer) = if let Some(priv_key_buffer) = priv_key_buffer {
+        (Rsa::private_key_from_pem(priv_key_buffer.to_vec().as_slice())?, priv_key_buffer.to_vec())
+    } else {
+        let rsa_key = Rsa::generate(2048)?;
+        let priv_key_buffer = rsa_key.private_key_to_pem()?;
+        operator.write(&rsa_key_filename, priv_key_buffer.clone()).await?;
+        info!("Private key '{}' created correctly", CONFIG.private_rsa_key());
+        (rsa_key, priv_key_buffer)
+    };
     let pub_key_buffer = priv_key.public_key_to_pem()?;
 
     let enc = EncodingKey::from_rsa_pem(&priv_key_buffer)?;
src/config.rs (158 lines changed)
@@ -3,7 +3,7 @@ use std::{
     process::exit,
     sync::{
         atomic::{AtomicBool, Ordering},
-        RwLock,
+        LazyLock, RwLock,
     },
 };
 
@@ -22,10 +22,32 @@ static CONFIG_FILE: Lazy<String> = Lazy::new(|| {
     get_env("CONFIG_FILE").unwrap_or_else(|| format!("{data_folder}/config.json"))
 });
 
+static CONFIG_FILE_PARENT_DIR: LazyLock<String> = LazyLock::new(|| {
+    let path = std::path::PathBuf::from(&*CONFIG_FILE);
+    path.parent().unwrap_or(std::path::Path::new("data")).to_str().unwrap_or("data").to_string()
+});
+
+static CONFIG_FILENAME: LazyLock<String> = LazyLock::new(|| {
+    let path = std::path::PathBuf::from(&*CONFIG_FILE);
+    path.file_name().unwrap_or(std::ffi::OsStr::new("config.json")).to_str().unwrap_or("config.json").to_string()
+});
+
 pub static SKIP_CONFIG_VALIDATION: AtomicBool = AtomicBool::new(false);
 
 pub static CONFIG: Lazy<Config> = Lazy::new(|| {
-    Config::load().unwrap_or_else(|e| {
+    std::thread::spawn(|| {
+        let rt = tokio::runtime::Builder::new_current_thread().enable_all().build().unwrap_or_else(|e| {
+            println!("Error loading config:\n {e:?}\n");
+            exit(12)
+        });
+
+        rt.block_on(Config::load()).unwrap_or_else(|e| {
+            println!("Error loading config:\n {e:?}\n");
+            exit(12)
+        })
+    })
+    .join()
+    .unwrap_or_else(|e| {
         println!("Error loading config:\n {e:?}\n");
         exit(12)
     })
@@ -110,10 +132,11 @@ macro_rules! make_config {
             builder
         }
 
-        fn from_file(path: &str) -> Result<Self, Error> {
-            let config_str = std::fs::read_to_string(path)?;
-            println!("[INFO] Using saved config from `{path}` for configuration.\n");
-            serde_json::from_str(&config_str).map_err(Into::into)
+        async fn from_file() -> Result<Self, Error> {
+            let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
+            let config_bytes = operator.read(&CONFIG_FILENAME).await?;
+            println!("[INFO] Using saved config from `{}` for configuration.\n", *CONFIG_FILE);
+            serde_json::from_slice(&config_bytes.to_vec()).map_err(Into::into)
         }
 
         fn clear_non_editable(&mut self) {
@@ -1241,11 +1264,93 @@ fn parse_param_list(config: String, separator: char, kv_separator: char) -> Resu
         .collect()
 }
 
+fn opendal_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
+    // Cache of previously built operators by path
+    static OPERATORS_BY_PATH: LazyLock<dashmap::DashMap<String, opendal::Operator>> =
+        LazyLock::new(dashmap::DashMap::new);
+
+    if let Some(operator) = OPERATORS_BY_PATH.get(path) {
+        return Ok(operator.clone());
+    }
+
+    let operator = if path.starts_with("s3://") {
+        #[cfg(not(s3))]
+        return Err(opendal::Error::new(opendal::ErrorKind::ConfigInvalid, "S3 support is not enabled").into());
+
+        #[cfg(s3)]
+        opendal_s3_operator_for_path(path)?
+    } else {
+        let builder = opendal::services::Fs::default().root(path);
+        opendal::Operator::new(builder)?.finish()
+    };
+
+    OPERATORS_BY_PATH.insert(path.to_string(), operator.clone());
+
+    Ok(operator)
+}
+
+#[cfg(s3)]
+fn opendal_s3_operator_for_path(path: &str) -> Result<opendal::Operator, Error> {
+    // This is a custom AWS credential loader that uses the official AWS Rust
+    // SDK config crate to load credentials. This ensures maximum compatibility
+    // with AWS credential configurations. For example, OpenDAL doesn't support
+    // AWS SSO temporary credentials yet.
+    struct OpenDALS3CredentialLoader {}
+
+    #[async_trait]
+    impl reqsign::AwsCredentialLoad for OpenDALS3CredentialLoader {
+        async fn load_credential(&self, _client: reqwest::Client) -> anyhow::Result<Option<reqsign::AwsCredential>> {
+            use aws_credential_types::provider::ProvideCredentials as _;
+            use tokio::sync::OnceCell;
+
+            static DEFAULT_CREDENTIAL_CHAIN: OnceCell<
+                aws_config::default_provider::credentials::DefaultCredentialsChain,
+            > = OnceCell::const_new();
+
+            let chain = DEFAULT_CREDENTIAL_CHAIN
+                .get_or_init(|| aws_config::default_provider::credentials::DefaultCredentialsChain::builder().build())
+                .await;
+
+            let creds = chain.provide_credentials().await?;
+
+            Ok(Some(reqsign::AwsCredential {
+                access_key_id: creds.access_key_id().to_string(),
+                secret_access_key: creds.secret_access_key().to_string(),
+                session_token: creds.session_token().map(|s| s.to_string()),
+                expires_in: creds.expiry().map(|expiration| expiration.into()),
+            }))
+        }
+    }
+
+    const OPEN_DAL_S3_CREDENTIAL_LOADER: OpenDALS3CredentialLoader = OpenDALS3CredentialLoader {};
+
+    let url = Url::parse(path).map_err(|e| format!("Invalid path S3 URL path {path:?}: {e}"))?;
+
+    let bucket = url.host_str().ok_or_else(|| format!("Missing Bucket name in data folder S3 URL {path:?}"))?;
+
+    let builder = opendal::services::S3::default()
+        .customized_credential_load(Box::new(OPEN_DAL_S3_CREDENTIAL_LOADER))
+        .enable_virtual_host_style()
+        .bucket(bucket)
+        .root(url.path())
+        .default_storage_class("INTELLIGENT_TIERING");
+
+    Ok(opendal::Operator::new(builder)?.finish())
+}
+
+pub enum PathType {
+    Data,
+    IconCache,
+    Attachments,
+    Sends,
+    RsaKey,
+}
+
 impl Config {
-    pub fn load() -> Result<Self, Error> {
+    pub async fn load() -> Result<Self, Error> {
         // Loading from env and file
         let _env = ConfigBuilder::from_env();
-        let _usr = ConfigBuilder::from_file(&CONFIG_FILE).unwrap_or_default();
+        let _usr = ConfigBuilder::from_file().await.unwrap_or_default();
 
         // Create merged config, config file overwrites env
         let mut _overrides = Vec::new();
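Every storage consumer now funnels through one factory: PathType names a logical folder, opendal_operator_for_path maps its configured path to a cached backend (local Fs, or S3 when the path starts with s3://), and callers use the uniform read/write/delete/presign surface via opendal_operator_for_path_type (added to impl Config in a later hunk below). A hedged usage sketch of that surface:

    use crate::{config::PathType, CONFIG};

    async fn roundtrip() -> Result<(), crate::Error> {
        // The same calls work whether SENDS_FOLDER is a local path or s3://...
        let op = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
        op.write("demo/hello.txt", b"hello".to_vec()).await?;
        let bytes = op.read("demo/hello.txt").await?; // returns an opendal::Buffer
        assert_eq!(bytes.to_vec(), b"hello".to_vec());
        op.delete("demo/hello.txt").await?;
        Ok(())
    }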
@@ -1269,7 +1374,7 @@ impl Config {
         })
     }
 
-    pub fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
+    pub async fn update_config(&self, other: ConfigBuilder, ignore_non_editable: bool) -> Result<(), Error> {
         // Remove default values
         //let builder = other.remove(&self.inner.read().unwrap()._env);
 
@@ -1301,20 +1406,19 @@ impl Config {
         }
 
         //Save to file
-        use std::{fs::File, io::Write};
-        let mut file = File::create(&*CONFIG_FILE)?;
-        file.write_all(config_str.as_bytes())?;
+        let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
+        operator.write(&CONFIG_FILENAME, config_str).await?;
 
         Ok(())
     }
 
-    fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
+    async fn update_config_partial(&self, other: ConfigBuilder) -> Result<(), Error> {
         let builder = {
             let usr = &self.inner.read().unwrap()._usr;
             let mut _overrides = Vec::new();
             usr.merge(&other, false, &mut _overrides)
         };
-        self.update_config(builder, false)
+        self.update_config(builder, false).await
     }
 
     // The `signups_allowed` setting is overridden if:
@@ -1359,8 +1463,9 @@ impl Config {
         }
     }
 
-    pub fn delete_user_config(&self) -> Result<(), Error> {
-        std::fs::remove_file(&*CONFIG_FILE)?;
+    pub async fn delete_user_config(&self) -> Result<(), Error> {
+        let operator = opendal_operator_for_path(&CONFIG_FILE_PARENT_DIR)?;
+        operator.delete(&CONFIG_FILENAME).await?;
 
         // Empty user config
         let usr = ConfigBuilder::default();
@@ -1390,7 +1495,7 @@ impl Config {
         inner._enable_smtp && (inner.smtp_host.is_some() || inner.use_sendmail)
     }
 
-    pub fn get_duo_akey(&self) -> String {
+    pub async fn get_duo_akey(&self) -> String {
         if let Some(akey) = self._duo_akey() {
             akey
         } else {
@@ -1401,7 +1506,7 @@ impl Config {
             _duo_akey: Some(akey_s.clone()),
             ..Default::default()
         };
-        self.update_config_partial(builder).ok();
+        self.update_config_partial(builder).await.ok();
 
         akey_s
     }
@@ -1414,6 +1519,23 @@ impl Config {
         token.is_some() && !token.unwrap().trim().is_empty()
     }
 
+    pub fn opendal_operator_for_path_type(&self, path_type: PathType) -> Result<opendal::Operator, Error> {
+        let path = match path_type {
+            PathType::Data => self.data_folder(),
+            PathType::IconCache => self.icon_cache_folder(),
+            PathType::Attachments => self.attachments_folder(),
+            PathType::Sends => self.sends_folder(),
+            PathType::RsaKey => std::path::Path::new(&self.rsa_key_filename())
+                .parent()
+                .ok_or_else(|| std::io::Error::other("Failed to get directory of RSA key file"))?
+                .to_str()
+                .ok_or_else(|| std::io::Error::other("Failed to convert RSA key file directory to UTF-8 string"))?
+                .to_string(),
+        };
+
+        opendal_operator_for_path(&path)
+    }
+
     pub fn render_template<T: serde::ser::Serialize>(&self, name: &str, data: &T) -> Result<String, Error> {
         if self.reload_templates() {
             warn!("RELOADING TEMPLATES");
@@ -1,11 +1,11 @@
-use std::io::ErrorKind;
+use std::time::Duration;
 
 use bigdecimal::{BigDecimal, ToPrimitive};
 use derive_more::{AsRef, Deref, Display};
 use serde_json::Value;
 
 use super::{CipherId, OrganizationId, UserId};
-use crate::CONFIG;
+use crate::{config::PathType, CONFIG};
 use macros::IdFromParam;
 
 db_object! {
@@ -41,24 +41,30 @@ impl Attachment {
     }
 
     pub fn get_file_path(&self) -> String {
-        format!("{}/{}/{}", CONFIG.attachments_folder(), self.cipher_uuid, self.id)
+        format!("{}/{}", self.cipher_uuid, self.id)
     }
 
-    pub fn get_url(&self, host: &str) -> String {
-        let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
-        format!("{host}/attachments/{}/{}?token={token}", self.cipher_uuid, self.id)
+    pub async fn get_url(&self, host: &str) -> Result<String, crate::Error> {
+        let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
+
+        if operator.info().scheme() == opendal::Scheme::Fs {
+            let token = encode_jwt(&generate_file_download_claims(self.cipher_uuid.clone(), self.id.clone()));
+            Ok(format!("{host}/attachments/{}/{}?token={token}", self.cipher_uuid, self.id))
+        } else {
+            Ok(operator.presign_read(&self.get_file_path(), Duration::from_secs(5 * 60)).await?.uri().to_string())
+        }
     }
 
-    pub fn to_json(&self, host: &str) -> Value {
-        json!({
+    pub async fn to_json(&self, host: &str) -> Result<Value, crate::Error> {
+        Ok(json!({
             "id": self.id,
-            "url": self.get_url(host),
+            "url": self.get_url(host).await?,
             "fileName": self.file_name,
             "size": self.file_size.to_string(),
            "sizeName": crate::util::get_display_size(self.file_size),
             "key": self.akey,
             "object": "attachment"
-        })
+        }))
     }
 }
 
@@ -104,26 +110,26 @@ impl Attachment {
 
     pub async fn delete(&self, conn: &mut DbConn) -> EmptyResult {
         db_run! { conn: {
-            let _: () = crate::util::retry(
+            crate::util::retry(
                 || diesel::delete(attachments::table.filter(attachments::id.eq(&self.id))).execute(conn),
                 10,
             )
-            .map_res("Error deleting attachment")?;
+            .map(|_| ())
+            .map_res("Error deleting attachment")
         }}?;
 
-        let file_path = &self.get_file_path();
+        let operator = CONFIG.opendal_operator_for_path_type(PathType::Attachments)?;
+        let file_path = self.get_file_path();
 
-        match std::fs::remove_file(file_path) {
-            // Ignore "file not found" errors. This can happen when the
-            // upstream caller has already cleaned up the file as part of
-            // its own error handling.
-            Err(e) if e.kind() == ErrorKind::NotFound => {
-                debug!("File '{file_path}' already deleted.");
-                Ok(())
-            }
-            Err(e) => Err(e.into()),
-            _ => Ok(()),
+        // Ignore "file not found" errors. This can happen when the
+        // upstream caller has already cleaned up the file as part of
+        // its own error handling.
+        if let Err(e) = operator.delete(&file_path).await {
+            if e.kind() == opendal::ErrorKind::NotFound {
+                debug!("File '{file_path}' already deleted.");
+            } else {
+                return Err(e.into());
+            }
         }
+
+        Ok(())
     }
 
     pub async fn delete_all_by_cipher(cipher_uuid: &CipherId, conn: &mut DbConn) -> EmptyResult {
@@ -141,18 +141,28 @@ impl Cipher {
         cipher_sync_data: Option<&CipherSyncData>,
         sync_type: CipherSyncType,
         conn: &mut DbConn,
-    ) -> Value {
+    ) -> Result<Value, crate::Error> {
         use crate::util::{format_date, validate_and_format_date};
 
         let mut attachments_json: Value = Value::Null;
         if let Some(cipher_sync_data) = cipher_sync_data {
             if let Some(attachments) = cipher_sync_data.cipher_attachments.get(&self.uuid) {
-                attachments_json = attachments.iter().map(|c| c.to_json(host)).collect();
+                if !attachments.is_empty() {
+                    let mut attachments_json_vec = vec![];
+                    for attachment in attachments {
+                        attachments_json_vec.push(attachment.to_json(host).await?);
+                    }
+                    attachments_json = Value::Array(attachments_json_vec);
+                }
             }
         } else {
             let attachments = Attachment::find_by_cipher(&self.uuid, conn).await;
             if !attachments.is_empty() {
-                attachments_json = attachments.iter().map(|c| c.to_json(host)).collect()
+                let mut attachments_json_vec = vec![];
+                for attachment in attachments {
+                    attachments_json_vec.push(attachment.to_json(host).await?);
+                }
+                attachments_json = Value::Array(attachments_json_vec);
             }
         }
 
@@ -384,7 +394,7 @@ impl Cipher {
         };
 
         json_object[key] = type_data_json;
-        json_object
+        Ok(json_object)
     }
 
     pub async fn update_users_revision(&self, conn: &mut DbConn) -> Vec<UserId> {
@@ -1,7 +1,7 @@
 use chrono::{NaiveDateTime, Utc};
 use serde_json::Value;
 
-use crate::util::LowerCase;
+use crate::{config::PathType, util::LowerCase, CONFIG};
 
 use super::{OrganizationId, User, UserId};
 use id::SendId;
@@ -226,7 +226,8 @@ impl Send {
         self.update_users_revision(conn).await;
 
         if self.atype == SendType::File as i32 {
-            std::fs::remove_dir_all(std::path::Path::new(&crate::CONFIG.sends_folder()).join(&self.uuid)).ok();
+            let operator = CONFIG.opendal_operator_for_path_type(PathType::Sends)?;
+            operator.remove_all(&self.uuid).await.ok();
         }
 
         db_run! { conn: {
@@ -46,6 +46,7 @@ use jsonwebtoken::errors::Error as JwtErr;
 use lettre::address::AddressError as AddrErr;
 use lettre::error::Error as LettreErr;
 use lettre::transport::smtp::Error as SmtpErr;
+use opendal::Error as OpenDALErr;
 use openssl::error::ErrorStack as SSLErr;
 use regex::Error as RegexErr;
 use reqwest::Error as ReqErr;
@@ -98,6 +99,8 @@ make_error! {
 
     DieselCon(DieselConErr): _has_source, _api_error,
     Webauthn(WebauthnErr): _has_source, _api_error,
+
+    OpenDAL(OpenDALErr): _has_source, _api_error,
 }
 
 impl std::fmt::Debug for Error {
|
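Registering opendal::Error in the make_error! macro gives the crate-wide Error type a From conversion, which is what lets all the new storage code above use `?` directly on OpenDAL calls. A hedged sketch of the resulting ergonomics (function name is illustrative only):

    use crate::error::Error;

    async fn read_config_blob(op: &opendal::Operator) -> Result<Vec<u8>, Error> {
        // opendal::Error converts into crate::error::Error automatically,
        // so no manual map_err is needed here.
        Ok(op.read("config.json").await?.to_vec())
    }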
src/main.rs (25 lines changed)
@@ -62,7 +62,7 @@ mod util;
 use crate::api::core::two_factor::duo_oidc::purge_duo_contexts;
 use crate::api::purge_auth_requests;
 use crate::api::{WS_ANONYMOUS_SUBSCRIPTIONS, WS_USERS};
-pub use config::CONFIG;
+pub use config::{PathType, CONFIG};
 pub use error::{Error, MapResult};
 use rocket::data::{Limits, ToByteUnit};
 use std::sync::{atomic::Ordering, Arc};
@@ -76,16 +76,13 @@ async fn main() -> Result<(), Error> {
     let level = init_logging()?;
 
     check_data_folder().await;
-    auth::initialize_keys().unwrap_or_else(|e| {
+    auth::initialize_keys().await.unwrap_or_else(|e| {
         error!("Error creating private key '{}'\n{e:?}\nExiting Vaultwarden!", CONFIG.private_rsa_key());
         exit(1);
     });
     check_web_vault();
 
-    create_dir(&CONFIG.icon_cache_folder(), "icon cache");
     create_dir(&CONFIG.tmp_folder(), "tmp folder");
-    create_dir(&CONFIG.sends_folder(), "sends folder");
-    create_dir(&CONFIG.attachments_folder(), "attachments folder");
 
     let pool = create_db_pool().await;
     schedule_jobs(pool.clone());
@@ -465,6 +462,24 @@ fn create_dir(path: &str, description: &str) {
 
 async fn check_data_folder() {
     let data_folder = &CONFIG.data_folder();
 
+    if data_folder.starts_with("s3://") {
+        if let Err(e) = CONFIG
+            .opendal_operator_for_path_type(PathType::Data)
+            .unwrap_or_else(|e| {
+                error!("Failed to create S3 operator for data folder '{data_folder}': {e:?}");
+                exit(1);
+            })
+            .check()
+            .await
+        {
+            error!("Could not access S3 data folder '{data_folder}': {e:?}");
+            exit(1);
+        }
+
+        return;
+    }
+
     let path = Path::new(data_folder);
     if !path.exists() {
         error!("Data folder '{data_folder}' doesn't exist.");
src/static/scripts/admin.css (4 lines changed, vendored)
@@ -54,3 +54,7 @@ img {
 .vw-copy-toast {
     width: 15rem;
 }
+
+.abbr-badge {
+    cursor: help;
+}
src/static/scripts/admin_diagnostics.js (8 lines changed, vendored)
@@ -208,11 +208,9 @@ function initVersionCheck(dj) {
     }
     checkVersions("server", serverInstalled, serverLatest, serverLatestCommit);
 
-    if (!dj.running_within_container) {
-        const webInstalled = dj.web_vault_version;
-        const webLatest = dj.latest_web_build;
-        checkVersions("web", webInstalled, webLatest, null, dj.web_vault_pre_release);
-    }
+    const webInstalled = dj.web_vault_version;
+    const webLatest = dj.latest_web_build;
+    checkVersions("web", webInstalled, webLatest, null, dj.web_vault_pre_release);
 }
 
 function checkDns(dns_resolved) {
@ -7,36 +7,34 @@
|
|||
<div class="col-md">
|
||||
<dl class="row">
|
||||
<dt class="col-sm-5">Server Installed
|
||||
<span class="badge bg-success d-none" id="server-success" title="Latest version is installed.">Ok</span>
|
||||
<span class="badge bg-warning text-dark d-none" id="server-warning" title="There seems to be an update available.">Update</span>
|
||||
<span class="badge bg-info text-dark d-none" id="server-branch" title="This is a branched version.">Branched</span>
|
||||
<span class="badge bg-success d-none abbr-badge" id="server-success" title="Latest version is installed.">Ok</span>
|
||||
<span class="badge bg-warning text-dark d-none abbr-badge" id="server-warning" title="There seems to be an update available.">Update</span>
|
||||
<span class="badge bg-info text-dark d-none abbr-badge" id="server-branch" title="This is a branched version.">Branched</span>
|
||||
</dt>
|
||||
<dd class="col-sm-7">
|
||||
<span id="server-installed">{{page_data.current_release}}</span>
|
||||
</dd>
|
||||
<dt class="col-sm-5">Server Latest
|
||||
<span class="badge bg-secondary d-none" id="server-failed" title="Unable to determine latest version.">Unknown</span>
|
||||
<span class="badge bg-secondary d-none abbr-badge" id="server-failed" title="Unable to determine latest version.">Unknown</span>
|
||||
</dt>
|
||||
<dd class="col-sm-7">
|
||||
<span id="server-latest">{{page_data.latest_release}}<span id="server-latest-commit" class="d-none">-{{page_data.latest_commit}}</span></span>
|
||||
</dd>
|
||||
{{#if page_data.web_vault_enabled}}
|
||||
<dt class="col-sm-5">Web Installed
|
||||
<span class="badge bg-success d-none" id="web-success" title="Latest version is installed.">Ok</span>
|
||||
<span class="badge bg-warning text-dark d-none" id="web-warning" title="There seems to be an update available.">Update</span>
|
||||
<span class="badge bg-info text-dark d-none" id="web-prerelease" title="You seem to be using a pre-release version.">Pre-Release</span>
|
||||
<span class="badge bg-success d-none abbr-badge" id="web-success" title="Latest version is installed.">Ok</span>
|
||||
<span class="badge bg-warning text-dark d-none abbr-badge" id="web-warning" title="There seems to be an update available.">Update</span>
|
||||
<span class="badge bg-info text-dark d-none abbr-badge" id="web-prerelease" title="You seem to be using a pre-release version.">Pre-Release</span>
|
||||
</dt>
|
||||
<dd class="col-sm-7">
|
||||
<span id="web-installed">{{page_data.web_vault_version}}</span>
|
||||
</dd>
|
||||
{{#unless page_data.running_within_container}}
|
||||
<dt class="col-sm-5">Web Latest
|
||||
<span class="badge bg-secondary d-none" id="web-failed" title="Unable to determine latest version.">Unknown</span>
|
||||
<span class="badge bg-secondary d-none abbr-badge" id="web-failed" title="Unable to determine latest version.">Unknown</span>
|
||||
</dt>
|
||||
<dd class="col-sm-7">
|
||||
<span id="web-latest">{{page_data.latest_web_build}}</span>
|
||||
</dd>
|
||||
{{/unless}}
|
||||
{{/if}}
|
||||
{{#unless page_data.web_vault_enabled}}
|
||||
<dt class="col-sm-5">Web Installed</dt>
|
||||
|
@ -69,14 +67,11 @@
|
|||
<span class="d-block"><b>No</b></span>
|
||||
{{/unless}}
|
||||
</dd>
|
||||
<dt class="col-sm-5">Uses config.json
|
||||
{{#if page_data.overrides}}
|
||||
<span class="badge bg-info text-dark" title="Environment variables are overwritten by a config.json.">Note</span>
|
||||
{{/if}}
|
||||
</dt>
|
||||
<dt class="col-sm-5">Uses config.json</dt>
|
||||
<dd class="col-sm-7">
|
||||
{{#if page_data.overrides}}
|
||||
<abbr class="d-block" title="The following settings are overridden: {{page_data.overrides}}"><b>Yes</b></abbr>
|
||||
<span class="d-inline"><b>Yes</b></span>
|
||||
<span class="badge bg-info text-dark abbr-badge" title="Environment variables are overwritten by a config.json.
{{page_data.overrides}}">Details</span>
|
||||
{{/if}}
|
||||
{{#unless page_data.overrides}}
|
||||
<span class="d-block"><b>No</b></span>
|
||||
|
@ -95,10 +90,10 @@
|
|||
{{#if page_data.ip_header_exists}}
|
||||
<dt class="col-sm-5">IP header
|
||||
{{#if page_data.ip_header_match}}
|
||||
<span class="badge bg-success" title="IP_HEADER config seems to be valid.">Match</span>
|
||||
<span class="badge bg-success abbr-badge" title="IP_HEADER config seems to be valid.">Match</span>
|
||||
{{/if}}
|
||||
{{#unless page_data.ip_header_match}}
|
||||
<span class="badge bg-danger" title="IP_HEADER config seems to be invalid. IP's in the log could be invalid. Please fix.">No Match</span>
|
||||
<span class="badge bg-danger abbr-badge" title="IP_HEADER config seems to be invalid. IP's in the log could be invalid. Please fix.">No Match</span>
|
||||
{{/unless}}
|
||||
</dt>
|
||||
<dd class="col-sm-7">
|
||||
|
@ -114,10 +109,10 @@
|
|||
{{!-- End if IP Header Exists --}}
|
||||
<dt class="col-sm-5">Internet access
|
||||
{{#if page_data.has_http_access}}
|
||||
<span class="badge bg-success" title="We have internet access!">Ok</span>
|
||||
<span class="badge bg-success abbr-badge" title="We have internet access!">Ok</span>
|
||||
{{/if}}
|
||||
{{#unless page_data.has_http_access}}
|
||||
<span class="badge bg-danger" title="There seems to be no internet access. Please fix.">Error</span>
|
||||
<span class="badge bg-danger abbr-badge" title="There seems to be no internet access. Please fix.">Error</span>
|
||||
{{/unless}}
|
||||
</dt>
|
||||
<dd class="col-sm-7">
|
||||
|
@ -139,8 +134,8 @@
</dd>
<dt class="col-sm-5">Websocket enabled
{{#if page_data.enable_websocket}}
<span class="badge bg-success d-none" id="websocket-success" title="Websocket connection is working.">Ok</span>
<span class="badge bg-danger d-none" id="websocket-error" title="Websocket connection error, validate your reverse proxy configuration!">Error</span>
<span class="badge bg-success d-none abbr-badge" id="websocket-success" title="Websocket connection is working.">Ok</span>
<span class="badge bg-danger d-none abbr-badge" id="websocket-error" title="Websocket connection error, validate your reverse proxy configuration!">Error</span>
{{/if}}
</dt>
<dd class="col-sm-7">
@@ -153,27 +148,27 @@
</dd>

<dt class="col-sm-5">DNS (github.com)
<span class="badge bg-success d-none" id="dns-success" title="DNS Resolving works!">Ok</span>
<span class="badge bg-danger d-none" id="dns-warning" title="DNS Resolving failed. Please fix.">Error</span>
<span class="badge bg-success d-none abbr-badge" id="dns-success" title="DNS Resolving works!">Ok</span>
<span class="badge bg-danger d-none abbr-badge" id="dns-warning" title="DNS Resolving failed. Please fix.">Error</span>
</dt>
<dd class="col-sm-7">
<span id="dns-resolved">{{page_data.dns_resolved}}</span>
</dd>
<dt class="col-sm-5">Date & Time (Local)
{{#if page_data.tz_env}}
<span class="badge bg-success" title="Configured TZ environment variable">{{page_data.tz_env}}</span>
<span class="badge bg-success abbr-badge" title="Configured TZ environment variable">{{page_data.tz_env}}</span>
{{/if}}
</dt>
<dd class="col-sm-7">
<span><b>Server:</b> {{page_data.server_time_local}}</span>
</dd>
<dt class="col-sm-5">Date & Time (UTC)
<span class="badge bg-success d-none" id="time-success" title="Server and browser times are within 15 seconds of each other.">Server/Browser Ok</span>
<span class="badge bg-danger d-none" id="time-warning" title="Server and browser times are more than 15 seconds apart.">Server/Browser Error</span>
<span class="badge bg-success d-none" id="ntp-server-success" title="Server and NTP times are within 15 seconds of each other.">Server NTP Ok</span>
<span class="badge bg-danger d-none" id="ntp-server-warning" title="Server and NTP times are more than 15 seconds apart.">Server NTP Error</span>
<span class="badge bg-success d-none" id="ntp-browser-success" title="Browser and NTP times are within 15 seconds of each other.">Browser NTP Ok</span>
<span class="badge bg-danger d-none" id="ntp-browser-warning" title="Browser and NTP times are more than 15 seconds apart.">Browser NTP Error</span>
<span class="badge bg-success d-none abbr-badge" id="time-success" title="Server and browser times are within 15 seconds of each other.">Server/Browser Ok</span>
<span class="badge bg-danger d-none abbr-badge" id="time-warning" title="Server and browser times are more than 15 seconds apart.">Server/Browser Error</span>
<span class="badge bg-success d-none abbr-badge" id="ntp-server-success" title="Server and NTP times are within 15 seconds of each other.">Server NTP Ok</span>
<span class="badge bg-danger d-none abbr-badge" id="ntp-server-warning" title="Server and NTP times are more than 15 seconds apart.">Server NTP Error</span>
<span class="badge bg-success d-none abbr-badge" id="ntp-browser-success" title="Browser and NTP times are within 15 seconds of each other.">Browser NTP Ok</span>
<span class="badge bg-danger d-none abbr-badge" id="ntp-browser-warning" title="Browser and NTP times are more than 15 seconds apart.">Browser NTP Error</span>
</dt>
<dd class="col-sm-7">
<span id="ntp-time" class="d-block"><b>NTP:</b> <span id="ntp-server-string">{{page_data.ntp_time}}</span></span>
@@ -182,10 +177,10 @@
</dd>

<dt class="col-sm-5">Domain configuration
<span class="badge bg-success d-none" id="domain-success" title="The domain variable matches the browser location and seems to be configured correctly.">Match</span>
<span class="badge bg-danger d-none" id="domain-warning" title="The domain variable does not match the browser location.
The domain variable does not seem to be configured correctly.
Some features may not work as expected!">No Match</span>
<span class="badge bg-success d-none" id="https-success" title="Configured to use HTTPS">HTTPS</span>
<span class="badge bg-danger d-none" id="https-warning" title="Not configured to use HTTPS.
Some features may not work as expected!">No HTTPS</span>
<span class="badge bg-success d-none abbr-badge" id="domain-success" title="The domain variable matches the browser location and seems to be configured correctly.">Match</span>
<span class="badge bg-danger d-none abbr-badge" id="domain-warning" title="The domain variable does not match the browser location.
The domain variable does not seem to be configured correctly.
Some features may not work as expected!">No Match</span>
<span class="badge bg-success d-none abbr-badge" id="https-success" title="Configured to use HTTPS">HTTPS</span>
<span class="badge bg-danger d-none abbr-badge" id="https-warning" title="Not configured to use HTTPS.
Some features may not work as expected!">No HTTPS</span>
</dt>
<dd class="col-sm-7">
<span id="domain-server" class="d-block"><b>Server:</b> <span id="domain-server-string">{{page_data.admin_url}}</span></span>
@@ -193,8 +188,8 @@
</dd>

<dt class="col-sm-5">HTTP Response validation
<span class="badge bg-success d-none" id="http-response-success" title="All headers and HTTP request responses seem to be ok.">Ok</span>
<span class="badge bg-danger d-none" id="http-response-warning" title="Some headers or HTTP request responses return invalid data!">Error</span>
<span class="badge bg-success d-none abbr-badge" id="http-response-success" title="All headers and HTTP request responses seem to be ok.">Ok</span>
<span class="badge bg-danger d-none abbr-badge" id="http-response-warning" title="Some headers or HTTP request responses return invalid data!">Error</span>
</dt>
<dd class="col-sm-7">
<span id="http-response-errors" class="d-block"></span>
@@ -21,7 +21,7 @@ a[href$="/settings/sponsored-families"] {
}

/* Hide Log in with passkey on the login page */
app-root form.ng-untouched a[routerlink="/login-with-passkey"] {
app-root form.ng-untouched > div > div > button.\!tw-text-primary-600:nth-child(3) {
@extend %vw-hide;
}
/* Hide the or text followed by the two buttons hidden above */
22
src/util.rs
@@ -16,7 +16,7 @@ use tokio::{
time::{sleep, Duration},
};

use crate::CONFIG;
use crate::{config::PathType, CONFIG};

pub struct AppHeaders();
@@ -830,6 +830,26 @@ pub fn is_global(ip: std::net::IpAddr) -> bool {
    ip.is_global()
}

/// Saves a Rocket temporary file to the OpenDAL Operator at the given path.
pub async fn save_temp_file(
    path_type: PathType,
    path: &str,
    temp_file: rocket::fs::TempFile<'_>,
    overwrite: bool,
) -> Result<(), crate::Error> {
    use futures::AsyncWriteExt as _;
    use tokio_util::compat::TokioAsyncReadCompatExt as _;

    let operator = CONFIG.opendal_operator_for_path_type(path_type)?;

    let mut read_stream = temp_file.open().await?.compat();
    let mut writer = operator.writer_with(path).if_not_exists(!overwrite).await?.into_futures_async_write();
    futures::io::copy(&mut read_stream, &mut writer).await?;
    writer.close().await?;

    Ok(())
}

/// These are some tests to check that the implementations match
/// The IPv4 can be all checked in 30 seconds or so and they are correct as of nightly 2023-07-17
/// The IPV6 can't be checked in a reasonable time, so we check over a hundred billion random ones, so far correct
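
A minimal usage sketch (not part of this commit) of the new helper above. The `PathType::Attachments` variant and the destination path below are assumptions for illustration only; the commit's real call sites pass their own path types and paths:

// Hypothetical caller of save_temp_file(); PathType::Attachments is assumed here.
// The helper streams the uploaded TempFile into whatever backend the config
// resolves for that path type: a local folder by default, or an S3 bucket
// when built with the `s3` feature.
async fn store_attachment(upload: rocket::fs::TempFile<'_>) -> Result<(), crate::Error> {
    // Passing overwrite = false opens the writer with if_not_exists(true),
    // so an existing object at this path is not clobbered.
    crate::util::save_temp_file(PathType::Attachments, "attachments/some-uuid/file.bin", upload, false).await
}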