Mirror of https://github.com/dani-garcia/vaultwarden.git

Merge remote-tracking branch 'origin/main'
Commit 5964baa72c
25 changed files with 176 additions and 96 deletions
@@ -485,7 +485,7 @@
 # SSO_AUTHORITY=https://auth.example.com

 ## Authorization request scopes. Optional SSO scopes, override if email and profile are not enough (`openid` is implicit).
-#SSO_SCOPES="email profile"
+# SSO_SCOPES="email profile"

 ## Additional authorization url parameters (ex: to obtain a `refresh_token` with Google Auth).
 # SSO_AUTHORIZE_EXTRA_PARAMS="access_type=offline&prompt=consent"
@@ -6,7 +6,7 @@ name = "vaultwarden"
 version = "1.0.0"
 authors = ["Daniel García <dani-garcia@users.noreply.github.com>"]
 edition = "2021"
-rust-version = "1.86.0"
+rust-version = "1.87.0"
 resolver = "2"

 repository = "https://github.com/dani-garcia/vaultwarden"
@@ -5,7 +5,7 @@ vault_image_digest: "sha256:f6ac819a2cd9e226f2cd2ec26196ede94a41e672e9672a11b5f3
 # We use the linux/amd64 platform shell scripts since there is no difference between the different platform scripts
 # https://github.com/tonistiigi/xx | https://hub.docker.com/r/tonistiigi/xx/tags
 xx_image_digest: "sha256:9c207bead753dda9430bdd15425c6518fc7a03d866103c516a2c6889188f5894"
-rust_version: 1.88.0 # Rust version to be used
+rust_version: 1.89.0 # Rust version to be used
 debian_version: bookworm # Debian release name to be used
 alpine_version: "3.22" # Alpine version to be used
 # For which platforms/architectures will we try to build images
@@ -32,10 +32,10 @@ FROM --platform=linux/amd64 docker.io/vaultwarden/web-vault@sha256:f6ac819a2cd9e
 ########################## ALPINE BUILD IMAGES ##########################
 ## NOTE: The Alpine Base Images do not support other platforms then linux/amd64
 ## And for Alpine we define all build images here, they will only be loaded when actually used
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.88.0 AS build_amd64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.88.0 AS build_arm64
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.88.0 AS build_armv7
-FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.88.0 AS build_armv6
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:x86_64-musl-stable-1.89.0 AS build_amd64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:aarch64-musl-stable-1.89.0 AS build_arm64
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:armv7-musleabihf-stable-1.89.0 AS build_armv7
+FROM --platform=linux/amd64 ghcr.io/blackdex/rust-musl:arm-musleabi-stable-1.89.0 AS build_armv6

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
@@ -36,7 +36,7 @@ FROM --platform=linux/amd64 docker.io/tonistiigi/xx@sha256:9c207bead753dda9430bd

 ########################## BUILD IMAGE ##########################
 # hadolint ignore=DL3006
-FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.88.0-slim-bookworm AS build
+FROM --platform=$BUILDPLATFORM docker.io/library/rust:1.89.0-slim-bookworm AS build
 COPY --from=xx / /
 ARG TARGETARCH
 ARG TARGETVARIANT
@@ -1,18 +1,18 @@
 # Integration tests

 This allows running integration tests using [Playwright](https://playwright.dev/).
-\
-It usse its own [test.env](/test/scenarios/test.env) with different ports to not collide with a running dev instance.
+
+It uses its own `test.env` with different ports to not collide with a running dev instance.

 ## Install

-This rely on `docker` and the `compose` [plugin](https://docs.docker.com/compose/install/).
+This relies on `docker` and the `compose` [plugin](https://docs.docker.com/compose/install/).
 Databases (`Mariadb`, `Mysql` and `Postgres`) and `Playwright` will run in containers.

 ### Running Playwright outside docker

-It's possible to run `Playwright` outside of the container, this remove the need to rebuild the image for each change.
-You'll additionally need `nodejs` then run:
+It is possible to run `Playwright` outside of the container, this removes the need to rebuild the image for each change.
+You will additionally need `nodejs` then run:

 ```bash
 npm install
@@ -33,7 +33,7 @@ To force a rebuild of the Playwright image:
 DOCKER_BUILDKIT=1 docker compose --env-file test.env build Playwright
 ```

-To access the ui to easily run test individually and debug if needed (will not work in docker):
+To access the UI to easily run test individually and debug if needed (this will not work in docker):

 ```bash
 npx playwright test --ui
@@ -42,7 +42,7 @@ npx playwright test --ui
 ### DB

 Projects are configured to allow to run tests only on specific database.
-\
+
 You can use:

 ```bash
@@ -62,7 +62,7 @@ DOCKER_BUILDKIT=1 docker compose --profile playwright --env-file test.env run Pl

 ### Keep services running

-If you want you can keep the Db and Keycloak runnning (states are not impacted by the tests):
+If you want you can keep the DB and Keycloak runnning (states are not impacted by the tests):

 ```bash
 PW_KEEP_SERVICE_RUNNNING=true npx playwright test
@@ -86,7 +86,8 @@ DOCKER_BUILDKIT=1 docker compose --profile playwright --env-file test.env run Pl

 ## Writing scenario

-When creating new scenario use the recorder to more easily identify elements (in general try to rely on visible hint to identify elements and not hidden ids).
+When creating new scenario use the recorder to more easily identify elements
+(in general try to rely on visible hint to identify elements and not hidden IDs).
 This does not start the server, you will need to start it manually.

 ```bash
@@ -95,7 +96,7 @@ npx playwright codegen "http://127.0.0.1:8000"

 ## Override web-vault

-It's possible to change the `web-vault` used by referencing a different `bw_web_builds` commit.
+It is possible to change the `web-vault` used by referencing a different `bw_web_builds` commit.

 ```bash
 export PW_WV_REPO_URL=https://github.com/Timshel/oidc_web_builds.git
@@ -105,12 +106,13 @@ DOCKER_BUILDKIT=1 docker compose --profile playwright --env-file test.env build

 # OpenID Connect test setup

-Additionally this `docker-compose` template allow to run locally `VaultWarden`, [Keycloak](https://www.keycloak.org/) and [Maildev](https://github.com/timshel/maildev) to test OIDC.
+Additionally this `docker-compose` template allows to run locally Vaultwarden,
+[Keycloak](https://www.keycloak.org/) and [Maildev](https://github.com/timshel/maildev) to test OIDC.

 ## Setup

 This rely on `docker` and the `compose` [plugin](https://docs.docker.com/compose/install/).
-First create a copy of `.env.template` as `.env` (This is done to prevent commiting your custom settings, Ex `SMTP_`).
+First create a copy of `.env.template` as `.env` (This is done to prevent committing your custom settings, Ex `SMTP_`).

 ## Usage

@@ -125,11 +127,12 @@ keycloakSetup_1 | 74af4933-e386-4e64-ba15-a7b61212c45e
 oidc_keycloakSetup_1 exited with code 0
 ```

-Wait until `oidc_keycloakSetup_1 exited with code 0` which indicate the correct setup of the Keycloak realm, client and user (It's normal for this container to stop once the configuration is done).
+Wait until `oidc_keycloakSetup_1 exited with code 0` which indicates the correct setup of the Keycloak realm, client and user
+(It is normal for this container to stop once the configuration is done).

 Then you can access :

-- `VaultWarden` on http://0.0.0.0:8000 with the default user `test@yopmail.com/test`.
+- `Vaultwarden` on http://0.0.0.0:8000 with the default user `test@yopmail.com/test`.
 - `Keycloak` on http://0.0.0.0:8080/admin/master/console/ with the default user `admin/admin`
 - `Maildev` on http://0.0.0.0:1080

@@ -143,7 +146,7 @@ You can run just `Keycloak` with `--profile keycloak`:
 ```bash
 > docker compose --profile keycloak --env-file .env up
 ```
-When running with a local VaultWarden, you can use a front-end build from [dani-garcia/bw_web_builds](https://github.com/dani-garcia/bw_web_builds/releases).
+When running with a local Vaultwarden, you can use a front-end build from [dani-garcia/bw_web_builds](https://github.com/dani-garcia/bw_web_builds/releases).

 ## Rebuilding the Vaultwarden

@@ -155,12 +158,12 @@ docker compose --profile vaultwarden --env-file .env build VaultwardenPrebuild V

 ## Configuration

-All configuration for `keycloak` / `VaultWarden` / `keycloak_setup.sh` can be found in [.env](.env.template).
+All configuration for `keycloak` / `Vaultwarden` / `keycloak_setup.sh` can be found in [.env](.env.template).
 The content of the file will be loaded as environment variables in all containers.

-- `keycloak` [configuration](https://www.keycloak.org/server/all-config) include `KEYCLOAK_ADMIN` / `KEYCLOAK_ADMIN_PASSWORD` and any variable prefixed `KC_` ([more information](https://www.keycloak.org/server/configuration#_example_configuring_the_db_url_host_parameter)).
-- All `VaultWarden` configuration can be set (EX: `SMTP_*`)
+- `keycloak` [configuration](https://www.keycloak.org/server/all-config) includes `KEYCLOAK_ADMIN` / `KEYCLOAK_ADMIN_PASSWORD` and any variable prefixed `KC_` ([more information](https://www.keycloak.org/server/configuration#_example_configuring_the_db_url_host_parameter)).
+- All `Vaultwarden` configuration can be set (EX: `SMTP_*`)

 ## Cleanup

-Use `docker compose --profile vaultWarden down`.
+Use `docker compose --profile vaultwarden down`.
@@ -1,6 +1,6 @@
 FROM playwright_oidc_vaultwarden_prebuilt AS prebuilt

-FROM node:18-bookworm AS build
+FROM node:22-bookworm AS build

 ARG REPO_URL
 ARG COMMIT_HASH
@@ -43,7 +43,7 @@ KEYCLOAK_ADMIN_PASSWORD=${KEYCLOAK_ADMIN}
 KC_HTTP_HOST=127.0.0.1
 KC_HTTP_PORT=8081

-# Script parameters (use Keycloak and VaultWarden config too)
+# Script parameters (use Keycloak and Vaultwarden config too)
 TEST_REALM=test
 DUMMY_REALM=dummy
 DUMMY_AUTHORITY=http://${KC_HTTP_HOST}:${KC_HTTP_PORT}/realms/${DUMMY_REALM}
@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.88.0"
+channel = "1.89.0"
 components = [ "rustfmt", "clippy" ]
 profile = "minimal"
@@ -342,11 +342,11 @@ async fn post_set_password(data: Json<SetPasswordData>, headers: Headers, mut co
     let mut user = headers.user;

     if user.private_key.is_some() {
-        err!("Account already intialized cannot set password")
+        err!("Account already initialized, cannot set password")
     }

-    // Check against the password hint setting here so if it fails, the user
-    // can retry without losing their invitation below.
+    // Check against the password hint setting here so if it fails,
+    // the user can retry without losing their invitation below.
     let password_hint = clean_password_hint(&data.master_password_hint);
     enforce_password_hint_setting(&password_hint)?;

@@ -78,6 +78,7 @@ pub fn routes() -> Vec<Route> {
         restore_cipher_put,
         restore_cipher_put_admin,
         restore_cipher_selected,
+        restore_cipher_selected_admin,
         delete_all,
         move_cipher_selected,
         move_cipher_selected_put,
@@ -318,7 +319,7 @@ async fn post_ciphers_create(
     // or otherwise), we can just ignore this field entirely.
     data.cipher.last_known_revision_date = None;

-    share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt).await
+    share_cipher_by_uuid(&cipher.uuid, data, &headers, &mut conn, &nt, None).await
 }

 /// Called when creating a new user-owned cipher.
@@ -920,7 +921,7 @@ async fn post_cipher_share(
 ) -> JsonResult {
     let data: ShareCipherData = data.into_inner();

-    share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt).await
+    share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt, None).await
 }

 #[put("/ciphers/<cipher_id>/share", data = "<data>")]
@@ -933,7 +934,7 @@ async fn put_cipher_share(
 ) -> JsonResult {
     let data: ShareCipherData = data.into_inner();

-    share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt).await
+    share_cipher_by_uuid(&cipher_id, data, &headers, &mut conn, &nt, None).await
 }

 #[derive(Deserialize)]
@@ -973,11 +974,16 @@ async fn put_cipher_share_selected(
         };

         match shared_cipher_data.cipher.id.take() {
-            Some(id) => share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt).await?,
+            Some(id) => {
+                share_cipher_by_uuid(&id, shared_cipher_data, &headers, &mut conn, &nt, Some(UpdateType::None)).await?
+            }
             None => err!("Request missing ids field"),
         };
     }

+    // Multi share actions do not send out a push for each cipher, we need to send a general sync here
+    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await;
+
     Ok(())
 }

@@ -987,6 +993,7 @@ async fn share_cipher_by_uuid(
     headers: &Headers,
     conn: &mut DbConn,
     nt: &Notify<'_>,
+    override_ut: Option<UpdateType>,
 ) -> JsonResult {
     let mut cipher = match Cipher::find_by_uuid(cipher_id, conn).await {
         Some(cipher) => {
@@ -1018,7 +1025,10 @@ async fn share_cipher_by_uuid(
     };

     // When LastKnownRevisionDate is None, it is a new cipher, so send CipherCreate.
-    let ut = if data.cipher.last_known_revision_date.is_some() {
+    // If there is an override, like when handling multiple items, we want to prevent a push notification for every single item
+    let ut = if let Some(ut) = override_ut {
+        ut
+    } else if data.cipher.last_known_revision_date.is_some() {
         UpdateType::SyncCipherUpdate
     } else {
         UpdateType::SyncCipherCreate
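Note: a minimal, self-contained sketch of the `override_ut` pattern introduced above. `UpdateType` is a stand-in enum here, not vaultwarden's actual definition; the point is just that an explicit override (used when handling a batch) wins over the update-vs-create detection.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum UpdateType {
    None,
    SyncCipherUpdate,
    SyncCipherCreate,
}

// An explicit override (e.g. while processing multiple items) takes
// precedence; otherwise infer update vs. create from the revision date.
fn effective_update_type(override_ut: Option<UpdateType>, has_revision_date: bool) -> UpdateType {
    if let Some(ut) = override_ut {
        ut
    } else if has_revision_date {
        UpdateType::SyncCipherUpdate
    } else {
        UpdateType::SyncCipherCreate
    }
}

fn main() {
    assert_eq!(effective_update_type(Some(UpdateType::None), true), UpdateType::None);
    assert_eq!(effective_update_type(None, true), UpdateType::SyncCipherUpdate);
    assert_eq!(effective_update_type(None, false), UpdateType::SyncCipherCreate);
}
```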
@@ -1517,7 +1527,7 @@ async fn delete_cipher_selected_put_admin(

 #[put("/ciphers/<cipher_id>/restore")]
 async fn restore_cipher_put(cipher_id: CipherId, headers: Headers, mut conn: DbConn, nt: Notify<'_>) -> JsonResult {
-    _restore_cipher_by_uuid(&cipher_id, &headers, &mut conn, &nt).await
+    _restore_cipher_by_uuid(&cipher_id, &headers, false, &mut conn, &nt).await
 }

 #[put("/ciphers/<cipher_id>/restore-admin")]
@@ -1527,7 +1537,17 @@ async fn restore_cipher_put_admin(
     mut conn: DbConn,
     nt: Notify<'_>,
 ) -> JsonResult {
-    _restore_cipher_by_uuid(&cipher_id, &headers, &mut conn, &nt).await
+    _restore_cipher_by_uuid(&cipher_id, &headers, false, &mut conn, &nt).await
 }

+#[put("/ciphers/restore-admin", data = "<data>")]
+async fn restore_cipher_selected_admin(
+    data: Json<CipherIdsData>,
+    headers: Headers,
+    mut conn: DbConn,
+    nt: Notify<'_>,
+) -> JsonResult {
+    _restore_multiple_ciphers(data, &headers, &mut conn, &nt).await
+}
+
 #[put("/ciphers/restore", data = "<data>")]
@@ -1555,35 +1575,47 @@ async fn move_cipher_selected(
     nt: Notify<'_>,
 ) -> EmptyResult {
     let data = data.into_inner();
-    let user_id = headers.user.uuid;
+    let user_id = &headers.user.uuid;

     if let Some(ref folder_id) = data.folder_id {
-        if Folder::find_by_uuid_and_user(folder_id, &user_id, &mut conn).await.is_none() {
+        if Folder::find_by_uuid_and_user(folder_id, user_id, &mut conn).await.is_none() {
             err!("Invalid folder", "Folder does not exist or belongs to another user");
         }
     }

-    for cipher_id in data.ids {
-        let Some(cipher) = Cipher::find_by_uuid(&cipher_id, &mut conn).await else {
-            err!("Cipher doesn't exist")
-        };
+    let cipher_count = data.ids.len();
+    let mut single_cipher: Option<Cipher> = None;

-        if !cipher.is_accessible_to_user(&user_id, &mut conn).await {
-            err!("Cipher is not accessible by user")
+    // TODO: Convert this to use a single query (or at least less) to update all items
+    // Find all ciphers a user has access to, all others will be ignored
+    let accessible_ciphers = Cipher::find_by_user_and_ciphers(user_id, &data.ids, &mut conn).await;
+    let accessible_ciphers_count = accessible_ciphers.len();
+    for cipher in accessible_ciphers {
+        cipher.move_to_folder(data.folder_id.clone(), user_id, &mut conn).await?;
+        if cipher_count == 1 {
+            single_cipher = Some(cipher);
+        }
     }

-        // Move cipher
-        cipher.move_to_folder(data.folder_id.clone(), &user_id, &mut conn).await?;
-
+    if let Some(cipher) = single_cipher {
         nt.send_cipher_update(
             UpdateType::SyncCipherUpdate,
             &cipher,
-            std::slice::from_ref(&user_id),
+            std::slice::from_ref(user_id),
             &headers.device,
             None,
             &mut conn,
         )
         .await;
-    }
+    } else {
+        // Multi move actions do not send out a push for each cipher, we need to send a general sync here
+        nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, &mut conn).await;
+    }
+
+    if cipher_count != accessible_ciphers_count {
+        err!(format!(
+            "Not all ciphers are moved! {accessible_ciphers_count} of the selected {cipher_count} were moved."
+        ))
+    }

     Ok(())
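Note: the restructured loop above trades per-item lookups and pushes for one bulk query and a single notification. A stand-in sketch of that shape (plain types, no database; "id 0 is inaccessible" simulates the access check):

```rust
struct Cipher {
    id: u32,
}

fn move_ciphers(ids: &[u32]) -> Result<(), String> {
    let cipher_count = ids.len();

    // Stand-in for Cipher::find_by_user_and_ciphers: inaccessible ids are skipped.
    let accessible: Vec<Cipher> = ids.iter().filter(|&&id| id != 0).map(|&id| Cipher { id }).collect();
    let accessible_ciphers_count = accessible.len();

    let mut single_cipher: Option<Cipher> = None;
    for cipher in accessible {
        // cipher.move_to_folder(...) would run here.
        if cipher_count == 1 {
            single_cipher = Some(cipher);
        }
    }

    if let Some(cipher) = single_cipher {
        println!("targeted push for cipher {}", cipher.id); // single item: SyncCipherUpdate
    } else {
        println!("one general sync for the whole batch"); // batch: SyncCiphers
    }

    if cipher_count != accessible_ciphers_count {
        return Err(format!(
            "Not all ciphers are moved! {accessible_ciphers_count} of the selected {cipher_count} were moved."
        ));
    }
    Ok(())
}

fn main() {
    move_ciphers(&[1]).unwrap();
    assert!(move_ciphers(&[0, 1, 2]).is_err());
}
```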
@@ -1764,6 +1796,7 @@ async fn _delete_multiple_ciphers(
 async fn _restore_cipher_by_uuid(
     cipher_id: &CipherId,
     headers: &Headers,
+    multi_restore: bool,
     conn: &mut DbConn,
     nt: &Notify<'_>,
 ) -> JsonResult {
@@ -1778,15 +1811,17 @@ async fn _restore_cipher_by_uuid(
     cipher.deleted_at = None;
     cipher.save(conn).await?;

-    nt.send_cipher_update(
-        UpdateType::SyncCipherUpdate,
-        &cipher,
-        &cipher.update_users_revision(conn).await,
-        &headers.device,
-        None,
-        conn,
-    )
-    .await;
+    if !multi_restore {
+        nt.send_cipher_update(
+            UpdateType::SyncCipherUpdate,
+            &cipher,
+            &cipher.update_users_revision(conn).await,
+            &headers.device,
+            None,
+            conn,
+        )
+        .await;
+    }

     if let Some(org_id) = &cipher.organization_uuid {
         log_event(
@@ -1814,12 +1849,15 @@ async fn _restore_multiple_ciphers(

     let mut ciphers: Vec<Value> = Vec::new();
     for cipher_id in data.ids {
-        match _restore_cipher_by_uuid(&cipher_id, headers, conn, nt).await {
+        match _restore_cipher_by_uuid(&cipher_id, headers, true, conn, nt).await {
             Ok(json) => ciphers.push(json.into_inner()),
             err => return err,
         }
     }

+    // Multi move actions do not send out a push for each cipher, we need to send a general sync here
+    nt.send_user_update(UpdateType::SyncCiphers, &headers.user, &headers.device.push_uuid, conn).await;
+
     Ok(Json(json!({
       "data": ciphers,
       "object": "list",
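Note: same idea as the batched move, reduced to its core. A sketch of how the new `multi_restore` flag suppresses per-item pushes during a batch (stand-in functions, no database):

```rust
fn restore_one(id: u32, multi_restore: bool) {
    // ...clear deleted_at and save the cipher here...
    if !multi_restore {
        println!("targeted push for cipher {id}"); // only for a single restore
    }
}

fn restore_many(ids: &[u32]) {
    for &id in ids {
        restore_one(id, true); // no per-item push
    }
    println!("one general sync for the whole batch");
}

fn main() {
    restore_one(7, false);
    restore_many(&[1, 2, 3]);
}
```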
@@ -2310,7 +2310,7 @@ struct OrgImportData {
     users: Vec<OrgImportUserData>,
 }

-/// This function seems to be deprected
+/// This function seems to be deprecated
 /// It is only used with older directory connectors
 /// TODO: Cleanup Tech debt
 #[post("/organizations/<org_id>/import", data = "<data>")]
@@ -24,6 +24,7 @@ pub fn routes() -> Vec<Route> {
 #[derive(Deserialize)]
 #[serde(rename_all = "camelCase")]
 struct SendEmailLoginData {
+    #[serde(alias = "DeviceIdentifier")]
     device_identifier: DeviceId,

     #[allow(unused)]
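Note: the added `#[serde(alias = ...)]` lets the endpoint accept both the camelCase name produced by `rename_all` and the legacy PascalCase spelling older clients send. A minimal sketch (`String` stands in for the `DeviceId` newtype; assumes `serde` and `serde_json` as dependencies):

```rust
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct SendEmailLoginData {
    // `rename_all` maps this field to "deviceIdentifier"; the alias
    // additionally accepts the legacy "DeviceIdentifier" spelling.
    #[serde(alias = "DeviceIdentifier")]
    device_identifier: String,
}

fn main() {
    let new_style: SendEmailLoginData = serde_json::from_str(r#"{"deviceIdentifier":"abc"}"#).unwrap();
    let old_style: SendEmailLoginData = serde_json::from_str(r#"{"DeviceIdentifier":"abc"}"#).unwrap();
    assert_eq!(new_style.device_identifier, old_style.device_identifier);
}
```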
@@ -641,9 +641,9 @@ async fn stream_to_bytes_limit(res: Response, max_size: usize) -> Result<Bytes,
     let mut buf = BytesMut::new();
     let mut size = 0;
     while let Some(chunk) = stream.next().await {
-        // It is possible that there might occure UnexpectedEof errors or others
+        // It is possible that there might occur UnexpectedEof errors or others
         // This is most of the time no issue, and if there is no chunked data anymore or at all parsing the HTML will not happen anyway.
-        // Therfore if chunk is an err, just break and continue with the data be have received.
+        // Therefore if chunk is an err, just break and continue with the data be have received.
         if chunk.is_err() {
             break;
         }
@@ -293,7 +293,7 @@ async fn _sso_login(
         }
     };

-    // We passed 2FA get full user informations
+    // We passed 2FA get full user information
     let auth_user = sso::redeem(&user_infos.state, conn).await?;

     if sso_user.is_none() {
@@ -1060,12 +1060,12 @@ async fn oidcsignin_redirect(
     wrapper: impl FnOnce(OIDCState) -> sso::OIDCCodeWrapper,
     conn: &DbConn,
 ) -> ApiResult<Redirect> {
-    let state = sso::deocde_state(base64_state)?;
+    let state = sso::decode_state(base64_state)?;
     let code = sso::encode_code_claims(wrapper(state.clone()));

     let nonce = match SsoNonce::find(&state, conn).await {
         Some(n) => n,
-        None => err!(format!("Failed to retrive redirect_uri with {state}")),
+        None => err!(format!("Failed to retrieve redirect_uri with {state}")),
     };

     let mut url = match url::Url::parse(&nonce.redirect_uri) {
@@ -61,7 +61,7 @@ fn vaultwarden_css() -> Cached<Css<String>> {
         "mail_enabled": CONFIG.mail_enabled(),
         "sends_allowed": CONFIG.sends_allowed(),
         "signup_disabled": CONFIG.is_signup_disabled(),
-        "sso_disabled": !CONFIG.sso_enabled(),
+        "sso_enabled": CONFIG.sso_enabled(),
         "sso_only": CONFIG.sso_enabled() && CONFIG.sso_only(),
         "yubico_enabled": CONFIG._enable_yubico() && CONFIG.yubico_client_id().is_some() && CONFIG.yubico_secret_key().is_some(),
     });
@@ -1174,7 +1174,7 @@ impl AuthTokens {

         let access_claims = LoginJwtClaims::default(device, user, &sub, client_id);

-        let validity = if DeviceType::is_mobile(&device.atype) {
+        let validity = if device.is_mobile() {
             *MOBILE_REFRESH_VALIDITY
         } else {
             *DEFAULT_REFRESH_VALIDITY
@@ -283,6 +283,9 @@ macro_rules! make_config {
             "smtp_host",
             "smtp_username",
             "_smtp_img_src",
+            "sso_client_id",
+            "sso_authority",
+            "sso_callback_path",
         ];

         let cfg = {
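Note: the three `sso_*` keys appear to join a list of config names whose values should not be exposed (alongside `smtp_username` and friends). A generic sketch of that kind of masking; the list's exact purpose inside `make_config!` is assumed, only the names are taken from the hunk:

```rust
const SENSITIVE_KEYS: &[&str] =
    &["smtp_host", "smtp_username", "_smtp_img_src", "sso_client_id", "sso_authority", "sso_callback_path"];

// Render a key/value pair for display, masking sensitive values.
fn masked(key: &str, value: &str) -> String {
    if SENSITIVE_KEYS.contains(&key) {
        format!("{key}=***")
    } else {
        format!("{key}={value}")
    }
}

fn main() {
    assert_eq!(masked("sso_client_id", "abc123"), "sso_client_id=***");
    assert_eq!(masked("domain", "https://vw.example.com"), "domain=https://vw.example.com");
}
```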
@@ -715,7 +718,7 @@ make_config! {
         sso_master_password_policy: String, true, option;
         /// Use SSO only for auth not the session lifecycle |> Use default Vaultwarden session lifecycle (Idle refresh token valid for 30days)
         sso_auth_only_not_session: bool, true, def, false;
-        /// Client cache for discovery endpoint. |> Duration in seconds (0 or less to disable). More details: https://github.com/dani-garcia/vaultwarden/blob/sso-support/SSO.md#client-cache
+        /// Client cache for discovery endpoint. |> Duration in seconds (0 or less to disable). More details: https://github.com/dani-garcia/vaultwarden/wiki/Enabling-SSO-support-using-OpenId-Connect#client-cache
         sso_client_cache_expiration: u64, true, def, 0;
         /// Log all tokens |> `LOG_LEVEL=debug` or `LOG_LEVEL=info,vaultwarden::sso=debug` is required
         sso_debug_tokens: bool, true, def, false;
@@ -1145,7 +1148,7 @@ fn validate_config(cfg: &ConfigItems) -> Result<(), Error> {

 fn validate_internal_sso_issuer_url(sso_authority: &String) -> Result<openidconnect::IssuerUrl, Error> {
     match openidconnect::IssuerUrl::new(sso_authority.clone()) {
-        Err(err) => err!(format!("Invalid sso_authority UR ({sso_authority}): {err}")),
+        Err(err) => err!(format!("Invalid sso_authority URL ({sso_authority}): {err}")),
         Ok(issuer_url) => Ok(issuer_url),
     }
 }
@@ -6,7 +6,7 @@ use macros::UuidFromParam;
 use serde_json::Value;

 db_object! {
-    #[derive(Debug, Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)]
+    #[derive(Identifiable, Queryable, Insertable, AsChangeset, Deserialize, Serialize)]
     #[diesel(table_name = auth_requests)]
     #[diesel(treat_none_as_null = true)]
     #[diesel(primary_key(uuid))]
@@ -783,7 +783,12 @@ impl Cipher {
     // true, then the non-interesting ciphers will not be returned. As a
     // result, those ciphers will not appear in "My Vault" for the org
     // owner/admin, but they can still be accessed via the org vault view.
-    pub async fn find_by_user(user_uuid: &UserId, visible_only: bool, conn: &mut DbConn) -> Vec<Self> {
+    pub async fn find_by_user(
+        user_uuid: &UserId,
+        visible_only: bool,
+        cipher_uuids: &Vec<CipherId>,
+        conn: &mut DbConn,
+    ) -> Vec<Self> {
         if CONFIG.org_groups_enabled() {
             db_run! {conn: {
                 let mut query = ciphers::table
@@ -821,7 +826,14 @@ impl Cipher {
                 if !visible_only {
                     query = query.or_filter(
                         users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner
-                        );
+                    );
                 }

+                // Only filter for one specific cipher
+                if !cipher_uuids.is_empty() {
+                    query = query.filter(
+                        ciphers::uuid.eq_any(cipher_uuids)
+                    );
+                }
+
                 query
@@ -850,11 +862,18 @@ impl Cipher {
             .or_filter(users_collections::user_uuid.eq(user_uuid)) // Access to collection
             .into_boxed();

-            if !visible_only {
-                query = query.or_filter(
-                    users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner
-                );
-            }
+        if !visible_only {
+            query = query.or_filter(
+                users_organizations::atype.le(MembershipType::Admin as i32) // Org admin/owner
+            );
+        }

+        // Only filter for one specific cipher
+        if !cipher_uuids.is_empty() {
+            query = query.filter(
+                ciphers::uuid.eq_any(cipher_uuids)
+            );
+        }
+
         query
             .select(ciphers::all_columns)
@@ -866,7 +885,23 @@ impl Cipher {

     // Find all ciphers visible to the specified user.
     pub async fn find_by_user_visible(user_uuid: &UserId, conn: &mut DbConn) -> Vec<Self> {
-        Self::find_by_user(user_uuid, true, conn).await
+        Self::find_by_user(user_uuid, true, &vec![], conn).await
     }

+    pub async fn find_by_user_and_ciphers(
+        user_uuid: &UserId,
+        cipher_uuids: &Vec<CipherId>,
+        conn: &mut DbConn,
+    ) -> Vec<Self> {
+        Self::find_by_user(user_uuid, true, cipher_uuids, conn).await
+    }
+
+    pub async fn find_by_user_and_cipher(
+        user_uuid: &UserId,
+        cipher_uuid: &CipherId,
+        conn: &mut DbConn,
+    ) -> Option<Self> {
+        Self::find_by_user(user_uuid, true, &vec![cipher_uuid.clone()], conn).await.pop()
+    }
+
     // Find all ciphers directly owned by the specified user.
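Note: all three wrappers above funnel into `find_by_user`, whose new `cipher_uuids` argument acts as an optional restriction: an empty list means "no extra filter". A plain-`Vec` sketch of that contract (no Diesel, stand-in ids):

```rust
fn find_by_user(visible: &[u32], cipher_uuids: &[u32]) -> Vec<u32> {
    visible
        .iter()
        .copied()
        // An empty uuid list applies no extra filter; a non-empty one restricts the set.
        .filter(|id| cipher_uuids.is_empty() || cipher_uuids.contains(id))
        .collect()
}

fn main() {
    let visible = [1, 2, 3, 4];
    assert_eq!(find_by_user(&visible, &[]), vec![1, 2, 3, 4]); // find_by_user_visible
    assert_eq!(find_by_user(&visible, &[2, 4]), vec![2, 4]); // find_by_user_and_ciphers
    assert_eq!(find_by_user(&visible, &[3]).pop(), Some(3)); // find_by_user_and_cipher
}
```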
@@ -70,6 +70,10 @@ impl Device {
     pub fn is_cli(&self) -> bool {
         matches!(DeviceType::from_i32(self.atype), DeviceType::WindowsCLI | DeviceType::MacOsCLI | DeviceType::LinuxCLI)
     }
+
+    pub fn is_mobile(&self) -> bool {
+        matches!(DeviceType::from_i32(self.atype), DeviceType::Android | DeviceType::Ios)
+    }
 }

 pub struct DeviceWithAuthRequest {
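Note: the new method replaces the free function removed in the next hunk, so callers can write `device.is_mobile()` instead of `DeviceType::is_mobile(&device.atype)`. A stand-in sketch (vaultwarden actually stores an `i32` and decodes it with `DeviceType::from_i32`):

```rust
#[derive(Debug)]
enum DeviceType {
    Android,
    Ios,
    ChromeBrowser,
}

struct Device {
    atype: DeviceType,
}

impl Device {
    // matches! checks the device type against the two mobile variants.
    fn is_mobile(&self) -> bool {
        matches!(self.atype, DeviceType::Android | DeviceType::Ios)
    }
}

fn main() {
    assert!(Device { atype: DeviceType::Android }.is_mobile());
    assert!(Device { atype: DeviceType::Ios }.is_mobile());
    assert!(!Device { atype: DeviceType::ChromeBrowser }.is_mobile());
}
```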
@@ -353,10 +357,6 @@ impl DeviceType {
             _ => DeviceType::UnknownBrowser,
         }
     }
-
-    pub fn is_mobile(value: &i32) -> bool {
-        *value == DeviceType::Android as i32 || *value == DeviceType::Ios as i32
-    }
 }

 #[derive(
@@ -135,7 +135,7 @@ impl CollectionGroup {
         // If both read_only and hide_passwords are false, then manage should be true
         // You can't have an entry with read_only and manage, or hide_passwords and manage
         // Or an entry with everything to false
-        // For backwards compaibility and migration proposes we keep checking read_only and hide_password
+        // For backwards compatibility and migration proposes we keep checking read_only and hide_password
         json!({
             "id": self.groups_uuid,
             "readOnly": self.read_only,
@@ -151,7 +151,7 @@ fn decode_token_claims(token_name: &str, token: &str) -> ApiResult<BasicTokenCla
     }
 }

-pub fn deocde_state(base64_state: String) -> ApiResult<OIDCState> {
+pub fn decode_state(base64_state: String) -> ApiResult<OIDCState> {
     let state = match data_encoding::BASE64.decode(base64_state.as_bytes()) {
         Ok(vec) => match String::from_utf8(vec) {
             Ok(valid) => OIDCState(valid),
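Note: `decode_state` (the corrected spelling) is a small base64-to-UTF-8 helper. A minimal sketch using the same `data_encoding` crate as the code above, with `Result<String, String>` standing in for the project's `ApiResult`/`OIDCState` types:

```rust
use data_encoding::BASE64;

// Base64 -> bytes -> UTF-8 string, surfacing both failure modes.
fn decode_state(base64_state: &str) -> Result<String, String> {
    let bytes = BASE64
        .decode(base64_state.as_bytes())
        .map_err(|e| format!("Invalid BASE64: {e}"))?;
    String::from_utf8(bytes).map_err(|e| format!("Invalid UTF-8: {e}"))
}

fn main() {
    assert_eq!(decode_state("aGVsbG8="), Ok("hello".to_string()));
    assert!(decode_state("not base64!").is_err());
}
```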
@@ -316,7 +316,7 @@ pub async fn exchange_code(wrapped_code: &str, conn: &mut DbConn) -> ApiResult<U
         user_name: user_name.clone(),
     };

-    debug!("Authentified user {authenticated_user:?}");
+    debug!("Authenticated user {authenticated_user:?}");

     AC_CACHE.insert(state.clone(), authenticated_user);

@@ -443,7 +443,7 @@ pub async fn exchange_refresh_token(
         err_silent!("Access token is close to expiration but we have no refresh token")
     }

-    Client::check_validaty(access_token.clone()).await?;
+    Client::check_validity(access_token.clone()).await?;

     let access_claims = auth::LoginJwtClaims::new(
         device,
@@ -203,7 +203,7 @@ impl Client {
         }
     }

-    pub async fn check_validaty(access_token: String) -> EmptyResult {
+    pub async fn check_validity(access_token: String) -> EmptyResult {
         let client = Client::cached().await?;
         match client.user_info(AccessToken::new(access_token)).await {
             Err(err) => {
@@ -21,21 +21,21 @@ a[href$="/settings/sponsored-families"] {
 }

 /* Hide the sso `Email` input field */
-{{#if sso_disabled}}
+{{#if (not sso_enabled)}}
 .vw-email-sso {
     @extend %vw-hide;
 }
 {{/if}}

 /* Hide the default/continue `Email` input field */
-{{#if (not sso_disabled)}}
+{{#if sso_enabled}}
 .vw-email-continue {
     @extend %vw-hide;
 }
 {{/if}}

 /* Hide the `Continue` button on the login page */
-{{#if (not sso_disabled)}}
+{{#if sso_enabled}}
 .vw-continue-login {
     @extend %vw-hide;
 }
@@ -43,7 +43,7 @@ a[href$="/settings/sponsored-families"] {

 /* Hide the `Enterprise Single Sign-On` button on the login page */
 {{#if (webver ">=2025.5.1")}}
-{{#if sso_disabled}}
+{{#if (not sso_enabled)}}
 .vw-sso-login {
     @extend %vw-hide;
 }
@@ -71,7 +71,7 @@ app-root ng-component > form > div:nth-child(1) > div > button[buttontype="secon

 /* Hide the or text followed by the two buttons hidden above */
 {{#if (webver ">=2025.5.1")}}
-{{#if (or sso_disabled sso_only)}}
+{{#if (or (not sso_enabled) sso_only)}}
 .vw-or-text {
     @extend %vw-hide;
 }
@@ -83,7 +83,7 @@ app-root ng-component > form > div:nth-child(1) > div:nth-child(3) > div:nth-chi
 {{/if}}

 /* Hide the `Other` button on the login page */
-{{#if (or sso_disabled sso_only)}}
+{{#if (or (not sso_enabled) sso_only)}}
 .vw-other-login {
     @extend %vw-hide;
 }