Mirror of https://github.com/AppFlowy-IO/AppFlowy-Cloud.git (synced 2025-04-19 03:24:42 -04:00)

Merge pull request #1286 from AppFlowy-IO/migrate-af-collab
Migrate af_collab to non-partitioned table with UUID keys

Commit d4c45528a5: 144 changed files with 2727 additions and 2720 deletions
.github/workflows/integration_test.yml (vendored, 2 changes)

@@ -30,6 +30,7 @@ env:
   DATABASE_URL: postgres://postgres:password@localhost:5432/postgres
   SQLX_OFFLINE: true
   RUST_TOOLCHAIN: "1.80"
+  APPFLOWY_AI_VERSION: "0.9.27-test"

 jobs:
   setup:
@@ -123,6 +124,7 @@ jobs:
           export APPFLOWY_WORKER_VERSION=${GITHUB_SHA}
           export APPFLOWY_CLOUD_VERSION=${GITHUB_SHA}
           export APPFLOWY_ADMIN_FRONTEND_VERSION=${GITHUB_SHA}
+          export APPFLOWY_AI_VERSION=${{ env.APPFLOWY_AI_VERSION }}
           docker compose -f docker-compose-ci.yml up -d
           docker ps -a
@@ -6,7 +6,7 @@
     {
       "ordinal": 0,
      "name": "oid",
-      "type_info": "Text"
+      "type_info": "Uuid"
     }
   ],
   "parameters": {
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\nSELECT\n w.settings['disable_search_indexing']::boolean as disable_search_indexing,\n CASE\n WHEN w.settings['disable_search_indexing']::boolean THEN\n FALSE\n ELSE\n EXISTS (SELECT 1 FROM af_collab_embeddings m WHERE m.partition_key = $3 AND m.oid = $2)\n END as has_index\nFROM af_workspace w\nWHERE w.workspace_id = $1",
+  "query": "\nSELECT\n w.settings['disable_search_indexing']::boolean as disable_search_indexing,\n CASE\n WHEN w.settings['disable_search_indexing']::boolean THEN\n FALSE\n ELSE\n EXISTS (SELECT 1 FROM af_collab_embeddings m WHERE m.oid = $2::uuid)\n END as has_index\nFROM af_workspace w\nWHERE w.workspace_id = $1",
   "describe": {
     "columns": [
       {
@@ -17,8 +17,7 @@
   "parameters": {
     "Left": [
       "Uuid",
-      "Text",
-      "Int4"
+      "Uuid"
     ]
   },
   "nullable": [
@@ -26,5 +25,5 @@
     null
   ]
 },
-  "hash": "773aac7e401c3e6c04d1dc8ea412b9678b7227832a3487270d724f623072fe89"
+  "hash": "2c0a776a787bc748857873b682d2fa3c549ffeaf767aa8ee05b09b3857505ded"
 }
@@ -1,12 +1,12 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT oid, indexed_at\n FROM af_collab\n WHERE (oid, partition_key) = ANY (\n SELECT UNNEST($1::text[]), UNNEST($2::int[])\n )\n ",
+  "query": "\n SELECT oid, indexed_at\n FROM af_collab\n WHERE oid = ANY (SELECT UNNEST($1::uuid[]))\n ",
   "describe": {
     "columns": [
       {
         "ordinal": 0,
         "name": "oid",
-        "type_info": "Text"
+        "type_info": "Uuid"
       },
       {
         "ordinal": 1,
@@ -16,8 +16,7 @@
   ],
   "parameters": {
     "Left": [
-      "TextArray",
-      "Int4Array"
+      "UuidArray"
     ]
   },
   "nullable": [
@@ -25,5 +24,5 @@
     true
   ]
 },
-  "hash": "f8c909517885cb30e3f7d573edf47138f90ea9c5fa73eb927cc5487c3d9ad0be"
+  "hash": "3865d921d76ac0d0eb16065738cddf82cb71945504116b0a04da759209b9c250"
 }
@@ -1,12 +1,12 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT\n ac.oid AS object_id,\n ac.partition_key,\n ac.indexed_at,\n ace.updated_at\n FROM af_collab_embeddings ac\n JOIN af_collab ace\n ON ac.oid = ace.oid\n AND ac.partition_key = ace.partition_key\n WHERE ac.oid = $1 AND ac.partition_key = $2\n ",
+  "query": "\n SELECT\n ac.oid as object_id,\n ace.partition_key,\n ac.indexed_at,\n ace.updated_at\n FROM af_collab_embeddings ac\n JOIN af_collab ace ON ac.oid = ace.oid\n WHERE ac.oid = $1\n ",
   "describe": {
     "columns": [
       {
         "ordinal": 0,
         "name": "object_id",
-        "type_info": "Text"
+        "type_info": "Uuid"
       },
       {
         "ordinal": 1,
@@ -26,8 +26,7 @@
   ],
   "parameters": {
     "Left": [
-      "Text",
-      "Int4"
+      "Uuid"
     ]
   },
   "nullable": [
@@ -37,5 +36,5 @@
     false
   ]
 },
-  "hash": "968c7a6f13255220b3d497d9a1edb181b062747d6463e400158cfdc753a82c5b"
+  "hash": "4fc0611c846f86be652d42eb8ae21a5da0353fe810856aaabe91d7963329d098"
 }
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n DELETE FROM af_collab_embeddings e\n USING af_collab c\n WHERE e.oid = c.oid\n AND e.partition_key = c.partition_key\n AND c.workspace_id = $1\n ",
+  "query": "\n DELETE FROM af_collab_embeddings e\n USING af_collab c\n WHERE e.oid = c.oid\n AND c.workspace_id = $1\n ",
   "describe": {
     "columns": [],
     "parameters": {
@@ -10,5 +10,5 @@
   },
   "nullable": []
 },
-  "hash": "cbe43eb734e0afd865a7c1082ba7ded940d66320270d5ab4431271dd50a9d50b"
+  "hash": "5c2d58bfdedbb1be71337a97d5ed5a2921f83dd549507b2834a4d2582d2c361b"
 }
@@ -1,12 +1,12 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT oid, blob\n FROM af_collab\n WHERE oid = ANY($1) AND partition_key = $2 AND deleted_at IS NULL;\n ",
+  "query": "\n SELECT oid, blob\n FROM af_collab\n WHERE oid = ANY($1) AND deleted_at IS NULL;\n ",
   "describe": {
     "columns": [
       {
         "ordinal": 0,
         "name": "oid",
-        "type_info": "Text"
+        "type_info": "Uuid"
       },
       {
         "ordinal": 1,
@@ -16,8 +16,7 @@
   ],
   "parameters": {
     "Left": [
-      "TextArray",
-      "Int4"
+      "UuidArray"
     ]
   },
   "nullable": [
@@ -25,5 +24,5 @@
     false
   ]
 },
-  "hash": "a7f47366a4016e10dfe9195f865ca0f0a2877738144afbd82844d75c4ea0ea8e"
+  "hash": "6935572cb23700243fbbd3dc382cdbf56edaadc4aab7855c237bce68e29414c0"
 }
@@ -11,7 +11,7 @@
   ],
   "parameters": {
     "Left": [
-      "Text",
+      "Uuid",
       "Int4"
     ]
   },
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT\n updated_at as updated_at,\n oid as row_id\n FROM af_collab_database_row\n WHERE workspace_id = $1\n AND oid = ANY($2)\n AND updated_at > $3\n ",
+  "query": "\n SELECT\n updated_at as updated_at,\n oid as row_id\n FROM af_collab\n WHERE workspace_id = $1\n AND oid = ANY($2)\n AND updated_at > $3\n ",
   "describe": {
     "columns": [
       {
@@ -11,13 +11,13 @@
       {
         "ordinal": 1,
         "name": "row_id",
-        "type_info": "Text"
+        "type_info": "Uuid"
       }
     ],
     "parameters": {
       "Left": [
         "Uuid",
-        "TextArray",
+        "UuidArray",
         "Timestamptz"
       ]
     },
@@ -26,5 +26,5 @@
     false
   ]
 },
-  "hash": "1331f64dbbf63fc694e3358aefd2bdc4b3bcff64eda36420acde1a948884239d"
+  "hash": "6ca2a2fa10d5334183d98176998d41f36948fe5624e290a32d0b50bc9fb256bf"
 }
@@ -5,7 +5,7 @@
   "columns": [],
   "parameters": {
     "Left": [
-      "Text",
+      "Uuid",
       "Timestamptz"
     ]
   },
.sqlx/query-8724214da0311c43988035526454ed1747ebc89dc350ee45827381418f71313e.json (generated, new file, 19 lines)

@@ -0,0 +1,19 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n INSERT INTO af_collab (oid, blob, len, partition_key, owner_uid, workspace_id)\n VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (oid)\n DO UPDATE SET blob = $2, len = $3, owner_uid = $5 WHERE excluded.workspace_id = af_collab.workspace_id;\n ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Uuid",
+        "Bytea",
+        "Int4",
+        "Int4",
+        "Int8",
+        "Uuid"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "8724214da0311c43988035526454ed1747ebc89dc350ee45827381418f71313e"
+}
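For orientation, here is a minimal sketch of how this new single-key upsert might be driven through sqlx from Rust. The function name and pool handle are illustrative, not the repository's actual call site; it assumes sqlx's `uuid` feature, and it presumes what the query itself implies: that `oid` alone is now a unique key of the non-partitioned af_collab table.

use sqlx::PgPool;
use uuid::Uuid;

// Hypothetical helper: upsert one collab blob under its UUID key.
async fn upsert_collab(
  pool: &PgPool,
  oid: Uuid,           // collab object id, now the sole conflict target
  blob: Vec<u8>,       // encoded collab state
  partition_key: i32,  // kept as a plain column, no longer part of the key
  owner_uid: i64,
  workspace_id: Uuid,
) -> Result<(), sqlx::Error> {
  let len = blob.len() as i32;
  sqlx::query(
    "INSERT INTO af_collab (oid, blob, len, partition_key, owner_uid, workspace_id) \
     VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (oid) \
     DO UPDATE SET blob = $2, len = $3, owner_uid = $5 \
     WHERE excluded.workspace_id = af_collab.workspace_id",
  )
  .bind(oid)
  .bind(blob)
  .bind(len)
  .bind(partition_key)
  .bind(owner_uid)
  .bind(workspace_id)
  .execute(pool)
  .await?;
  Ok(())
}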
.sqlx/query-8df42aa8353a5fa510c0ab23412daebb263e8cf57b62e838460882cbf09cd551.json (deleted)

@@ -1,20 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n INSERT INTO af_collab (oid, blob, len, partition_key, encrypt, owner_uid, workspace_id)\n SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::int[], $5::int[], $6::bigint[], $7::uuid[])\n ON CONFLICT (oid, partition_key)\n DO UPDATE SET blob = excluded.blob, len = excluded.len, encrypt = excluded.encrypt where af_collab.workspace_id = excluded.workspace_id\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Left": [
-        "UuidArray",
-        "ByteaArray",
-        "Int4Array",
-        "Int4Array",
-        "Int4Array",
-        "Int8Array",
-        "UuidArray"
-      ]
-    },
-    "nullable": []
-  },
-  "hash": "8df42aa8353a5fa510c0ab23412daebb263e8cf57b62e838460882cbf09cd551"
-}
.sqlx/query-c62e3c19160fdbcf2ef7bc2c85ec012f628d593c8b2eba5e6ef3ba313045a696.json (deleted)

@@ -1,20 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n INSERT INTO af_collab (oid, blob, len, partition_key, encrypt, owner_uid, workspace_id)\n VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT (oid, partition_key)\n DO UPDATE SET blob = $2, len = $3, encrypt = $5, owner_uid = $6 WHERE excluded.workspace_id = af_collab.workspace_id;\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Left": [
-        "Text",
-        "Bytea",
-        "Int4",
-        "Int4",
-        "Int4",
-        "Int8",
-        "Uuid"
-      ]
-    },
-    "nullable": []
-  },
-  "hash": "c62e3c19160fdbcf2ef7bc2c85ec012f628d593c8b2eba5e6ef3ba313045a696"
-}
@@ -1,12 +1,12 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n SELECT\n ac.oid AS object_id,\n ac.partition_key,\n ac.indexed_at,\n ace.updated_at\n FROM af_collab_embeddings ac\n JOIN af_collab ace\n ON ac.oid = ace.oid\n AND ac.partition_key = ace.partition_key\n WHERE ac.oid = ANY($1) AND ac.partition_key = ANY($2)\n ",
+  "query": "\n SELECT\n ac.oid as object_id,\n ace.partition_key,\n ac.indexed_at,\n ace.updated_at\n FROM af_collab_embeddings ac\n JOIN af_collab ace ON ac.oid = ace.oid\n WHERE ac.oid = ANY($1)\n ",
   "describe": {
     "columns": [
       {
         "ordinal": 0,
         "name": "object_id",
-        "type_info": "Text"
+        "type_info": "Uuid"
       },
       {
         "ordinal": 1,
@@ -26,8 +26,7 @@
   ],
   "parameters": {
     "Left": [
-      "TextArray",
-      "Int4Array"
+      "UuidArray"
     ]
   },
   "nullable": [
@@ -37,5 +36,5 @@
     false
   ]
 },
-  "hash": "cdbbea42600d61b6541808867397de29e2e5df569faefa098254f1afd3aa662d"
+  "hash": "cbf1d3d9fdeb672eacd4b008879787bc1f0b22a554fb249d4e12a665d9767cbd"
 }
@@ -6,7 +6,7 @@
   "parameters": {
     "Left": [
       "Timestamptz",
-      "Text",
+      "Uuid",
       "Int4"
     ]
   },

@@ -11,7 +11,7 @@
   ],
   "parameters": {
     "Left": [
-      "Text"
+      "Uuid"
     ]
   },
   "nullable": [
.sqlx/query-ed9bce7f35c4dd8d41427bc56db67adf175044a8d31149b3745ceb8f9b3c82fa.json (generated, new file, 41 lines)

@@ -0,0 +1,41 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT oid, snapshot, snapshot_version, created_at\n FROM af_snapshot_meta\n WHERE oid = $1 AND partition_key = $2\n ORDER BY created_at DESC",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "oid",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 1,
+        "name": "snapshot",
+        "type_info": "Bytea"
+      },
+      {
+        "ordinal": 2,
+        "name": "snapshot_version",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 3,
+        "name": "created_at",
+        "type_info": "Int8"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Text",
+        "Int4"
+      ]
+    },
+    "nullable": [
+      false,
+      false,
+      false,
+      false
+    ]
+  },
+  "hash": "ed9bce7f35c4dd8d41427bc56db67adf175044a8d31149b3745ceb8f9b3c82fa"
+}
@@ -6,7 +6,7 @@
     {
       "ordinal": 0,
       "name": "oid",
-      "type_info": "Text"
+      "type_info": "Uuid"
     },
     {
       "ordinal": 1,
@@ -26,7 +26,7 @@
   ],
   "parameters": {
     "Left": [
-      "Text",
+      "Uuid",
       "Int4"
     ]
   },

@@ -11,7 +11,7 @@
     {
       "ordinal": 1,
       "name": "oid",
-      "type_info": "Text"
+      "type_info": "Uuid"
     },
     {
       "ordinal": 2,
.sqlx/query-fbe0746688157bf563bd6a8fb707ef9553c6751c3dd214f461f2de087f8b29c4.json (generated, new file, 19 lines)

@@ -0,0 +1,19 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n INSERT INTO af_collab (oid, blob, len, partition_key, owner_uid, workspace_id)\n SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::int[], $5::bigint[], $6::uuid[])\n ON CONFLICT (oid)\n DO UPDATE SET blob = excluded.blob, len = excluded.len where af_collab.workspace_id = excluded.workspace_id\n ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "UuidArray",
+        "ByteaArray",
+        "Int4Array",
+        "Int4Array",
+        "Int8Array",
+        "UuidArray"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "fbe0746688157bf563bd6a8fb707ef9553c6751c3dd214f461f2de087f8b29c4"
+}
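As with the single-row variant earlier, a hedged sketch of invoking this batch upsert from Rust: sqlx binds Rust vectors as Postgres arrays, and UNNEST zips the six arrays into rows. Names are illustrative; it assumes the sqlx `uuid` feature and equal-length input vectors.

use sqlx::PgPool;
use uuid::Uuid;

// Hypothetical batch upsert: one UNNEST row per collab.
async fn upsert_collabs(
  pool: &PgPool,
  oids: Vec<Uuid>,
  blobs: Vec<Vec<u8>>,
  lens: Vec<i32>,
  partition_keys: Vec<i32>,
  owner_uids: Vec<i64>,
  workspace_ids: Vec<Uuid>,
) -> Result<(), sqlx::Error> {
  sqlx::query(
    "INSERT INTO af_collab (oid, blob, len, partition_key, owner_uid, workspace_id) \
     SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::int[], $5::bigint[], $6::uuid[]) \
     ON CONFLICT (oid) \
     DO UPDATE SET blob = excluded.blob, len = excluded.len \
     WHERE af_collab.workspace_id = excluded.workspace_id",
  )
  .bind(oids)           // uuid[]
  .bind(blobs)          // bytea[]
  .bind(lens)           // int[]
  .bind(partition_keys) // int[]
  .bind(owner_uids)     // bigint[]
  .bind(workspace_ids)  // uuid[]
  .execute(pool)
  .await?;
  Ok(())
}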
Cargo.lock (generated, 1 change)

@@ -2088,6 +2088,7 @@ dependencies = [
  "thiserror 1.0.63",
  "tokio",
  "tracing",
+ "uuid",
  "yrs",
 ]
@@ -1,13 +1,13 @@
-use app_error::AppError;
-use async_trait::async_trait;
-use database_entity::dto::AFAccessLevel;
-use tracing::instrument;
-
 use crate::{
   act::Action,
   collab::{CollabAccessControl, RealtimeAccessControl},
   entity::ObjectType,
 };
+use app_error::AppError;
+use async_trait::async_trait;
+use database_entity::dto::AFAccessLevel;
+use tracing::instrument;
+use uuid::Uuid;
+
 use super::access::AccessControl;
@@ -26,9 +26,9 @@ impl CollabAccessControlImpl {
 impl CollabAccessControl for CollabAccessControlImpl {
   async fn enforce_action(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    _oid: &str,
+    _oid: &Uuid,
     action: Action,
   ) -> Result<(), AppError> {
     // TODO: allow non workspace member to read a collab.
@@ -57,9 +57,9 @@ impl CollabAccessControl for CollabAccessControlImpl {

   async fn enforce_access_level(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    _oid: &str,
+    _oid: &Uuid,
     access_level: AFAccessLevel,
   ) -> Result<(), AppError> {
     // TODO: allow non workspace member to read a collab.
@@ -91,7 +91,7 @@ impl CollabAccessControl for CollabAccessControlImpl {
   async fn update_access_level_policy(
     &self,
     _uid: &i64,
-    _oid: &str,
+    _oid: &Uuid,
     _level: AFAccessLevel,
   ) -> Result<(), AppError> {
     // TODO: allow non workspace member to read a collab.
@@ -99,7 +99,7 @@ impl CollabAccessControl for CollabAccessControlImpl {
   }

   #[instrument(level = "info", skip_all)]
-  async fn remove_access_level(&self, _uid: &i64, _oid: &str) -> Result<(), AppError> {
+  async fn remove_access_level(&self, _uid: &i64, _oid: &Uuid) -> Result<(), AppError> {
     // TODO: allow non workspace member to read a collab.
     Ok(())
   }
@@ -117,9 +117,9 @@ impl RealtimeCollabAccessControlImpl {

   async fn can_perform_action(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    _oid: &str,
+    _oid: &Uuid,
     required_action: Action,
   ) -> Result<bool, AppError> {
     // TODO: allow non workspace member to read a collab.
@@ -146,9 +146,9 @@ impl RealtimeCollabAccessControlImpl {
 impl RealtimeAccessControl for RealtimeCollabAccessControlImpl {
   async fn can_write_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
   ) -> Result<bool, AppError> {
     self
       .can_perform_action(workspace_id, uid, oid, Action::Write)
@@ -157,9 +157,9 @@ impl RealtimeAccessControl for RealtimeCollabAccessControlImpl {

   async fn can_read_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
   ) -> Result<bool, AppError> {
     self
       .can_perform_action(workspace_id, uid, oid, Action::Read)
@@ -170,6 +170,7 @@ impl RealtimeAccessControl for RealtimeCollabAccessControlImpl {
 #[cfg(test)]
 mod tests {
   use database_entity::dto::AFRole;
+  use uuid::Uuid;

   use crate::{
     act::Action,
@@ -182,8 +183,8 @@ mod tests {
   pub async fn test_collab_access_control() {
     let enforcer = test_enforcer().await;
     let uid = 1;
-    let workspace_id = "w1";
-    let oid = "o1";
+    let workspace_id = Uuid::new_v4();
+    let oid = Uuid::new_v4();
     enforcer
       .update_policy(
         SubjectType::User(uid),
@@ -196,7 +197,7 @@ mod tests {
     let collab_access_control = super::CollabAccessControlImpl::new(access_control);
     for action in [Action::Read, Action::Write, Action::Delete] {
       collab_access_control
-        .enforce_action(workspace_id, &uid, oid, action.clone())
+        .enforce_action(&workspace_id, &uid, &oid, action.clone())
         .await
         .unwrap_or_else(|_| panic!("Failed to enforce action: {:?}", action));
     }
@@ -25,7 +25,7 @@ impl WorkspaceAccessControl for WorkspaceAccessControlImpl {
   async fn enforce_role(
     &self,
     uid: &i64,
-    workspace_id: Uuid,
+    workspace_id: &Uuid,
     role: AFRole,
   ) -> Result<(), AppError> {
     let result = self
@@ -42,7 +42,7 @@ impl WorkspaceAccessControl for WorkspaceAccessControlImpl {
   async fn enforce_action(
     &self,
     uid: &i64,
-    workspace_id: Uuid,
+    workspace_id: &Uuid,
     action: Action,
   ) -> Result<(), AppError> {
     let result = self
@@ -137,21 +137,21 @@ mod tests {
     let workspace_access_control = super::WorkspaceAccessControlImpl::new(access_control);
     for uid in [member_uid, owner_uid] {
       workspace_access_control
-        .enforce_role(&uid, workspace_id, AFRole::Member)
+        .enforce_role(&uid, &workspace_id, AFRole::Member)
         .await
         .unwrap_or_else(|_| panic!("Failed to enforce role for {}", uid));
       workspace_access_control
-        .enforce_action(&uid, workspace_id, crate::act::Action::Read)
+        .enforce_action(&uid, &workspace_id, crate::act::Action::Read)
         .await
         .unwrap_or_else(|_| panic!("Failed to enforce action for {}", uid));
     }
     let result = workspace_access_control
-      .enforce_action(&member_uid, workspace_id, crate::act::Action::Delete)
+      .enforce_action(&member_uid, &workspace_id, crate::act::Action::Delete)
       .await;
     let error_code = result.unwrap_err().code();
     assert_eq!(error_code, ErrorCode::NotEnoughPermissions);
     workspace_access_control
-      .enforce_action(&owner_uid, workspace_id, crate::act::Action::Delete)
+      .enforce_action(&owner_uid, &workspace_id, crate::act::Action::Delete)
       .await
       .unwrap();
   }
@@ -2,6 +2,7 @@ use crate::act::Action;
 use app_error::AppError;
 use async_trait::async_trait;
 use database_entity::dto::AFAccessLevel;
+use uuid::Uuid;

 #[async_trait]
 pub trait CollabAccessControl: Sync + Send + 'static {
@@ -9,9 +10,9 @@ pub trait CollabAccessControl: Sync + Send + 'static {
   /// Returns AppError::NotEnoughPermission if the user does not have the permission.
   async fn enforce_action(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
     action: Action,
   ) -> Result<(), AppError>;

@@ -19,9 +20,9 @@ pub trait CollabAccessControl: Sync + Send + 'static {
   /// Returns AppError::NotEnoughPermission if the user does not have the access level.
   async fn enforce_access_level(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
     access_level: AFAccessLevel,
   ) -> Result<(), AppError>;

@@ -29,11 +30,11 @@ pub trait CollabAccessControl: Sync + Send + 'static {
   async fn update_access_level_policy(
     &self,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
     level: AFAccessLevel,
   ) -> Result<(), AppError>;

-  async fn remove_access_level(&self, uid: &i64, oid: &str) -> Result<(), AppError>;
+  async fn remove_access_level(&self, uid: &i64, oid: &Uuid) -> Result<(), AppError>;
 }

 #[async_trait]
@@ -47,9 +48,9 @@ pub trait RealtimeAccessControl: Sync + Send + 'static {
   /// 3. If the collab object is not found which means the collab object is created by the user.
   async fn can_write_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
   ) -> Result<bool, AppError>;

   /// Return true if the user is allowed to observe the changes of given collab.
@@ -58,8 +59,8 @@ pub trait RealtimeAccessControl: Sync + Send + 'static {
   /// The user can recv the message if the user is the member of the collab object
   async fn can_read_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
   ) -> Result<bool, AppError>;
 }
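Because every method in these traits now takes `&Uuid` instead of `&str`, callers that still receive ids as text (a URL path segment, a JSON payload) would parse once at the boundary and pass references downward. A minimal sketch under that assumption; the function and variable names are illustrative, not from the repository:

use uuid::Uuid;

// Parse incoming string ids once; everything below the boundary uses &Uuid.
fn parse_ids(raw_workspace_id: &str, raw_object_id: &str) -> Result<(Uuid, Uuid), uuid::Error> {
  let workspace_id = Uuid::parse_str(raw_workspace_id)?;
  let oid = Uuid::parse_str(raw_object_id)?;
  Ok((workspace_id, oid))
}

This also moves "is this a valid id?" failures out of the access-control layer: a malformed id is rejected before any policy check runs.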
@@ -1,6 +1,7 @@
 use app_error::AppError;
 use async_trait::async_trait;
 use database_entity::dto::AFAccessLevel;
+use uuid::Uuid;

 use crate::{
   act::Action,
@@ -26,9 +27,9 @@ impl Default for CollabAccessControlImpl {
 impl CollabAccessControl for CollabAccessControlImpl {
   async fn enforce_action(
     &self,
-    _workspace_id: &str,
+    _workspace_id: &Uuid,
     _uid: &i64,
-    _oid: &str,
+    _oid: &Uuid,
     _action: Action,
   ) -> Result<(), AppError> {
     Ok(())
@@ -36,9 +37,9 @@ impl CollabAccessControl for CollabAccessControlImpl {

   async fn enforce_access_level(
     &self,
-    _workspace_id: &str,
+    _workspace_id: &Uuid,
     _uid: &i64,
-    _oid: &str,
+    _oid: &Uuid,
     _access_level: AFAccessLevel,
   ) -> Result<(), AppError> {
     Ok(())
@@ -47,13 +48,13 @@ impl CollabAccessControl for CollabAccessControlImpl {
   async fn update_access_level_policy(
     &self,
     _uid: &i64,
-    _oid: &str,
+    _oid: &Uuid,
     _level: AFAccessLevel,
   ) -> Result<(), AppError> {
     Ok(())
   }

-  async fn remove_access_level(&self, _uid: &i64, _oid: &str) -> Result<(), AppError> {
+  async fn remove_access_level(&self, _uid: &i64, _oid: &Uuid) -> Result<(), AppError> {
     Ok(())
   }
 }
@@ -77,18 +78,18 @@ impl Default for RealtimeCollabAccessControlImpl {
 impl RealtimeAccessControl for RealtimeCollabAccessControlImpl {
   async fn can_write_collab(
     &self,
-    _workspace_id: &str,
+    _workspace_id: &Uuid,
     _uid: &i64,
-    _oid: &str,
+    _oid: &Uuid,
   ) -> Result<bool, AppError> {
     Ok(true)
   }

   async fn can_read_collab(
     &self,
-    _workspace_id: &str,
+    _workspace_id: &Uuid,
     _uid: &i64,
-    _oid: &str,
+    _oid: &Uuid,
   ) -> Result<bool, AppError> {
     Ok(true)
   }
@@ -26,7 +26,7 @@ impl WorkspaceAccessControl for WorkspaceAccessControlImpl {
   async fn enforce_role(
     &self,
     _uid: &i64,
-    _workspace_id: Uuid,
+    _workspace_id: &Uuid,
     _role: AFRole,
   ) -> Result<(), AppError> {
     Ok(())
@@ -35,7 +35,7 @@ impl WorkspaceAccessControl for WorkspaceAccessControlImpl {
   async fn enforce_action(
     &self,
     _uid: &i64,
-    _workspace_id: Uuid,
+    _workspace_id: &Uuid,
     _action: Action,
   ) -> Result<(), AppError> {
     Ok(())
@@ -8,15 +8,19 @@ use sqlx::types::Uuid;
 pub trait WorkspaceAccessControl: Send + Sync + 'static {
   /// Check if the user has the role in the workspace.
   /// Returns AppError::NotEnoughPermission if the user does not have the role.
-  async fn enforce_role(&self, uid: &i64, workspace_id: Uuid, role: AFRole)
-    -> Result<(), AppError>;
+  async fn enforce_role(
+    &self,
+    uid: &i64,
+    workspace_id: &Uuid,
+    role: AFRole,
+  ) -> Result<(), AppError>;

   /// Check if the user can perform action on the workspace.
   /// Returns AppError::NotEnoughPermission if the user does not have the role.
   async fn enforce_action(
     &self,
     uid: &i64,
-    workspace_id: Uuid,
+    workspace_id: &Uuid,
     action: Action,
   ) -> Result<(), AppError>;
@@ -22,6 +22,7 @@ futures = "0.3.30"
 bytes.workspace = true
 pin-project = "1.1.5"
 ureq = { version = "2.12.1", optional = true, features = ["json"] }
+uuid.workspace = true

 [dev-dependencies]
 appflowy-ai-client = { path = ".", features = ["dto", "client-api"] }
@@ -3,6 +3,8 @@ use serde_json::json;
 use serde_repr::{Deserialize_repr, Serialize_repr};
 use std::collections::HashMap;
 use std::fmt::{Display, Formatter};
+use uuid::Uuid;
+
 pub const STREAM_METADATA_KEY: &str = "0";
 pub const STREAM_ANSWER_KEY: &str = "1";
 pub const STREAM_IMAGE_KEY: &str = "2";
@@ -439,7 +441,7 @@ pub struct CustomPrompt {

 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct CalculateSimilarityParams {
-  pub workspace_id: String,
+  pub workspace_id: Uuid,
   pub input: String,
   pub expected: String,
   pub use_embedding: bool,
@@ -459,11 +461,11 @@ pub struct CompletionMessage {
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct CompletionMetadata {
   /// A unique identifier for the object. Object could be a document id.
-  pub object_id: String,
+  pub object_id: Uuid,
   /// The workspace identifier.
   ///
   /// This field must be provided when generating images. We use workspace ID to track image usage.
-  pub workspace_id: Option<String>,
+  pub workspace_id: Option<Uuid>,
   /// A list of relevant document IDs.
   ///
   /// When using completions for document-related tasks, this should include the document ID.
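A note on wire compatibility for these DTO changes: `uuid::Uuid` serializes through serde as the usual hyphenated string (given the uuid crate's `serde` feature), so switching `object_id` and `workspace_id` from `String` to `Uuid` keeps the JSON shape identical while rejecting malformed ids at deserialization time. A small illustrative round-trip; the struct here is hypothetical, not the crate's:

use serde::{Deserialize, Serialize};
use uuid::Uuid;

#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Meta {
  object_id: Uuid, // serialized as e.g. "67e55044-10b1-426f-9247-bb680e5fe0c8"
}

fn main() {
  let meta = Meta { object_id: Uuid::new_v4() };
  let json = serde_json::to_string(&meta).unwrap();
  let back: Meta = serde_json::from_str(&json).unwrap();
  assert_eq!(back, meta);
}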
@@ -1,6 +1,6 @@
 use uuid::Uuid;

-pub fn user_awareness_object_id(user_uuid: &Uuid, workspace_id: &str) -> Uuid {
+pub fn user_awareness_object_id(user_uuid: &Uuid, workspace_id: &Uuid) -> Uuid {
   Uuid::new_v5(
     user_uuid,
     format!("user_awareness:{}", workspace_id).as_bytes(),
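The function above derives the awareness object id deterministically: a UUIDv5 with the user's UUID as namespace over the byte string `user_awareness:<workspace_id>`. The same user and workspace always yield the same object id, which is what lets callers look the collab up without storing the id anywhere. A small usage sketch (values illustrative):

use uuid::Uuid;

fn main() {
  let user = Uuid::new_v4();
  let workspace = Uuid::new_v4();
  let a = Uuid::new_v5(&user, format!("user_awareness:{}", workspace).as_bytes());
  let b = Uuid::new_v5(&user, format!("user_awareness:{}", workspace).as_bytes());
  assert_eq!(a, b); // deterministic: same inputs, same derived id
}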
@@ -68,12 +68,11 @@ pub fn localhost_client_with_device_id(device_id: &str) -> Client {
   )
 }

-pub async fn workspace_id_from_client(c: &Client) -> String {
+pub async fn workspace_id_from_client(c: &Client) -> Uuid {
   c.get_workspaces()
     .await
     .unwrap()
     .first()
     .unwrap()
     .workspace_id
-    .to_string()
 }
@@ -11,10 +11,11 @@ use database_entity::dto::QueryCollabResult::{Failed, Success};
 use database_entity::dto::{QueryCollab, QueryCollabParams};
 use std::sync::Arc;
 use tracing::error;
+use uuid::Uuid;

 pub struct TestDatabaseCollabService {
   pub api_client: client_api::Client,
-  pub workspace_id: String,
+  pub workspace_id: Uuid,
 }

 #[async_trait]
@@ -28,9 +29,9 @@ impl DatabaseCollabService for TestDatabaseCollabService {
     let encoded_collab = match encoded_collab {
       None => {
         let params = QueryCollabParams {
-          workspace_id: self.workspace_id.clone(),
+          workspace_id: self.workspace_id,
           inner: QueryCollab {
-            object_id: object_id.to_string(),
+            object_id: object_id.parse()?,
             collab_type: object_type,
           },
         };
@@ -62,7 +63,10 @@ impl DatabaseCollabService for TestDatabaseCollabService {
   ) -> Result<EncodeCollabByOid, DatabaseError> {
     let params = object_ids
       .into_iter()
-      .map(|object_id| QueryCollab::new(object_id, collab_type))
+      .flat_map(|object_id| match Uuid::parse_str(&object_id) {
+        Ok(object_id) => Ok(QueryCollab::new(object_id, collab_type)),
+        Err(err) => Err(err),
+      })
       .collect();
     let results = self
       .api_client
@@ -76,7 +80,7 @@ impl DatabaseCollabService for TestDatabaseCollabService {
     .flat_map(|(object_id, result)| match result {
       Success { encode_collab_v1 } => match EncodedCollab::decode_from_bytes(&encode_collab_v1)
       {
-        Ok(encode) => Some((object_id, encode)),
+        Ok(encode) => Some((object_id.to_string(), encode)),
         Err(err) => {
           error!("Failed to decode collab: {}", err);
           None
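Note that the `flat_map` in the hunk above silently drops any id that fails `Uuid::parse_str`, which is acceptable in a test helper but worth keeping in mind. A standalone illustration of the pattern (inputs made up):

use uuid::Uuid;

fn main() {
  let raw_ids = ["0c3e9473-8c4d-4c6e-9f2a-1a2b3c4d5e6f", "not-a-uuid"];
  // Result implements IntoIterator, so flat_map keeps Ok values and skips Err.
  let parsed: Vec<Uuid> = raw_ids
    .iter()
    .flat_map(|s| Uuid::parse_str(s))
    .collect();
  assert_eq!(parsed.len(), 1);
}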
@@ -61,7 +61,7 @@ pub struct TestClient {
   pub user: User,
   pub ws_client: WSClient,
   pub api_client: client_api::Client,
-  pub collabs: HashMap<String, TestCollab>,
+  pub collabs: HashMap<Uuid, TestCollab>,
   pub device_id: String,
 }
 pub struct TestCollab {
@@ -87,7 +87,7 @@ impl TestClient {
     Self::new_with_device_id(&device_id, registered_user, start_ws_conn).await
   }

-  pub async fn insert_into<S: Prelim>(&self, object_id: &str, key: &str, value: S) {
+  pub async fn insert_into<S: Prelim>(&self, object_id: &Uuid, key: &str, value: S) {
     let mut lock = self.collabs.get(object_id).unwrap().collab.write().await;
     let collab = (*lock).borrow_mut();
     collab.insert(key, value);
@@ -143,14 +143,14 @@ impl TestClient {

   pub async fn insert_view_to_general_space(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     view_id: &str,
     view_name: &str,
     view_layout: ViewLayout,
   ) {
-    let mut folder = self.get_folder(workspace_id).await;
+    let mut folder = self.get_folder(*workspace_id).await;
     let general_space_id = folder
-      .get_view(workspace_id)
+      .get_view(&workspace_id.to_string())
       .unwrap()
       .children
       .first()
@@ -185,14 +185,14 @@ impl TestClient {
       .unwrap();
   }

-  pub async fn get_folder(&self, workspace_id: &str) -> Folder {
+  pub async fn get_folder(&self, workspace_id: Uuid) -> Folder {
     let uid = self.uid().await;
     let folder_collab = self
       .api_client
       .get_collab(QueryCollabParams::new(
-        workspace_id.to_string(),
+        workspace_id,
         CollabType::Folder,
-        workspace_id.to_string(),
+        workspace_id,
       ))
       .await
       .unwrap()
@@ -201,61 +201,56 @@ impl TestClient {
       uid,
       CollabOrigin::Client(CollabClient::new(uid, self.device_id.clone())),
       folder_collab.into(),
-      workspace_id,
+      &workspace_id.to_string(),
       vec![],
     )
     .unwrap()
   }

-  pub async fn get_database(&self, workspace_id: &str, database_id: &str) -> Database {
+  pub async fn get_database(&self, workspace_id: Uuid, database_id: &str) -> Database {
     let service = TestDatabaseCollabService {
       api_client: self.api_client.clone(),
-      workspace_id: workspace_id.to_string(),
+      workspace_id,
     };
     let context = DatabaseContext::new(Arc::new(service));
     Database::open(database_id, context).await.unwrap()
   }

-  pub async fn get_document(&self, workspace_id: &str, document_id: &str) -> Document {
+  pub async fn get_document(&self, workspace_id: Uuid, document_id: Uuid) -> Document {
     let collab = self
-      .get_collab_to_collab(
-        workspace_id.to_string(),
-        document_id.to_string(),
-        CollabType::Document,
-      )
+      .get_collab_to_collab(workspace_id, document_id, CollabType::Document)
       .await
       .unwrap();
     Document::open(collab).unwrap()
   }

-  pub async fn get_workspace_database(&self, workspace_id: &str) -> WorkspaceDatabase {
+  pub async fn get_workspace_database(&self, workspace_id: Uuid) -> WorkspaceDatabase {
     let workspaces = self.api_client.get_workspaces().await.unwrap();
     let workspace_database_id = workspaces
       .iter()
-      .find(|w| w.workspace_id.to_string() == workspace_id)
+      .find(|w| w.workspace_id == workspace_id)
       .unwrap()
-      .database_storage_id
-      .to_string();
+      .database_storage_id;

     let collab = self
       .api_client
       .get_collab(QueryCollabParams::new(
-        workspace_database_id.clone(),
+        workspace_database_id,
         CollabType::WorkspaceDatabase,
-        workspace_id.to_string(),
+        workspace_id,
       ))
       .await
       .unwrap();

     WorkspaceDatabase::from_collab_doc_state(
-      &workspace_database_id,
+      &workspace_database_id.to_string(),
       CollabOrigin::Empty,
       collab.encode_collab.into(),
     )
     .unwrap()
   }

-  pub async fn get_connect_users(&self, object_id: &str) -> Vec<i64> {
+  pub async fn get_connect_users(&self, object_id: &Uuid) -> Vec<i64> {
     #[derive(Deserialize)]
     struct UserId {
       pub uid: i64,
@@ -275,14 +270,14 @@ impl TestClient {
       .collect()
   }

-  pub async fn clean_awareness_state(&self, object_id: &str) {
+  pub async fn clean_awareness_state(&self, object_id: &Uuid) {
     let test_collab = self.collabs.get(object_id).unwrap();
     let mut lock = test_collab.collab.write().await;
     let collab = (*lock).borrow_mut();
     collab.clean_awareness_state();
   }

-  pub async fn emit_awareness_state(&self, object_id: &str) {
+  pub async fn emit_awareness_state(&self, object_id: &Uuid) {
     let test_collab = self.collabs.get(object_id).unwrap();
     let mut lock = test_collab.collab.write().await;
     let collab = (*lock).borrow_mut();
@@ -297,7 +292,7 @@ impl TestClient {
     self.api_client.get_user_workspace_info().await.unwrap()
   }

-  pub async fn open_workspace(&self, workspace_id: &str) -> AFWorkspace {
+  pub async fn open_workspace(&self, workspace_id: &Uuid) -> AFWorkspace {
     self.api_client.open_workspace(workspace_id).await.unwrap()
   }

@@ -307,9 +302,9 @@ impl TestClient {
     let data = self
       .api_client
       .get_collab(QueryCollabParams::new(
-        &workspace_id,
+        workspace_id,
         CollabType::Folder,
-        &workspace_id,
+        workspace_id,
       ))
       .await
       .unwrap();
@@ -318,20 +313,16 @@ impl TestClient {
       uid,
       CollabOrigin::Empty,
       data.encode_collab.into(),
-      &workspace_id,
+      &workspace_id.to_string(),
       vec![],
     )
     .unwrap()
   }

-  pub async fn get_workspace_database_collab(&self, workspace_id: &str) -> Collab {
-    let db_storage_id = self.open_workspace(workspace_id).await.database_storage_id;
+  pub async fn get_workspace_database_collab(&self, workspace_id: Uuid) -> Collab {
+    let db_storage_id = self.open_workspace(&workspace_id).await.database_storage_id;
     let collab_resp = self
-      .get_collab(
-        workspace_id.to_string(),
-        db_storage_id.to_string(),
-        CollabType::WorkspaceDatabase,
-      )
+      .get_collab(workspace_id, db_storage_id, CollabType::WorkspaceDatabase)
       .await
       .unwrap();
     Collab::new_with_source(
@@ -344,18 +335,14 @@ impl TestClient {
     .unwrap()
   }

-  pub async fn create_document_collab(&self, workspace_id: &str, object_id: &str) -> Document {
+  pub async fn create_document_collab(&self, workspace_id: Uuid, object_id: Uuid) -> Document {
     let collab_resp = self
-      .get_collab(
-        workspace_id.to_string(),
-        object_id.to_string(),
-        CollabType::Document,
-      )
+      .get_collab(workspace_id, object_id, CollabType::Document)
       .await
       .unwrap();
     let collab = Collab::new_with_source(
       CollabOrigin::Server,
-      object_id,
+      &object_id.to_string(),
       collab_resp.encode_collab.into(),
       vec![],
       false,
@@ -364,7 +351,7 @@ impl TestClient {
     Document::open(collab).unwrap()
   }

-  pub async fn get_db_collab_from_view(&mut self, workspace_id: &str, view_id: &str) -> Collab {
+  pub async fn get_db_collab_from_view(&mut self, workspace_id: Uuid, view_id: &Uuid) -> Collab {
     let ws_db_collab = self.get_workspace_database_collab(workspace_id).await;
     let ws_db_body = WorkspaceDatabase::open(ws_db_collab).unwrap();
     let db_id = ws_db_body
@@ -372,18 +359,16 @@ impl TestClient {
       .into_iter()
       .find(|db_meta| db_meta.linked_views.contains(&view_id.to_string()))
       .unwrap()
-      .database_id;
+      .database_id
+      .parse::<Uuid>()
+      .unwrap();
     let db_collab_collab_resp = self
-      .get_collab(
-        workspace_id.to_string(),
-        db_id.clone(),
-        CollabType::Database,
-      )
+      .get_collab(workspace_id, db_id, CollabType::Database)
       .await
       .unwrap();
     Collab::new_with_source(
       CollabOrigin::Server,
-      &db_id,
+      &db_id.to_string(),
       db_collab_collab_resp.encode_collab.into(),
       vec![],
       false,
|
|||
pub async fn get_user_awareness(&self) -> UserAwareness {
|
||||
let workspace_id = self.workspace_id().await;
|
||||
let profile = self.get_user_profile().await;
|
||||
let awareness_object_id = user_awareness_object_id(&profile.uuid, &workspace_id).to_string();
|
||||
let awareness_object_id = user_awareness_object_id(&profile.uuid, &workspace_id);
|
||||
let data = self
|
||||
.api_client
|
||||
.get_collab(QueryCollabParams::new(
|
||||
&awareness_object_id,
|
||||
awareness_object_id,
|
||||
CollabType::UserAwareness,
|
||||
&workspace_id,
|
||||
workspace_id,
|
||||
))
|
||||
.await
|
||||
.unwrap();
|
||||
let collab = Collab::new_with_source(
|
||||
CollabOrigin::Empty,
|
||||
&awareness_object_id,
|
||||
&awareness_object_id.to_string(),
|
||||
DataSource::DocStateV1(data.encode_collab.doc_state.to_vec()),
|
||||
vec![],
|
||||
false,
|
||||
|
@ -418,11 +403,10 @@ impl TestClient {
|
|||
|
||||
pub async fn try_update_workspace_member(
|
||||
&self,
|
||||
workspace_id: &str,
|
||||
workspace_id: &Uuid,
|
||||
other_client: &TestClient,
|
||||
role: AFRole,
|
||||
) -> Result<(), AppResponseError> {
|
||||
let workspace_id = Uuid::parse_str(workspace_id).unwrap().to_string();
|
||||
let email = other_client.email().await;
|
||||
self
|
||||
.api_client
|
||||
|
@ -435,7 +419,7 @@ impl TestClient {
|
|||
|
||||
pub async fn invite_and_accepted_workspace_member(
|
||||
&self,
|
||||
workspace_id: &str,
|
||||
workspace_id: &Uuid,
|
||||
other_client: &TestClient,
|
||||
role: AFRole,
|
||||
) -> Result<(), AppResponseError> {
|
||||
|
@ -462,7 +446,7 @@ impl TestClient {
|
|||
|
||||
let target_invitation = invitations
|
||||
.iter()
|
||||
.find(|inv| inv.workspace_id.to_string().as_str() == workspace_id)
|
||||
.find(|inv| &inv.workspace_id == workspace_id)
|
||||
.unwrap();
|
||||
|
||||
other_client
|
||||
|
@ -476,17 +460,17 @@ impl TestClient {
|
|||
|
||||
pub async fn try_remove_workspace_member(
|
||||
&self,
|
||||
workspace_id: &str,
|
||||
workspace_id: &Uuid,
|
||||
other_client: &TestClient,
|
||||
) -> Result<(), AppResponseError> {
|
||||
let email = other_client.email().await;
|
||||
self
|
||||
.api_client
|
||||
.remove_workspace_members(workspace_id.to_string(), vec![email])
|
||||
.remove_workspace_members(workspace_id, vec![email])
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn get_workspace_members(&self, workspace_id: &str) -> Vec<AFWorkspaceMember> {
|
||||
pub async fn get_workspace_members(&self, workspace_id: &Uuid) -> Vec<AFWorkspaceMember> {
|
||||
self
|
||||
.api_client
|
||||
.get_workspace_members(workspace_id)
|
||||
|
@@ -496,14 +480,14 @@ impl TestClient {

   pub async fn try_get_workspace_members(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
   ) -> Result<Vec<AFWorkspaceMember>, AppResponseError> {
     self.api_client.get_workspace_members(workspace_id).await
   }

-  pub async fn get_workspace_member(&self, workspace_id: &str, user_id: i64) -> AFWorkspaceMember {
+  pub async fn get_workspace_member(&self, workspace_id: Uuid, user_id: i64) -> AFWorkspaceMember {
     let params = QueryWorkspaceMember {
-      workspace_id: workspace_id.to_string(),
+      workspace_id,
       uid: user_id,
     };
     self.api_client.get_workspace_member(params).await.unwrap()
@@ -511,18 +495,18 @@ impl TestClient {

   pub async fn try_get_workspace_member(
     &self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     user_id: i64,
   ) -> Result<AFWorkspaceMember, AppResponseError> {
     let params = QueryWorkspaceMember {
-      workspace_id: workspace_id.to_string(),
+      workspace_id,
       uid: user_id,
     };

     self.api_client.get_workspace_member(params).await
   }

-  pub async fn wait_object_sync_complete(&self, object_id: &str) -> Result<(), Error> {
+  pub async fn wait_object_sync_complete(&self, object_id: &Uuid) -> Result<(), Error> {
     self
       .wait_object_sync_complete_with_secs(object_id, 60)
       .await
@@ -530,7 +514,7 @@ impl TestClient {

   pub async fn wait_object_sync_complete_with_secs(
     &self,
-    object_id: &str,
+    object_id: &Uuid,
     secs: u64,
   ) -> Result<(), Error> {
     let mut sync_state = {
@@ -551,7 +535,7 @@ impl TestClient {
   }

   #[allow(dead_code)]
-  pub async fn get_blob_metadata(&self, workspace_id: &str, file_id: &str) -> BlobMetadata {
+  pub async fn get_blob_metadata(&self, workspace_id: &Uuid, file_id: &str) -> BlobMetadata {
     let url = self.api_client.get_blob_url(workspace_id, file_id);
     self.api_client.get_blob_metadata(&url).await.unwrap()
   }
@@ -577,7 +561,7 @@ impl TestClient {
     .unwrap()
   }

-  pub async fn workspace_id(&self) -> String {
+  pub async fn workspace_id(&self) -> Uuid {
     self
       .api_client
       .get_workspaces()
@@ -586,7 +570,6 @@ impl TestClient {
     .first()
     .unwrap()
     .workspace_id
-    .to_string()
   }

   pub async fn email(&self) -> String {
@@ -603,7 +586,7 @@ impl TestClient {

   pub async fn wait_until_all_embedding(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     query: Vec<EmbeddedCollabQuery>,
   ) -> Vec<AFCollabEmbedInfo> {
     let timeout_duration = Duration::from_secs(30);
@@ -629,11 +612,11 @@ impl TestClient {
     }
   }

-  pub async fn wait_until_get_embedding(&self, workspace_id: &str, object_id: &str) {
+  pub async fn wait_until_get_embedding(&self, workspace_id: &Uuid, object_id: &Uuid) {
     let result = timeout(Duration::from_secs(30), async {
       while self
         .api_client
-        .get_collab_embed_info(workspace_id, object_id, CollabType::Document)
+        .get_collab_embed_info(workspace_id, object_id)
         .await
         .is_err()
       {
@@ -641,7 +624,7 @@ impl TestClient {
       }
       self
         .api_client
-        .get_collab_embed_info(workspace_id, object_id, CollabType::Document)
+        .get_collab_embed_info(workspace_id, object_id)
         .await
     })
     .await;
@@ -655,7 +638,7 @@ impl TestClient {

   pub async fn wait_unit_get_search_result(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     query: &str,
     limit: u32,
   ) -> Vec<SearchDocumentResponseItem> {
@@ -681,14 +664,14 @@ impl TestClient {

   pub async fn assert_similarity(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     input: &str,
     expected: &str,
     score: f64,
     use_embedding: bool,
   ) {
     let params = CalculateSimilarityParams {
-      workspace_id: workspace_id.to_string(),
+      workspace_id: *workspace_id,
       input: input.to_string(),
       expected: expected.to_string(),
       use_embedding,
@@ -706,8 +689,8 @@ impl TestClient {

   pub async fn get_snapshot(
     &self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: &Uuid,
+    object_id: &Uuid,
     snapshot_id: &i64,
   ) -> Result<SnapshotData, AppResponseError> {
     self
@@ -724,8 +707,8 @@ impl TestClient {

   pub async fn create_snapshot(
     &self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: &Uuid,
+    object_id: &Uuid,
     collab_type: CollabType,
   ) -> Result<AFSnapshotMeta, AppResponseError> {
     self
@@ -736,8 +719,8 @@ impl TestClient {

   pub async fn get_snapshot_list(
     &self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: &Uuid,
+    object_id: &Uuid,
   ) -> Result<AFSnapshotMetas, AppResponseError> {
     self
       .api_client
@@ -747,8 +730,8 @@ impl TestClient {

   pub async fn get_snapshot_list_until(
     &self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: &Uuid,
+    object_id: &Uuid,
     f: impl Fn(&AFSnapshotMetas) -> bool + Send + Sync + 'static,
     timeout_secs: u64,
   ) -> Result<AFSnapshotMetas, AppResponseError> {
@@ -772,7 +755,7 @@ impl TestClient {

   pub async fn create_collab_list(
     &mut self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     params: Vec<CollabParams>,
   ) -> Result<(), AppResponseError> {
     self
@@ -783,8 +766,8 @@ impl TestClient {

   pub async fn get_collab(
     &self,
-    workspace_id: String,
-    object_id: String,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
   ) -> Result<CollabResponse, AppResponseError> {
     self
@@ -801,16 +784,16 @@ impl TestClient {

   pub async fn get_collab_to_collab(
     &self,
-    workspace_id: String,
-    object_id: String,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
   ) -> Result<Collab, AppResponseError> {
     let resp = self
-      .get_collab(workspace_id, object_id.clone(), collab_type)
+      .get_collab(workspace_id, object_id, collab_type)
       .await?;
     let collab = Collab::new_with_source(
       CollabOrigin::Server,
-      &object_id,
+      &object_id.to_string(),
       resp.encode_collab.into(),
       vec![],
       false,
@@ -821,7 +804,7 @@ impl TestClient {

   pub async fn batch_get_collab(
     &mut self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     params: Vec<QueryCollab>,
   ) -> Result<BatchQueryCollabResult, AppResponseError> {
     self.api_client.batch_get_collab(workspace_id, params).await
@@ -830,12 +813,12 @@ impl TestClient {
   #[allow(clippy::await_holding_lock)]
   pub async fn create_and_edit_collab(
     &mut self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     collab_type: CollabType,
-  ) -> String {
-    let object_id = Uuid::new_v4().to_string();
+  ) -> Uuid {
+    let object_id = Uuid::new_v4();
     self
-      .create_and_edit_collab_with_data(&object_id, workspace_id, collab_type, None)
+      .create_and_edit_collab_with_data(object_id, workspace_id, collab_type, None)
       .await;
     object_id
   }
@@ -843,18 +826,18 @@ impl TestClient {
   #[allow(unused_variables)]
   pub async fn create_and_edit_collab_with_data(
     &mut self,
-    object_id: &str,
-    workspace_id: &str,
+    object_id: Uuid,
+    workspace_id: Uuid,
     collab_type: CollabType,
     encoded_collab_v1: Option<EncodedCollab>,
   ) {
     // Subscribe to object
     let origin = CollabOrigin::Client(CollabClient::new(self.uid().await, self.device_id.clone()));
     let mut collab = match encoded_collab_v1 {
-      None => Collab::new_with_origin(origin.clone(), object_id, vec![], false),
+      None => Collab::new_with_origin(origin.clone(), object_id.to_string(), vec![], false),
       Some(data) => Collab::new_with_source(
         origin.clone(),
-        object_id,
+        &object_id.to_string(),
         DataSource::DocStateV1(data.doc_state.to_vec()),
         vec![],
         false,
@@ -872,10 +855,10 @@ impl TestClient {
     self
       .api_client
       .create_collab(CreateCollabParams {
-        object_id: object_id.to_string(),
+        object_id,
         encoded_collab_v1,
         collab_type,
-        workspace_id: workspace_id.to_string(),
+        workspace_id,
       })
       .await
       .unwrap();
@@ -911,11 +894,11 @@ impl TestClient {
       collab.initialize();
     }
     let test_collab = TestCollab { origin, collab };
-    self.collabs.insert(object_id.to_string(), test_collab);
-    self.wait_object_sync_complete(object_id).await.unwrap();
+    self.collabs.insert(object_id, test_collab);
+    self.wait_object_sync_complete(&object_id).await.unwrap();
   }

-  pub async fn open_workspace_collab(&mut self, workspace_id: &str) {
+  pub async fn open_workspace_collab(&mut self, workspace_id: Uuid) {
     self
       .open_collab(workspace_id, workspace_id, CollabType::Unknown)
       .await;
@@ -924,8 +907,8 @@ impl TestClient {
   #[allow(clippy::await_holding_lock)]
   pub async fn open_collab(
     &mut self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
   ) {
     self
@@ -936,8 +919,8 @@ impl TestClient {
   #[allow(unused_variables)]
   pub async fn open_collab_with_doc_state(
     &mut self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
     doc_state: Vec<u8>,
   ) {
@@ -945,7 +928,7 @@ impl TestClient {
     let origin = CollabOrigin::Client(CollabClient::new(self.uid().await, self.device_id.clone()));
     let mut collab = Collab::new_with_source(
       origin.clone(),
-      object_id,
+      &object_id.to_string(),
       DataSource::DocStateV1(doc_state),
       vec![],
       false,
@@ -985,14 +968,14 @@ impl TestClient {
       collab.initialize();
     }
     let test_collab = TestCollab { origin, collab };
-    self.collabs.insert(object_id.to_string(), test_collab);
+    self.collabs.insert(object_id, test_collab);
   }

   #[allow(unused_variables)]
   pub async fn create_collab_with_data(
     &mut self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
     encoded_collab_v1: EncodedCollab,
   ) -> Result<(), AppResponseError> {
@@ -1000,7 +983,7 @@ impl TestClient {
     let origin = CollabOrigin::Client(CollabClient::new(self.uid().await, self.device_id.clone()));
     let collab = Collab::new_with_source(
       origin.clone(),
-      object_id,
+      &object_id.to_string(),
       DataSource::DocStateV1(encoded_collab_v1.doc_state.to_vec()),
       vec![],
       false,
@@ -1016,10 +999,10 @@ impl TestClient {
     self
       .api_client
      .create_collab(CreateCollabParams {
-        object_id: object_id.to_string(),
+        object_id,
         encoded_collab_v1,
         collab_type,
-        workspace_id: workspace_id.to_string(),
+        workspace_id,
       })
       .await
   }
@@ -1041,7 +1024,7 @@ impl TestClient {
     self.ws_client.connect().await.unwrap();
   }

-  pub async fn get_edit_collab_json(&self, object_id: &str) -> Value {
+  pub async fn get_edit_collab_json(&self, object_id: &Uuid) -> Value {
     let lock = self.collabs.get(object_id).unwrap().collab.read().await;
     lock.to_json_value()
   }
@@ -1049,7 +1032,7 @@ impl TestClient {
   /// data: [(view_id, meta_json, blob_hex)]
   pub async fn publish_collabs(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     data: Vec<(Uuid, &str, &str)>,
     comments_enabled: bool,
     duplicate_enabled: bool,
@@ -1081,17 +1064,17 @@ impl TestClient {

   pub async fn duplicate_published_to_workspace(
     &self,
-    dest_workspace_id: &str,
-    src_view_id: &str,
-    dest_view_id: &str,
+    dest_workspace_id: Uuid,
+    src_view_id: Uuid,
+    dest_view_id: Uuid,
   ) {
     self
       .api_client
       .duplicate_published_to_workspace(
         dest_workspace_id,
         &PublishedDuplicate {
-          published_view_id: src_view_id.to_string(),
-          dest_view_id: dest_view_id.to_string(),
+          published_view_id: src_view_id,
+          dest_view_id,
         },
       )
       .await
@@ -1104,20 +1087,18 @@ impl TestClient {

 pub async fn assert_server_snapshot(
   client: &client_api::Client,
-  workspace_id: &str,
-  object_id: &str,
+  workspace_id: &Uuid,
+  object_id: &Uuid,
   snapshot_id: &i64,
   expected: Value,
 ) {
-  let workspace_id = workspace_id.to_string();
-  let object_id = object_id.to_string();
   let mut retry_count = 0;
   loop {
     tokio::select! {
       _ = tokio::time::sleep(Duration::from_secs(10)) => {
         panic!("Query snapshot timeout");
       },
-      result = client.get_snapshot(&workspace_id, &object_id, QuerySnapshotParams {snapshot_id: *snapshot_id },
+      result = client.get_snapshot(workspace_id, object_id, QuerySnapshotParams {snapshot_id: *snapshot_id },
       ) => {
         retry_count += 1;
         match &result {
@@ -1126,7 +1107,7 @@ pub async fn assert_server_snapshot(
           EncodedCollab::decode_from_bytes(&snapshot_data.encoded_collab_v1).unwrap();
         let json = Collab::new_with_source(
           CollabOrigin::Empty,
-          &object_id,
+          &object_id.to_string(),
           DataSource::DocStateV1(encoded_collab_v1.doc_state.to_vec()),
           vec![],
           false,
@ -1156,15 +1137,15 @@ pub async fn assert_server_snapshot(
|
|||
}
|
||||
|
||||
pub async fn assert_server_collab(
|
||||
workspace_id: &str,
|
||||
workspace_id: Uuid,
|
||||
client: &mut client_api::Client,
|
||||
object_id: &str,
|
||||
collab_type: CollabType,
|
||||
object_id: Uuid,
|
||||
collab_type: &CollabType,
|
||||
timeout_secs: u64,
|
||||
expected: Value,
|
||||
) -> Result<(), Error> {
|
||||
let duration = Duration::from_secs(timeout_secs);
|
||||
let object_id = object_id.to_string();
|
||||
let collab_type = *collab_type;
|
||||
let final_json = Arc::new(Mutex::from(json!({})));
|
||||
|
||||
// Use tokio::time::timeout to apply a timeout to the entire operation
|
||||
|
@ -1172,18 +1153,14 @@ pub async fn assert_server_collab(
|
|||
let operation = async {
|
||||
loop {
|
||||
let result = client
|
||||
.get_collab(QueryCollabParams::new(
|
||||
&object_id,
|
||||
collab_type,
|
||||
workspace_id,
|
||||
))
|
||||
.get_collab(QueryCollabParams::new(object_id, collab_type, workspace_id))
|
||||
.await;
|
||||
|
||||
match &result {
|
||||
Ok(data) => {
|
||||
let json = Collab::new_with_source(
|
||||
CollabOrigin::Empty,
|
||||
&object_id,
|
||||
&object_id.to_string(),
|
||||
DataSource::DocStateV1(data.encode_collab.doc_state.to_vec()),
|
||||
vec![],
|
||||
false,
|
||||
|
@ -1219,12 +1196,11 @@ pub async fn assert_server_collab(
|
|||
|
||||
pub async fn assert_client_collab_within_secs(
|
||||
client: &mut TestClient,
|
||||
object_id: &str,
|
||||
object_id: &Uuid,
|
||||
key: &str,
|
||||
expected: Value,
|
||||
secs: u64,
|
||||
) {
|
||||
let object_id = object_id.to_string();
|
||||
let mut retry_count = 0;
|
||||
loop {
|
||||
tokio::select! {
|
||||
|
@ -1234,7 +1210,7 @@ pub async fn assert_client_collab_within_secs(
|
|||
json = async {
|
||||
let lock = client
|
||||
.collabs
|
||||
.get_mut(&object_id)
|
||||
.get_mut(object_id)
|
||||
.unwrap()
|
||||
.collab
|
||||
.read()
|
||||
|
@ -1257,11 +1233,10 @@ pub async fn assert_client_collab_within_secs(
|
|||
|
||||
pub async fn assert_client_collab_include_value(
|
||||
client: &mut TestClient,
|
||||
object_id: &str,
|
||||
object_id: &Uuid,
|
||||
expected: Value,
|
||||
) -> Result<(), Error> {
|
||||
let secs = 60;
|
||||
let object_id = object_id.to_string();
|
||||
let mut retry_count = 0;
|
||||
loop {
|
||||
tokio::select! {
|
||||
|
@ -1271,7 +1246,7 @@ pub async fn assert_client_collab_include_value(
|
|||
json = async {
|
||||
let lock = client
|
||||
.collabs
|
||||
.get_mut(&object_id)
|
||||
.get_mut(object_id)
|
||||
.unwrap()
|
||||
.collab
|
||||
.read()
|
||||
|
@ -1292,29 +1267,6 @@ pub async fn assert_client_collab_include_value(
|
|||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub async fn get_collab_json_from_server(
|
||||
client: &client_api::Client,
|
||||
workspace_id: &str,
|
||||
object_id: &str,
|
||||
collab_type: CollabType,
|
||||
) -> Value {
|
||||
let bytes = client
|
||||
.get_collab(QueryCollabParams::new(object_id, collab_type, workspace_id))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
Collab::new_with_source(
|
||||
CollabOrigin::Empty,
|
||||
object_id,
|
||||
DataSource::DocStateV1(bytes.encode_collab.doc_state.to_vec()),
|
||||
vec![],
|
||||
false,
|
||||
)
|
||||
.unwrap()
|
||||
.to_json_value()
|
||||
}
|
||||
|
||||
pub async fn collect_answer(mut stream: QuestionStream) -> String {
|
||||
let mut answer = String::new();
|
||||
while let Some(value) = stream.next().await {
|
||||
|
|
|
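The assertion helpers above now take typed ids. A minimal caller-side sketch (the id string and the Document collab type are hypothetical): parse once at the boundary, then hand Uuid values straight to the helpers.

use collab_entity::CollabType;
use serde_json::json;
use uuid::Uuid;

async fn check(client: &mut client_api::Client) -> Result<(), anyhow::Error> {
  // Parse once; a malformed id fails here instead of deep inside a helper.
  let workspace_id: Uuid = "00000000-0000-0000-0000-000000000000".parse()?;
  let object_id = Uuid::new_v4();
  assert_server_collab(workspace_id, client, object_id, &CollabType::Document, 60, json!({})).await
}
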
@@ -12,6 +12,7 @@ use futures_util::{SinkExt, StreamExt};
use tokio::select;
use tokio_util::sync::CancellationToken;
use tracing::{error, instrument, trace, warn};
+ use uuid::Uuid;
use yrs::encoding::read::Cursor;
use yrs::updates::decoder::DecoderV1;
use yrs::updates::encoder::Encode;
@@ -31,7 +32,7 @@ pub type CollabRef = Weak<RwLock<dyn BorrowMut<Collab> + Send + Sync + 'static>>
/// Use to continuously receive updates from remote.
pub struct ObserveCollab<Sink, Stream> {
- object_id: String,
+ object_id: Uuid,
#[allow(dead_code)]
weak_collab: CollabRef,
phantom_sink: PhantomData<Sink>,
@@ -62,7 +63,7 @@ where
sink: Weak<CollabSink<Sink>>,
periodic_sync_interval: Option<Duration>,
) -> Self {
- let object_id = object.object_id.clone();
+ let object_id = object.object_id;
let cloned_weak_collab = weak_collab.clone() as CollabRef;
let seq_num_counter = Arc::new(SeqNumCounter::default());
let cloned_seq_num_counter = seq_num_counter.clone();
@@ -76,7 +77,7 @@ where
sink.clone(),
cloned_weak_collab.clone(),
interval,
- object_id.clone(),
+ object_id.to_string(),
));
}
tokio::spawn(ObserveCollab::<Sink, Stream>::observer_collab_message(
@@ -339,7 +340,7 @@ where
// before sending the SyncStep1 to the server.
if is_server_sync_step_1 && sync_object.collab_type == CollabType::Folder {
let lock = collab.read().await;
- validate_data_for_folder((*lock).borrow(), &sync_object.workspace_id)
+ validate_data_for_folder((*lock).borrow(), &sync_object.workspace_id.to_string())
.map_err(|err| SyncError::OverrideWithIncorrectData(err.to_string()))?;
}

@@ -347,19 +348,19 @@ where
.handle_message(&message_origin, &collab, msg)
.await?
{
- let object_id = sync_object.object_id.clone();
+ let object_id = sync_object.object_id;
sink.queue_msg(|msg_id| {
if is_server_sync_step_1 {
ClientCollabMessage::new_server_init_sync(ServerInit::new(
message_origin.clone(),
- object_id,
+ object_id.to_string(),
return_payload,
msg_id,
))
} else {
ClientCollabMessage::new_update_sync(UpdateSync::new(
message_origin.clone(),
- object_id,
+ object_id.to_string(),
return_payload,
msg_id,
))
@@ -450,7 +451,7 @@ impl SeqNumCounter {
/// messages may have been missed, and an error is returned.
pub fn check_broadcast_contiguous(
&self,
- _object_id: &str,
+ _object_id: &Uuid,
broadcast_seq_num: u32,
) -> Result<(), SyncError> {
let current = self.broadcast_seq_counter.load(Ordering::SeqCst);
@@ -467,7 +468,7 @@ impl SeqNumCounter {
Ok(())
}

- pub fn check_ack_broadcast_contiguous(&self, object_id: &str) -> Result<(), SyncError> {
+ pub fn check_ack_broadcast_contiguous(&self, object_id: &Uuid) -> Result<(), SyncError> {
let ack_seq_num = self.ack_seq_counter.load(Ordering::SeqCst);
let broadcast_seq_num = self.broadcast_seq_counter.load(Ordering::SeqCst);
if cfg!(feature = "sync_verbose_log") {

@@ -15,6 +15,7 @@ use tokio_retry::strategy::FixedInterval;
use tokio_retry::{Action, Condition, RetryIf};
use tokio_stream::StreamExt;
use tracing::{error, trace};
+ use uuid::Uuid;
use yrs::updates::encoder::Encode;

use client_api_entity::{CollabObject, CollabType};
@@ -170,7 +171,7 @@ where
self.sync_queue.queue_msg(|msg_id| {
let update_sync = UpdateSync::new(
origin.clone(),
- self.object.object_id.clone(),
+ self.object.object_id.to_string(),
payload,
msg_id,
);
@@ -219,36 +220,37 @@ where

#[derive(Clone, Debug)]
pub struct SyncObject {
- pub object_id: String,
- pub workspace_id: String,
+ pub object_id: Uuid,
+ pub workspace_id: Uuid,
pub collab_type: CollabType,
pub device_id: String,
}

impl SyncObject {
pub fn new(
- object_id: &str,
- workspace_id: &str,
+ object_id: Uuid,
+ workspace_id: Uuid,
collab_type: CollabType,
device_id: &str,
) -> Self {
Self {
- object_id: object_id.to_string(),
- workspace_id: workspace_id.to_string(),
+ object_id,
+ workspace_id,
collab_type,
device_id: device_id.to_string(),
}
}
}

- impl From<CollabObject> for SyncObject {
-   fn from(collab_object: CollabObject) -> Self {
-     Self {
-       object_id: collab_object.object_id,
-       workspace_id: collab_object.workspace_id,
+ impl TryFrom<CollabObject> for SyncObject {
+   type Error = anyhow::Error;
+   fn try_from(collab_object: CollabObject) -> Result<Self, Self::Error> {
+     Ok(Self {
+       object_id: Uuid::parse_str(&collab_object.object_id)?,
+       workspace_id: Uuid::parse_str(&collab_object.workspace_id)?,
collab_type: collab_object.collab_type,
device_id: collab_object.device_id,
-     }
+     })
}
}

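With TryFrom, a CollabObject whose string ids are not valid UUIDs is rejected at conversion time rather than carried along silently. A sketch of the caller-side change:

// Before: let sync_object = SyncObject::from(collab_object);
// After: the conversion is fallible and surfaces malformed ids as errors.
let sync_object = SyncObject::try_from(collab_object)?;
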
@@ -61,7 +61,6 @@ where
collab: CollabRef,
periodic_sync: Option<Duration>,
) -> Self {
- let protocol = ClientSyncProtocol;
let (notifier, notifier_rx) = watch::channel(SinkSignal::Proceed);
let (sync_state_tx, _) = broadcast::channel(10);
debug_assert!(origin.client_user_id().is_some());
@@ -78,8 +77,6 @@ where
tokio::spawn(CollabSinkRunner::run(Arc::downgrade(&sink), notifier_rx));

// Create the observe collab stream.
- let _cloned_protocol = protocol.clone();
- let _object_id = object.object_id.clone();
let stream = ObserveCollab::new(
origin.clone(),
object.clone(),
@@ -209,9 +206,9 @@ where
sink.queue_init_sync(|msg_id| {
let init_sync = InitSync::new(
origin,
- sync_object.object_id.clone(),
+ sync_object.object_id.to_string(),
sync_object.collab_type,
- sync_object.workspace_id.clone(),
+ sync_object.workspace_id.to_string(),
msg_id,
payload,
);
@@ -228,7 +225,7 @@ where
sink.queue_msg(|msg_id| {
let update_sync = UpdateSync::new(
origin.clone(),
- sync_object.object_id.clone(),
+ sync_object.object_id.to_string(),
update,
msg_id,
);
@@ -250,9 +247,9 @@ where
sink.queue_init_sync(|msg_id| {
let init_sync = InitSync::new(
origin,
- sync_object.object_id.clone(),
+ sync_object.object_id.to_string(),
sync_object.collab_type,
- sync_object.workspace_id.clone(),
+ sync_object.workspace_id.to_string(),
msg_id,
payload,
);

@@ -23,11 +23,15 @@ use parking_lot::RwLock;
use reqwest::Method;
use reqwest::RequestBuilder;

+ use crate::retry::{RefreshTokenAction, RefreshTokenRetryCondition};
+ use crate::ws::ConnectInfo;
use anyhow::anyhow;
+ use client_api_entity::SignUpResponse::{Authenticated, NotAuthenticated};
use client_api_entity::{
AFSnapshotMeta, AFSnapshotMetas, AFUserProfile, AFUserWorkspaceInfo, AFWorkspace,
QuerySnapshotParams, SnapshotData,
};
+ use client_api_entity::{GotrueTokenResponse, UpdateGotrueUserParams, User};
use semver::Version;
use shared_entity::dto::auth_dto::SignInTokenResponse;
use shared_entity::dto::auth_dto::UpdateUserParams;
@@ -40,11 +44,7 @@ use tokio_retry::strategy::FixedInterval;
use tokio_retry::RetryIf;
use tracing::{debug, error, event, info, instrument, trace, warn};
use url::Url;
-
- use crate::retry::{RefreshTokenAction, RefreshTokenRetryCondition};
- use crate::ws::ConnectInfo;
- use client_api_entity::SignUpResponse::{Authenticated, NotAuthenticated};
- use client_api_entity::{GotrueTokenResponse, UpdateGotrueUserParams, User};
+ use uuid::Uuid;

pub const X_COMPRESSION_TYPE: &str = "X-Compression-Type";
pub const X_COMPRESSION_BUFFER_SIZE: &str = "X-Compression-Buffer-Size";
@@ -610,7 +610,7 @@ impl Client {
}

#[instrument(level = "info", skip_all, err)]
- pub async fn delete_workspace(&self, workspace_id: &str) -> Result<(), AppResponseError> {
+ pub async fn delete_workspace(&self, workspace_id: &Uuid) -> Result<(), AppResponseError> {
let url = format!("{}/api/workspace/{}", self.base_url, workspace_id);
let resp = self
.http_client_with_auth(Method::DELETE, &url)
@@ -692,9 +692,9 @@ impl Client {
#[instrument(level = "info", skip_all, err)]
pub async fn get_workspace_folder(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
depth: Option<u32>,
- root_view_id: Option<String>,
+ root_view_id: Option<Uuid>,
) -> Result<FolderView, AppResponseError> {
let url = format!("{}/api/workspace/{}/folder", self.base_url, workspace_id);
let resp = self
@@ -713,7 +713,7 @@ impl Client {
}

#[instrument(level = "info", skip_all, err)]
- pub async fn open_workspace(&self, workspace_id: &str) -> Result<AFWorkspace, AppResponseError> {
+ pub async fn open_workspace(&self, workspace_id: &Uuid) -> Result<AFWorkspace, AppResponseError> {
let url = format!("{}/api/workspace/{}/open", self.base_url, workspace_id);
let resp = self
.http_client_with_auth(Method::PUT, &url)
@@ -729,7 +729,7 @@ impl Client {
#[instrument(level = "info", skip_all, err)]
pub async fn get_workspace_favorite(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
) -> Result<FavoriteSectionItems, AppResponseError> {
let url = format!("{}/api/workspace/{}/favorite", self.base_url, workspace_id);
let resp = self
@@ -746,7 +746,7 @@ impl Client {
#[instrument(level = "info", skip_all, err)]
pub async fn get_workspace_recent(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
) -> Result<RecentSectionItems, AppResponseError> {
let url = format!("{}/api/workspace/{}/recent", self.base_url, workspace_id);
let resp = self
@@ -763,7 +763,7 @@ impl Client {
#[instrument(level = "info", skip_all, err)]
pub async fn get_workspace_trash(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
) -> Result<TrashSectionItems, AppResponseError> {
let url = format!("{}/api/workspace/{}/trash", self.base_url, workspace_id);
let resp = self
@@ -877,8 +877,8 @@ impl Client {

pub async fn get_snapshot_list(
&self,
- workspace_id: &str,
- object_id: &str,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
) -> Result<AFSnapshotMetas, AppResponseError> {
let url = format!(
"{}/api/workspace/{}/{}/snapshot/list",
@@ -897,8 +897,8 @@ impl Client {

pub async fn get_snapshot(
&self,
- workspace_id: &str,
- object_id: &str,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
params: QuerySnapshotParams,
) -> Result<SnapshotData, AppResponseError> {
let url = format!(
@@ -919,8 +919,8 @@ impl Client {

pub async fn create_snapshot(
&self,
- workspace_id: &str,
- object_id: &str,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
collab_type: CollabType,
) -> Result<AFSnapshotMeta, AppResponseError> {
let url = format!(
@@ -959,7 +959,7 @@ impl Client {
#[instrument(level = "info", skip_all)]
pub async fn get_workspace_usage(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
) -> Result<WorkspaceSpaceUsage, AppResponseError> {
let url = format!("{}/api/file_storage/{}/usage", self.base_url, workspace_id);
let resp = self
@@ -1151,7 +1151,7 @@ impl Client {
}

#[instrument(level = "info", skip_all)]
- pub(crate) fn batch_create_collab_url(&self, workspace_id: &str) -> String {
+ pub(crate) fn batch_create_collab_url(&self, workspace_id: &Uuid) -> String {
format!(
"{}/api/workspace/{}/batch/collab",
self.base_url, workspace_id

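A sketch of the resulting call pattern (the raw id string and function name are hypothetical): the string form is parsed a single time at the boundary, and the same Uuid feeds every workspace endpoint.

use uuid::Uuid;

async fn inspect(client: &client_api::Client, raw: &str) -> anyhow::Result<()> {
  let workspace_id: Uuid = raw.parse()?; // single parse; malformed ids fail here
  let _workspace = client.open_workspace(&workspace_id).await?;
  let _usage = client.get_workspace_usage(&workspace_id).await?;
  // ...inspect the returned AFWorkspace / WorkspaceSpaceUsage as needed...
  Ok(())
}
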
@@ -11,6 +11,7 @@ use shared_entity::dto::ai_dto::{
use shared_entity::response::{AppResponse, AppResponseError};
use std::time::Duration;
use tracing::instrument;
+ use uuid::Uuid;

impl Client {
pub async fn stream_completion_text(
@@ -31,7 +32,7 @@ impl Client {

pub async fn stream_completion_v2(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
params: CompleteTextParams,
ai_model: Option<String>,
) -> Result<CompletionStream, AppResponseError> {
@@ -120,7 +121,7 @@ impl Client {
}

#[instrument(level = "debug", skip_all, err)]
- pub async fn get_model_list(&self, workspace_id: &str) -> Result<ModelList, AppResponseError> {
+ pub async fn get_model_list(&self, workspace_id: &Uuid) -> Result<ModelList, AppResponseError> {
let url = format!("{}/api/ai/{workspace_id}/model/list", self.base_url);
let resp = self
.http_client_with_auth(Method::GET, &url)

@@ -13,9 +13,10 @@ use shared_entity::response::{AppResponse, AppResponseError};
use shared_entity::dto::file_dto::PutFileResponse;
use tracing::instrument;
use url::Url;
+ use uuid::Uuid;

impl Client {
- pub fn get_blob_url(&self, workspace_id: &str, file_id: &str) -> String {
+ pub fn get_blob_url(&self, workspace_id: &Uuid, file_id: &str) -> String {
format!(
"{}/api/file_storage/{}/blob/{}",
self.base_url, workspace_id, file_id
@@ -49,7 +50,7 @@ impl Client {
#[instrument(level = "info", skip_all)]
pub async fn put_blob_v1<T: Into<Bytes>>(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
parent_dir: &str,
data: T,
mime: &Mime,
@@ -101,7 +102,7 @@ impl Client {
.await?
.into_data()
}
- pub fn get_blob_url_v1(&self, workspace_id: &str, parent_dir: &str, file_id: &str) -> String {
+ pub fn get_blob_url_v1(&self, workspace_id: &Uuid, parent_dir: &str, file_id: &str) -> String {
let parent_dir = utf8_percent_encode(parent_dir, NON_ALPHANUMERIC).to_string();
format!(
"{}/api/file_storage/{workspace_id}/v1/blob/{parent_dir}/{file_id}",
@@ -110,7 +111,7 @@ impl Client {
}

/// Returns the workspace_id, parent_dir, and file_id from the given blob url.
- pub fn parse_blob_url_v1(&self, url: &str) -> Option<(String, String, String)> {
+ pub fn parse_blob_url_v1(&self, url: &str) -> Option<(Uuid, String, String)> {
let parsed_url = Url::parse(url).ok()?;
let segments: Vec<&str> = parsed_url.path_segments()?.collect();
// Check if the path has the expected number of segments
@@ -119,7 +120,7 @@ impl Client {
}

// Extract the workspace_id, parent_dir, and file_id from the segments
- let workspace_id = segments[2].to_string();
+ let workspace_id: Uuid = segments[2].parse().ok()?;
let encoded_parent_dir = segments[5].to_string();
let file_id = segments[6].to_string();

@@ -135,7 +136,7 @@ impl Client {
#[instrument(level = "info", skip_all)]
pub async fn get_blob_v1(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
parent_dir: &str,
file_id: &str,
) -> Result<(Mime, Vec<u8>), AppResponseError> {

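A sketch of the v1 blob-URL round trip (directory and file names are hypothetical); the workspace segment now parses back into a typed Uuid, and a malformed segment yields None instead of a garbage string.

let workspace_id = uuid::Uuid::new_v4();
let url = client.get_blob_url_v1(&workspace_id, "user uploads", "avatar.png");
if let Some((ws, _parent_dir, file_id)) = client.parse_blob_url_v1(&url) {
  assert_eq!(ws, workspace_id); // typed round trip
  assert_eq!(file_id, "avatar.png");
}
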
@@ -21,12 +21,13 @@ use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use tracing::error;
+ use uuid::Uuid;

impl Client {
/// Create a new chat
pub async fn create_chat(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
params: CreateChatParams,
) -> Result<(), AppResponseError> {
let url = format!("{}/api/chat/{workspace_id}", self.base_url);
@@ -42,7 +43,7 @@ impl Client {

pub async fn update_chat_settings(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
params: UpdateChatParams,
) -> Result<(), AppResponseError> {
@@ -61,7 +62,7 @@ impl Client {
}
pub async fn get_chat_settings(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
) -> Result<ChatSettings, AppResponseError> {
let url = format!(
@@ -82,7 +83,7 @@ impl Client {
/// Delete a chat for given chat_id
pub async fn delete_chat(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
) -> Result<(), AppResponseError> {
let url = format!("{}/api/chat/{workspace_id}/{chat_id}", self.base_url);
@@ -98,7 +99,7 @@ impl Client {
/// Save a question message to a chat
pub async fn create_question(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
params: CreateChatMessageParams,
) -> Result<ChatMessage, AppResponseError> {
@@ -121,7 +122,7 @@ impl Client {
/// save an answer message to a chat
pub async fn save_answer(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
params: CreateAnswerMessageParams,
) -> Result<ChatMessage, AppResponseError> {
@@ -143,7 +144,7 @@ impl Client {

pub async fn stream_answer_v2(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
question_id: i64,
) -> Result<QuestionStream, AppResponseError> {
@@ -174,7 +175,7 @@ impl Client {

pub async fn stream_answer_v3(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
query: ChatQuestionQuery,
chat_model: Option<String>,
) -> Result<QuestionStream, AppResponseError> {
@@ -196,7 +197,7 @@ impl Client {

pub async fn get_answer(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
question_message_id: i64,
) -> Result<ChatMessage, AppResponseError> {
@@ -219,7 +220,7 @@ impl Client {
/// A message can be a question or an answer
pub async fn update_chat_message(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
params: UpdateChatMessageContentParams,
) -> Result<(), AppResponseError> {
@@ -240,7 +241,7 @@ impl Client {
/// Get related question for a chat message. The message_d should be the question's id
pub async fn get_chat_related_question(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
message_id: i64,
) -> Result<RepeatedRelatedQuestion, AppResponseError> {
@@ -262,7 +263,7 @@ impl Client {
/// Deprecated since v0.9.24. Return list of chat messages for a chat
pub async fn get_chat_messages(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
offset: MessageCursor,
limit: u64,
@@ -300,7 +301,7 @@ impl Client {
/// as the author's uid, as author_uid will face precision issue in the browser environment.
pub async fn get_chat_messages_with_author_uuid(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
offset: MessageCursor,
limit: u64,
@@ -336,7 +337,7 @@ impl Client {

pub async fn get_question_message_from_answer_id(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
chat_id: &str,
answer_message_id: i64,
) -> Result<Option<ChatMessage>, AppResponseError> {

@@ -34,6 +34,7 @@ use std::time::Duration;
use tokio_retry::strategy::ExponentialBackoff;
use tokio_retry::{Action, Condition, RetryIf};
use tracing::{event, instrument};
+ use uuid::Uuid;

impl Client {
#[instrument(level = "info", skip_all, err)]
@@ -86,8 +87,8 @@ impl Client {

pub async fn update_web_collab(
&self,
- workspace_id: &str,
- object_id: &str,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
params: UpdateCollabWebParams,
) -> Result<(), AppResponseError> {
let url = format!(
@@ -108,7 +109,7 @@ impl Client {
#[instrument(level = "info", skip_all, err)]
pub async fn batch_post_collab(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
params: Vec<QueryCollab>,
) -> Result<BatchQueryCollabResult, AppResponseError> {
self
@@ -119,7 +120,7 @@ impl Client {
#[instrument(level = "info", skip_all, err)]
pub async fn batch_get_collab(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
params: Vec<QueryCollab>,
) -> Result<BatchQueryCollabResult, AppResponseError> {
self
@@ -130,7 +131,7 @@ impl Client {
async fn send_batch_collab_request(
&self,
method: Method,
- workspace_id: &str,
+ workspace_id: &Uuid,
params: Vec<QueryCollab>,
) -> Result<BatchQueryCollabResult, AppResponseError> {
let url = format!(
@@ -169,7 +170,7 @@ impl Client {
#[instrument(level = "info", skip_all, err)]
pub async fn list_databases(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
) -> Result<Vec<AFDatabase>, AppResponseError> {
let url = format!("{}/api/workspace/{}/database", self.base_url, workspace_id);
let resp = self
@@ -183,7 +184,7 @@ impl Client {

pub async fn list_database_row_ids(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
database_id: &str,
) -> Result<Vec<AFDatabaseRow>, AppResponseError> {
let url = format!(
@@ -201,7 +202,7 @@ impl Client {

pub async fn get_database_fields(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
database_id: &str,
) -> Result<Vec<AFDatabaseField>, AppResponseError> {
let url = format!(
@@ -221,7 +222,7 @@ impl Client {
// Returns the field id of the newly created field.
pub async fn add_database_field(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
database_id: &str,
insert_field: &AFInsertDatabaseField,
) -> Result<String, AppResponseError> {
@@ -241,7 +242,7 @@ impl Client {

pub async fn list_database_row_ids_updated(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
database_id: &str,
after: Option<DateTime<Utc>>,
) -> Result<Vec<DatabaseRowUpdatedItem>, AppResponseError> {
@@ -261,7 +262,7 @@ impl Client {

pub async fn list_database_row_details(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
database_id: &str,
row_ids: &[&str],
with_doc: bool,
@@ -288,7 +289,7 @@ impl Client {
/// Upon success, returns the row id for the newly created row.
pub async fn add_database_item(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
database_id: &str,
cells_by_id: HashMap<String, serde_json::Value>,
row_doc_content: Option<String>,
@@ -315,7 +316,7 @@ impl Client {
/// Creates the row if now exists, else row will be modified
pub async fn upsert_database_item(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
database_id: &str,
pre_hash: String,
cells_by_id: HashMap<String, serde_json::Value>,
@@ -365,7 +366,7 @@ impl Client {
#[instrument(level = "debug", skip_all, err)]
pub async fn create_collab_list(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
params_list: Vec<CollabParams>,
) -> Result<(), AppResponseError> {
let url = self.batch_create_collab_url(workspace_id);
@@ -427,7 +428,7 @@ impl Client {

pub async fn publish_collabs<Metadata, Data>(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
items: Vec<PublishCollabItem<Metadata, Data>>,
) -> Result<(), AppResponseError>
where
@@ -447,9 +448,8 @@ impl Client {

pub async fn get_collab_embed_info(
&self,
- workspace_id: &str,
- object_id: &str,
- collab_type: CollabType,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
) -> Result<AFCollabEmbedInfo, AppResponseError> {
let url = format!(
"{}/api/workspace/{workspace_id}/collab/{object_id}/embed-info",
@@ -459,7 +459,6 @@ impl Client {
.http_client_with_auth(Method::GET, &url)
.await?
.header("Content-Type", "application/json")
- .query(&CollabTypeParam { collab_type })
.send()
.await?;
log_request_id(&resp);
@@ -470,7 +469,7 @@ impl Client {

pub async fn batch_get_collab_embed_info(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
params: Vec<EmbeddedCollabQuery>,
) -> Result<Vec<AFCollabEmbedInfo>, AppResponseError> {
let url = format!(
@@ -492,8 +491,8 @@ impl Client {

pub async fn collab_full_sync(
&self,
- workspace_id: &str,
- object_id: &str,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
collab_type: CollabType,
doc_state: Vec<u8>,
state_vector: Vec<u8>,

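A sketch of a batch read under the new types (the two object ids are hypothetical): queries and the result map are keyed by Uuid end to end, since BatchQueryCollabResult wraps a HashMap<Uuid, QueryCollabResult>.

use client_api_entity::{CollabType, QueryCollab};
use uuid::Uuid;

let (doc_id, folder_id) = (Uuid::new_v4(), Uuid::new_v4());
let params = vec![
  QueryCollab::new(doc_id, CollabType::Document),
  QueryCollab::new(folder_id, CollabType::Folder),
];
let results = client.batch_get_collab(&workspace_id, params).await?;
for (object_id, _result) in results.0 {
  // object_id is a Uuid key into the batch result; no string conversion needed.
}
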
@@ -23,11 +23,12 @@ use tokio::fs::File;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio_util::codec::{BytesCodec, FramedRead};
use tracing::{error, trace};
+ use uuid::Uuid;

impl Client {
pub async fn create_upload(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
req: CreateUploadRequest,
) -> Result<CreateUploadResponse, AppResponseError> {
trace!("create_upload: {}", req);
@@ -53,7 +54,7 @@ impl Client {
/// which can be smaller.(https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
pub async fn upload_part(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
parent_dir: &str,
file_id: &str,
upload_id: &str,
@@ -86,7 +87,7 @@ impl Client {

pub async fn complete_upload(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
req: CompleteUploadRequest,
) -> Result<(), AppResponseError> {
let url = format!(

@@ -9,10 +9,11 @@ use shared_entity::dto::workspace_dto::{
};
use shared_entity::response::{AppResponse, AppResponseError};
use tracing::instrument;
+ use uuid::Uuid;

impl Client {
#[instrument(level = "info", skip_all, err)]
- pub async fn leave_workspace(&self, workspace_id: &str) -> Result<(), AppResponseError> {
+ pub async fn leave_workspace(&self, workspace_id: &Uuid) -> Result<(), AppResponseError> {
let url = format!("{}/api/workspace/{}/leave", self.base_url, workspace_id);
let resp = self
.http_client_with_auth(Method::POST, &url)
@@ -25,15 +26,11 @@ impl Client {
}

#[instrument(level = "info", skip_all, err)]
- pub async fn get_workspace_members<W: AsRef<str>>(
+ pub async fn get_workspace_members(
&self,
- workspace_id: W,
+ workspace_id: &Uuid,
) -> Result<Vec<AFWorkspaceMember>, AppResponseError> {
- let url = format!(
-   "{}/api/workspace/{}/member",
-   self.base_url,
-   workspace_id.as_ref()
- );
+ let url = format!("{}/api/workspace/{}/member", self.base_url, workspace_id);
let resp = self
.http_client_with_auth(Method::GET, &url)
.await?
@@ -48,7 +45,7 @@ impl Client {
#[instrument(level = "info", skip_all, err)]
pub async fn invite_workspace_members(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
invitations: Vec<WorkspaceMemberInvitation>,
) -> Result<(), AppResponseError> {
let url = format!("{}/api/workspace/{}/invite", self.base_url, workspace_id);
@@ -137,16 +134,12 @@ impl Client {
}

#[instrument(level = "info", skip_all, err)]
- pub async fn update_workspace_member<T: AsRef<str>>(
+ pub async fn update_workspace_member(
&self,
- workspace_id: T,
+ workspace_id: &Uuid,
changeset: WorkspaceMemberChangeset,
) -> Result<(), AppResponseError> {
- let url = format!(
-   "{}/api/workspace/{}/member",
-   self.base_url,
-   workspace_id.as_ref()
- );
+ let url = format!("{}/api/workspace/{}/member", self.base_url, workspace_id);
let resp = self
.http_client_with_auth(Method::PUT, &url)
.await?
@@ -159,16 +152,12 @@ impl Client {
}

#[instrument(level = "info", skip_all, err)]
- pub async fn remove_workspace_members<T: AsRef<str>>(
+ pub async fn remove_workspace_members(
&self,
- workspace_id: T,
+ workspace_id: &Uuid,
member_emails: Vec<String>,
) -> Result<(), AppResponseError> {
- let url = format!(
-   "{}/api/workspace/{}/member",
-   self.base_url,
-   workspace_id.as_ref()
- );
+ let url = format!("{}/api/workspace/{}/member", self.base_url, workspace_id);
let payload = WorkspaceMembers::from(member_emails);
let resp = self
.http_client_with_auth(Method::DELETE, &url)

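Callers of the member APIs drop the AsRef<str> indirection and pass &Uuid directly; a sketch (the email address is hypothetical):

let _members = client.get_workspace_members(&workspace_id).await?;
client
  .remove_workspace_members(&workspace_id, vec!["former.member@example.com".to_string()])
  .await?;
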
@@ -1,3 +1,4 @@
+ use crate::{log_request_id, Client};
use bytes::Bytes;
use client_api_entity::publish_dto::DuplicatePublishedPageResponse;
use client_api_entity::workspace_dto::{PublishInfoView, PublishedView};
@@ -10,15 +11,14 @@ use client_api_entity::{
use reqwest::Method;
use shared_entity::response::{AppResponse, AppResponseError};
use tracing::instrument;
-
- use crate::{log_request_id, Client};
+ use uuid::Uuid;

// Publisher API
impl Client {
#[instrument(level = "debug", skip_all)]
pub async fn list_published_views(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
) -> Result<Vec<PublishInfoView>, AppResponseError> {
let url = format!(
"{}/api/workspace/{}/published-info",
@@ -40,7 +40,7 @@ impl Client {
/// or the original publish namespace if not exists.
pub async fn set_workspace_publish_namespace(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
new_namespace: String,
) -> Result<(), AppResponseError> {
let old_namespace = self.get_workspace_publish_namespace(workspace_id).await?;
@@ -65,7 +65,7 @@ impl Client {

pub async fn get_workspace_publish_namespace(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
) -> Result<String, AppResponseError> {
let url = format!(
"{}/api/workspace/{}/publish-namespace",
@@ -84,7 +84,7 @@ impl Client {

pub async fn patch_published_collabs(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
patches: &[PatchPublishedCollab],
) -> Result<(), AppResponseError> {
let url = format!("{}/api/workspace/{}/publish", self.base_url, workspace_id);
@@ -100,8 +100,8 @@ impl Client {

pub async fn unpublish_collabs(
&self,
- workspace_id: &str,
- view_ids: &[uuid::Uuid],
+ workspace_id: &Uuid,
+ view_ids: &[Uuid],
) -> Result<(), AppResponseError> {
let url = format!("{}/api/workspace/{}/publish", self.base_url, workspace_id);
let resp = self
@@ -116,9 +116,9 @@ impl Client {

pub async fn create_comment_on_published_view(
&self,
- view_id: &uuid::Uuid,
+ view_id: &Uuid,
comment_content: &str,
- reply_comment_id: &Option<uuid::Uuid>,
+ reply_comment_id: &Option<Uuid>,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/published-info/{}/comment",
@@ -139,8 +139,8 @@ impl Client {

pub async fn delete_comment_on_published_view(
&self,
- view_id: &uuid::Uuid,
- comment_id: &uuid::Uuid,
+ view_id: &Uuid,
+ comment_id: &Uuid,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/published-info/{}/comment",
@@ -161,8 +161,8 @@ impl Client {
pub async fn create_reaction_on_comment(
&self,
reaction_type: &str,
- view_id: &uuid::Uuid,
- comment_id: &uuid::Uuid,
+ view_id: &Uuid,
+ comment_id: &Uuid,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/published-info/{}/reaction",
@@ -184,8 +184,8 @@ impl Client {
pub async fn delete_reaction_on_comment(
&self,
reaction_type: &str,
- view_id: &uuid::Uuid,
- comment_id: &uuid::Uuid,
+ view_id: &Uuid,
+ comment_id: &Uuid,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/published-info/{}/reaction",
@@ -206,8 +206,8 @@ impl Client {

pub async fn set_default_publish_view(
&self,
- workspace_id: &str,
- view_id: uuid::Uuid,
+ workspace_id: &Uuid,
+ view_id: Uuid,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/{}/publish-default",
@@ -225,7 +225,7 @@ impl Client {

pub async fn delete_default_publish_view(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/{}/publish-default",
@@ -242,7 +242,7 @@ impl Client {

pub async fn get_default_publish_view_info(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
) -> Result<PublishInfo, AppResponseError> {
let url = format!(
"{}/api/workspace/{}/publish-default",
@@ -264,7 +264,7 @@ impl Client {
impl Client {
pub async fn get_published_view_comments(
&self,
- view_id: &uuid::Uuid,
+ view_id: &Uuid,
) -> Result<GlobalComments, AppResponseError> {
let url = format!(
"{}/api/workspace/published-info/{}/comment",
@@ -289,7 +289,7 @@ impl Client {
#[instrument(level = "debug", skip_all)]
pub async fn get_published_collab_info(
&self,
- view_id: &uuid::Uuid,
+ view_id: &Uuid,
) -> Result<PublishInfo, AppResponseError> {
let url = format!(
"{}/api/workspace/v1/published-info/{}",
@@ -409,7 +409,7 @@ impl Client {

pub async fn duplicate_published_to_workspace(
&self,
- workspace_id: &str,
+ workspace_id: Uuid,
publish_duplicate: &PublishedDuplicate,
) -> Result<DuplicatePublishedPageResponse, AppResponseError> {
let url = format!(
@@ -430,8 +430,8 @@ impl Client {

pub async fn get_published_view_reactions(
&self,
- view_id: &uuid::Uuid,
- comment_id: &Option<uuid::Uuid>,
+ view_id: &Uuid,
+ comment_id: &Option<Uuid>,
) -> Result<Reactions, AppResponseError> {
let url = format!(
"{}/api/workspace/published-info/{}/reaction",

@@ -2,6 +2,7 @@ use app_error::ErrorCode;
use reqwest::Method;
use shared_entity::dto::search_dto::SearchDocumentResponseItem;
use shared_entity::response::{AppResponse, AppResponseError};
+ use uuid::Uuid;

use crate::http::log_request_id;
use crate::Client;
@@ -9,7 +10,7 @@ use crate::Client;
impl Client {
pub async fn search_documents(
&self,
- workspace_id: &str,
+ workspace_id: &Uuid,
query: &str,
limit: u32,
preview_size: u32,

@@ -29,7 +29,7 @@ impl Client {
pub async fn favorite_page_view(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
params: &FavoritePageParams,
) -> Result<(), AppResponseError> {
let url = format!(
@@ -48,7 +48,7 @@ impl Client {
pub async fn move_workspace_page_view(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
params: &MovePageParams,
) -> Result<(), AppResponseError> {
let url = format!(
@@ -67,7 +67,7 @@ impl Client {
pub async fn move_workspace_page_view_to_trash(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/{}/page-view/{}/move-to-trash",
@@ -85,7 +85,7 @@ impl Client {
pub async fn restore_workspace_page_view_from_trash(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/{}/page-view/{}/restore-from-trash",
@@ -138,7 +138,7 @@ impl Client {
pub async fn delete_workspace_page_view_from_trash(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/{}/trash/{}",
@@ -172,7 +172,7 @@ impl Client {
pub async fn update_workspace_page_view(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
params: &UpdatePageParams,
) -> Result<(), AppResponseError> {
let url = format!(
@@ -191,7 +191,7 @@ impl Client {
pub async fn get_workspace_page_view(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
) -> Result<PageCollab, AppResponseError> {
let url = format!(
"{}/api/workspace/{}/page-view/{}",
@@ -210,7 +210,7 @@ impl Client {
pub async fn publish_page(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
params: &PublishPageParams,
) -> Result<(), AppResponseError> {
let url = format!(
@@ -229,7 +229,7 @@ impl Client {
pub async fn unpublish_page(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
) -> Result<(), AppResponseError> {
let url = format!(
"{}/api/workspace/{}/page-view/{}/unpublish",
@@ -262,7 +262,7 @@ impl Client {
pub async fn update_space(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
params: &UpdateSpaceParams,
) -> Result<(), AppResponseError> {
let url = format!(
@@ -281,7 +281,7 @@ impl Client {
pub async fn append_block_to_page(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
params: &AppendBlockToPageParams,
) -> Result<(), AppResponseError> {
let url = format!(
@@ -300,7 +300,7 @@ impl Client {
pub async fn create_database_view(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
params: &CreatePageDatabaseViewParams,
) -> Result<(), AppResponseError> {
let url = format!(
@@ -319,7 +319,7 @@ impl Client {
pub async fn duplicate_view_and_children(
&self,
workspace_id: Uuid,
- view_id: &str,
+ view_id: &Uuid,
params: &DuplicatePageParams,
) -> Result<(), AppResponseError> {
let url = format!(

@@ -18,6 +18,7 @@ tracing.workspace = true
async-trait.workspace = true
tokio = "1.36.0"
collab-entity.workspace = true
+ uuid.workspace = true

[features]
verbose_log = []

@@ -5,9 +5,10 @@ use collab::entity::EncodedCollab;
use collab::preclude::Collab;
use collab_entity::CollabType;
use tracing::instrument;
+ use uuid::Uuid;

#[inline]
- pub async fn collab_from_encode_collab(object_id: &str, data: &[u8]) -> Result<Collab, Error> {
+ pub async fn collab_from_encode_collab(object_id: &Uuid, data: &[u8]) -> Result<Collab, Error> {
let object_id = object_id.to_string();
let data = data.to_vec();

@@ -29,7 +30,7 @@ pub async fn collab_from_encode_collab(object_id: &str, data: &[u8]) -> Result<C
#[instrument(level = "trace", skip(data), fields(len = %data.len()))]
#[inline]
pub async fn validate_encode_collab(
- object_id: &str,
+ object_id: &Uuid,
data: &[u8],
collab_type: &CollabType,
) -> Result<(), Error> {

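A sketch of validating an encoded collab under the new signature (the payload variable is hypothetical):

use collab_entity::CollabType;
use uuid::Uuid;

let object_id = Uuid::new_v4();
// encoded_bytes: hypothetical EncodedCollab v1 bytes; Err means they do not
// decode into a well-formed Document collab for this object id.
validate_encode_collab(&object_id, &encoded_bytes, &CollabType::Document).await?;
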
@@ -52,8 +52,8 @@ impl AwarenessGossip {

pub async fn sink(
&self,
- workspace_id: &str,
- object_id: &str,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
) -> Result<AwarenessUpdateSink, StreamError> {
let sink = AwarenessUpdateSink::new(self.conn.clone(), workspace_id, object_id);
Ok(sink)
@@ -87,7 +87,7 @@ pub struct AwarenessUpdateSink {
}

impl AwarenessUpdateSink {
- pub fn new(conn: MultiplexedConnection, workspace_id: &str, object_id: &str) -> Self {
+ pub fn new(conn: MultiplexedConnection, workspace_id: &Uuid, object_id: &Uuid) -> Self {
let publish_key = format!("af:awareness:{workspace_id}:{object_id}");
AwarenessUpdateSink {
conn: conn.into(),
@@ -131,10 +131,7 @@ mod test {
for _ in 0..COLLAB_COUNT {
let workspace_id = Uuid::new_v4();
let object_id = Uuid::new_v4();
- let sink = gossip
-   .sink(&workspace_id.to_string(), &object_id.to_string())
-   .await
-   .unwrap();
+ let sink = gossip.sink(&workspace_id, &object_id).await.unwrap();
let stream = gossip.awareness_stream(&object_id);
collabs.push((sink, stream));
}

@@ -72,15 +72,15 @@ impl CollabRedisStream {
.await
}

- pub fn collab_update_sink(&self, workspace_id: &str, object_id: &str) -> CollabUpdateSink {
+ pub fn collab_update_sink(&self, workspace_id: &Uuid, object_id: &Uuid) -> CollabUpdateSink {
let stream_key = CollabStreamUpdate::stream_key(workspace_id, object_id);
CollabUpdateSink::new(self.connection_manager.clone(), stream_key)
}

pub async fn awareness_update_sink(
&self,
- workspace_id: &str,
- object_id: &str,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
) -> Result<AwarenessUpdateSink, StreamError> {
self.awareness_gossip.sink(workspace_id, object_id).await
}
@@ -89,8 +89,8 @@ impl CollabRedisStream {
/// from a given message id. Once Redis stream return no more results, the stream will be closed.
pub async fn current_collab_updates(
&self,
- workspace_id: &str,
- object_id: &str,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
since: Option<MessageId>,
) -> Result<Vec<(MessageId, CollabStreamUpdate)>, StreamError> {
let stream_key = CollabStreamUpdate::stream_key(workspace_id, object_id);
@@ -115,8 +115,8 @@ impl CollabRedisStream {
/// coming from corresponding Redis stream until explicitly closed.
pub fn live_collab_updates(
&self,
- workspace_id: &str,
- object_id: &str,
+ workspace_id: &Uuid,
+ object_id: &Uuid,
since: Option<MessageId>,
) -> impl Stream<Item = Result<(MessageId, CollabStreamUpdate), StreamError>> {
let stream_key = CollabStreamUpdate::stream_key(workspace_id, object_id);

@@ -7,6 +7,7 @@ use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use std::str::FromStr;
+ use uuid::Uuid;

/// The [MessageId] generated by XADD has two parts: a timestamp and a sequence number, separated by
/// a hyphen (-). The timestamp is based on the server's time when the message is added, and the
@@ -111,7 +112,7 @@ impl CollabStreamUpdate {
}

/// Returns Redis stream key, that's storing entries mapped to/from [CollabStreamUpdate].
- pub fn stream_key(workspace_id: &str, object_id: &str) -> String {
+ pub fn stream_key(workspace_id: &Uuid, object_id: &Uuid) -> String {
// use `:` separator as it adheres to Redis naming conventions
format!("af:{}:{}:updates", workspace_id, object_id)
}

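The key layout itself is unchanged; only the parameter types tightened. A sketch of the resulting Redis key for a given pair of ids:

let workspace_id = uuid::Uuid::new_v4();
let object_id = uuid::Uuid::new_v4();
let key = CollabStreamUpdate::stream_key(&workspace_id, &object_id);
// Uuid's Display is the hyphenated lowercase form, so the key reads
// "af:<workspace_id>:<object_id>:updates".
assert_eq!(key, format!("af:{workspace_id}:{object_id}:updates"));
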
@@ -19,16 +19,36 @@ use tracing::error;
use uuid::Uuid;
use validator::Validate;

+ mod uuid_str {
+   use serde::Deserialize;
+   use uuid::Uuid;
+
+   pub fn serialize<S>(uuid: &Uuid, serializer: S) -> Result<S::Ok, S::Error>
+   where
+     S: serde::Serializer,
+   {
+     serializer.serialize_str(&uuid.to_string())
+   }
+
+   pub fn deserialize<'de, D>(deserializer: D) -> Result<Uuid, D::Error>
+   where
+     D: serde::Deserializer<'de>,
+   {
+     let s = String::deserialize(deserializer)?;
+     Uuid::parse_str(&s).map_err(serde::de::Error::custom)
+   }
+ }
+
/// The default compression level of ZSTD-compressed collabs.
pub const ZSTD_COMPRESSION_LEVEL: i32 = 3;

#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
pub struct CreateCollabParams {
- #[validate(custom(function = "validate_not_empty_str"))]
- pub workspace_id: String,
+ #[serde(with = "uuid_str")]
+ pub workspace_id: Uuid,

- #[validate(custom(function = "validate_not_empty_str"))]
- pub object_id: String,
+ #[serde(with = "uuid_str")]
+ pub object_id: Uuid,

#[validate(custom(function = "validate_not_empty_payload"))]
pub encoded_collab_v1: Vec<u8>,

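The uuid_str helper keeps the JSON wire format as plain strings while the in-memory field is a Uuid, so pre-migration payloads still deserialize. A sketch with a hypothetical struct:

#[derive(serde::Serialize, serde::Deserialize)]
struct Demo {
  #[serde(with = "uuid_str")]
  id: uuid::Uuid,
}

fn round_trip() -> serde_json::Result<()> {
  let json = r#"{"id":"67e55044-10b1-426f-9247-bb680e5fe0c8"}"#;
  let demo: Demo = serde_json::from_str(json)?; // string in, Uuid out
  assert_eq!(serde_json::to_string(&demo)?, json); // Uuid back out as a string
  Ok(())
}
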
@@ -36,8 +56,8 @@ pub struct CreateCollabParams {
pub collab_type: CollabType,
}

- impl From<(String, CollabParams)> for CreateCollabParams {
-   fn from((workspace_id, collab_params): (String, CollabParams)) -> Self {
+ impl From<(Uuid, CollabParams)> for CreateCollabParams {
+   fn from((workspace_id, collab_params): (Uuid, CollabParams)) -> Self {
Self {
workspace_id,
object_id: collab_params.object_id,
@@ -48,7 +68,7 @@ impl From<(String, CollabParams)> for CreateCollabParams {
}

impl CreateCollabParams {
- pub fn split(self) -> (CollabParams, String) {
+ pub fn split(self) -> (CollabParams, Uuid) {
(
CollabParams {
object_id: self.object_id,
@@ -70,13 +90,13 @@ impl CreateCollabParams {
pub struct CollabIndexParams {}

pub struct PendingCollabWrite {
- pub workspace_id: String,
+ pub workspace_id: Uuid,
pub uid: i64,
pub params: CollabParams,
}

impl PendingCollabWrite {
- pub fn new(workspace_id: String, uid: i64, params: CollabParams) -> Self {
+ pub fn new(workspace_id: Uuid, uid: i64, params: CollabParams) -> Self {
PendingCollabWrite {
workspace_id,
uid,
@@ -87,8 +107,8 @@ impl PendingCollabWrite {

#[derive(Debug, Clone, Validate, Serialize, Deserialize, PartialEq)]
pub struct CollabParams {
- #[validate(custom(function = "validate_not_empty_str"))]
- pub object_id: String,
+ #[serde(with = "uuid_str")]
+ pub object_id: Uuid,
#[validate(custom(function = "validate_not_empty_payload"))]
pub encoded_collab_v1: Bytes,
pub collab_type: CollabType,
@@ -107,12 +127,11 @@ impl Display for CollabParams {
}

impl CollabParams {
- pub fn new<T: ToString, B: Into<Bytes>>(
-   object_id: T,
+ pub fn new<B: Into<Bytes>>(
+   object_id: Uuid,
collab_type: CollabType,
encoded_collab_v1: B,
) -> Self {
- let object_id = object_id.to_string();
Self {
object_id,
collab_type,

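Construction now takes the id by value, mirroring the tests further down; the ToString bound and its extra allocation are gone. A sketch:

// B: Into<Bytes> still accepts a plain Vec<u8> for the encoded payload.
let params = CollabParams::new(Uuid::new_v4(), CollabType::Document, vec![1, 2, 3]);
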
@ -141,7 +160,7 @@ impl CollabParams {
|
|||
|
||||
pub fn to_proto(&self) -> proto::collab::CollabParams {
|
||||
proto::collab::CollabParams {
|
||||
object_id: self.object_id.clone(),
|
||||
object_id: self.object_id.to_string(),
|
||||
encoded_collab: self.encoded_collab_v1.to_vec(),
|
||||
collab_type: self.collab_type.to_proto() as i32,
|
||||
embeddings: None,
|
||||
|
@ -167,7 +186,8 @@ impl TryFrom<proto::collab::CollabParams> for CollabParams {
|
|||
let collab_type_proto = proto::collab::CollabType::try_from(proto.collab_type).unwrap();
|
||||
let collab_type = CollabType::from_proto(&collab_type_proto);
|
||||
Ok(Self {
|
||||
object_id: proto.object_id,
|
||||
object_id: Uuid::from_str(&proto.object_id)
|
||||
.map_err(|e| EntityError::DeserializationError(e.to_string()))?,
|
||||
encoded_collab_v1: Bytes::from(proto.encoded_collab),
|
||||
collab_type,
|
||||
})
|
||||
|
@ -176,7 +196,8 @@ impl TryFrom<proto::collab::CollabParams> for CollabParams {
|
|||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
struct CollabParamsV0 {
|
||||
object_id: String,
|
||||
#[serde(with = "uuid_str")]
|
||||
object_id: Uuid,
|
||||
encoded_collab_v1: Vec<u8>,
|
||||
collab_type: CollabType,
|
||||
}
|
||||
|
@ -206,28 +227,30 @@ pub struct UpdateCollabWebParams {
|
|||
|
||||
#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
|
||||
pub struct DeleteCollabParams {
|
||||
#[validate(custom(function = "validate_not_empty_str"))]
|
||||
pub object_id: String,
|
||||
#[validate(custom(function = "validate_not_empty_str"))]
|
||||
pub workspace_id: String,
|
||||
#[serde(with = "uuid_str")]
|
||||
pub object_id: Uuid,
|
||||
#[serde(with = "uuid_str")]
|
||||
pub workspace_id: Uuid,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Validate)]
|
||||
#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
|
||||
pub struct InsertSnapshotParams {
|
||||
#[validate(custom(function = "validate_not_empty_str"))]
|
||||
pub object_id: String,
|
||||
#[serde(with = "uuid_str")]
|
||||
pub object_id: Uuid,
|
||||
#[validate(custom(function = "validate_not_empty_payload"))]
|
||||
pub doc_state: Bytes,
|
||||
#[validate(custom(function = "validate_not_empty_str"))]
pub workspace_id: String,
#[serde(with = "uuid_str")]
pub workspace_id: Uuid,
pub collab_type: CollabType,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SnapshotData {
pub object_id: String,
#[serde(with = "uuid_str")]
pub object_id: Uuid,
pub encoded_collab_v1: Vec<u8>,
pub workspace_id: String,
#[serde(with = "uuid_str")]
pub workspace_id: Uuid,
}

#[derive(Debug, Clone, Validate, Serialize, Deserialize)]

@@ -237,8 +260,8 @@ pub struct QuerySnapshotParams {

#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
pub struct QueryCollabParams {
#[validate(custom(function = "validate_not_empty_str"))]
pub workspace_id: String,
#[serde(with = "uuid_str")]
pub workspace_id: Uuid,
#[validate(nested)]
pub inner: QueryCollab,
}

@@ -254,13 +277,7 @@ impl Display for QueryCollabParams {
}

impl QueryCollabParams {
pub fn new<T1: Into<String>, T2: Into<String>>(
object_id: T1,
collab_type: CollabType,
workspace_id: T2,
) -> Self {
let workspace_id = workspace_id.into();
let object_id = object_id.into();
pub fn new(object_id: Uuid, collab_type: CollabType, workspace_id: Uuid) -> Self {
let inner = QueryCollab {
object_id,
collab_type,

@@ -282,13 +299,11 @@ impl Deref for QueryCollabParams {

#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
pub struct QueryCollab {
#[validate(custom(function = "validate_not_empty_str"))]
pub object_id: String,
pub object_id: Uuid,
pub collab_type: CollabType,
}
impl QueryCollab {
pub fn new<T: ToString>(object_id: T, collab_type: CollabType) -> Self {
let object_id = object_id.to_string();
pub fn new(object_id: Uuid, collab_type: CollabType) -> Self {
Self {
object_id,
collab_type,

@@ -325,7 +340,8 @@ pub struct AFSnapshotMetas(pub Vec<AFSnapshotMeta>);

#[derive(Debug, Clone, Deserialize)]
pub struct QueryObjectSnapshotParams {
pub object_id: String,
#[serde(with = "uuid_str")]
pub object_id: Uuid,
}

#[derive(Serialize, Deserialize, Debug)]

@@ -346,7 +362,7 @@ pub enum QueryCollabResult {
}

#[derive(Serialize, Deserialize)]
pub struct BatchQueryCollabResult(pub HashMap<String, QueryCollabResult>);
pub struct BatchQueryCollabResult(pub HashMap<Uuid, QueryCollabResult>);

#[derive(Serialize, Deserialize)]
pub struct WorkspaceUsage {

@@ -356,10 +372,10 @@ pub struct WorkspaceUsage {
#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
pub struct InsertCollabMemberParams {
pub uid: i64,
#[validate(custom(function = "validate_not_empty_str"))]
pub workspace_id: String,
#[validate(custom(function = "validate_not_empty_str"))]
pub object_id: String,
#[serde(with = "uuid_str")]
pub workspace_id: Uuid,
#[serde(with = "uuid_str")]
pub object_id: Uuid,
pub access_level: AFAccessLevel,
}

@@ -368,10 +384,10 @@ pub type UpdateCollabMemberParams = InsertCollabMemberParams;
#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
pub struct WorkspaceCollabIdentify {
pub uid: i64,
#[validate(custom(function = "validate_not_empty_str"))]
pub workspace_id: String,
#[validate(custom(function = "validate_not_empty_str"))]
pub object_id: String,
#[serde(with = "uuid_str")]
pub workspace_id: Uuid,
#[serde(with = "uuid_str")]
pub object_id: Uuid,
}

#[derive(Serialize, Deserialize)]

@@ -393,23 +409,23 @@ pub struct DefaultPublishViewInfoMeta {

#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
pub struct QueryCollabMembers {
#[validate(custom(function = "validate_not_empty_str"))]
pub workspace_id: String,
#[validate(custom(function = "validate_not_empty_str"))]
pub object_id: String,
#[serde(with = "uuid_str")]
pub workspace_id: Uuid,
#[serde(with = "uuid_str")]
pub object_id: Uuid,
}

#[derive(Debug, Clone, Validate, Serialize, Deserialize)]
pub struct QueryWorkspaceMember {
#[validate(custom(function = "validate_not_empty_str"))]
pub workspace_id: String,
#[serde(with = "uuid_str")]
pub workspace_id: Uuid,

pub uid: i64,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct AFCollabEmbedInfo {
pub object_id: String,
pub object_id: Uuid,
/// The timestamp when the object's embeddings were updated
pub indexed_at: DateTime<Utc>,
/// The timestamp when the object's data was updated

@@ -743,7 +759,8 @@ impl From<i16> for AFWorkspaceInvitationStatus {
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct AFCollabEmbeddedChunk {
pub fragment_id: String,
pub object_id: String,
#[serde(with = "uuid_str")]
pub object_id: Uuid,
pub content_type: EmbeddingContentType,
pub content: String,
pub embedding: Option<Vec<f32>>,

@@ -1217,7 +1234,7 @@ mod test {
#[test]
fn collab_params_serialization_from_old_format() {
let v0 = CollabParamsV0 {
object_id: Uuid::new_v4().to_string(),
object_id: Uuid::new_v4(),
collab_type: CollabType::Document,
encoded_collab_v1: vec![
7, 0, 0, 0, 0, 0, 0, 0, 1, 209, 196, 206, 243, 15, 1, 26, 4, 0, 0, 0, 0, 0, 0, 1, 1, 209,

@@ -1285,7 +1302,7 @@ mod test {
#[test]
fn deserialization_using_protobuf() {
let collab_params_with_embeddings = CollabParams {
object_id: "object_id".to_string(),
object_id: Uuid::new_v4(),
collab_type: CollabType::Document,
encoded_collab_v1: Bytes::default(),
};

@@ -1298,7 +1315,7 @@ mod test {
#[test]
fn deserialize_collab_params_without_embeddings() {
let collab_params = CollabParams {
object_id: "object_id".to_string(),
object_id: Uuid::new_v4(),
collab_type: CollabType::Document,
encoded_collab_v1: Bytes::from(vec![1, 2, 3]),
};

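The constructor changes above push UUID parsing out to the call sites. A minimal call-site sketch follows; the build_query adapter and its error handling are illustrative, not part of this commit. Callers that used to pass any Into<String> now parse once at the boundary, so a malformed id fails before it ever reaches the query layer.

use collab_entity::CollabType;
use uuid::Uuid;

// Hypothetical adapter for code that still holds string ids.
fn build_query(workspace_id: &str, object_id: &str) -> Result<QueryCollabParams, uuid::Error> {
  let workspace_id = Uuid::parse_str(workspace_id)?;
  let object_id = Uuid::parse_str(object_id)?;
  // After this commit, `new` takes typed Uuids instead of Into<String>.
  Ok(QueryCollabParams::new(object_id, CollabType::Document, workspace_id))
}
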
@@ -21,11 +21,10 @@ use uuid::Uuid;

pub async fn insert_chat<'a, E: Executor<'a, Database = Postgres>>(
executor: E,
workspace_id: &str,
workspace_id: &Uuid,
params: CreateChatParams,
) -> Result<(), AppError> {
let chat_id = Uuid::from_str(&params.chat_id)?;
let workspace_id = Uuid::from_str(workspace_id)?;
let rag_ids = json!(params.rag_ids);
sqlx::query!(
r#"

@@ -15,7 +15,7 @@ use chrono::{DateTime, Duration, Utc};
use sqlx::{Error, Executor, PgPool, Postgres, Transaction};
use std::collections::{HashMap, HashSet};
use std::fmt::Debug;
use std::{ops::DerefMut, str::FromStr};
use std::ops::DerefMut;
use tracing::{error, instrument};
use uuid::Uuid;

@@ -46,12 +46,10 @@ use uuid::Uuid;
pub async fn insert_into_af_collab(
tx: &mut Transaction<'_, sqlx::Postgres>,
uid: &i64,
workspace_id: &str,
workspace_id: &Uuid,
params: &CollabParams,
) -> Result<(), AppError> {
let encrypt = 0;
let partition_key = crate::collab::partition_key_from_collab_type(&params.collab_type);
let workspace_id = Uuid::from_str(workspace_id)?;
tracing::trace!(
"upsert collab:{}, len:{}",
params.object_id,

@@ -60,15 +58,14 @@ pub async fn insert_into_af_collab(

sqlx::query!(
r#"
INSERT INTO af_collab (oid, blob, len, partition_key, encrypt, owner_uid, workspace_id)
VALUES ($1, $2, $3, $4, $5, $6, $7) ON CONFLICT (oid, partition_key)
DO UPDATE SET blob = $2, len = $3, encrypt = $5, owner_uid = $6 WHERE excluded.workspace_id = af_collab.workspace_id;
INSERT INTO af_collab (oid, blob, len, partition_key, owner_uid, workspace_id)
VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (oid)
DO UPDATE SET blob = $2, len = $3, owner_uid = $5 WHERE excluded.workspace_id = af_collab.workspace_id;
"#,
params.object_id,
params.encoded_collab_v1.as_ref(),
params.encoded_collab_v1.len() as i32,
partition_key,
encrypt,
uid,
workspace_id,
)

@@ -142,25 +139,22 @@ pub async fn insert_into_af_collab(
pub async fn insert_into_af_collab_bulk_for_user(
tx: &mut Transaction<'_, Postgres>,
uid: &i64,
workspace_id: &str,
workspace_id: Uuid,
collab_params_list: &[CollabParams],
) -> Result<(), AppError> {
if collab_params_list.is_empty() {
return Ok(());
}

let encrypt = 0;
let workspace_uuid = Uuid::from_str(workspace_id)?;

// Insert values into `af_collab` tables in bulk
let len = collab_params_list.len();
let mut object_ids: Vec<Uuid> = Vec::with_capacity(len);
let mut object_ids = Vec::with_capacity(len);
let mut blobs: Vec<Vec<u8>> = Vec::with_capacity(len);
let mut lengths: Vec<i32> = Vec::with_capacity(len);
let mut partition_keys: Vec<i32> = Vec::with_capacity(len);
let mut visited = HashSet::with_capacity(collab_params_list.len());
for params in collab_params_list {
let oid = Uuid::from_str(&params.object_id)?;
let oid = params.object_id;
if visited.insert(oid) {
let partition_key = partition_key_from_collab_type(&params.collab_type);
object_ids.push(oid);

@@ -171,20 +165,19 @@ pub async fn insert_into_af_collab_bulk_for_user(
}

let uids: Vec<i64> = vec![*uid; object_ids.len()];
let workspace_ids: Vec<Uuid> = vec![workspace_uuid; object_ids.len()];
let workspace_ids: Vec<Uuid> = vec![workspace_id; object_ids.len()];
// Bulk insert into `af_collab` for the provided collab params
sqlx::query!(
r#"
INSERT INTO af_collab (oid, blob, len, partition_key, encrypt, owner_uid, workspace_id)
SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::int[], $5::int[], $6::bigint[], $7::uuid[])
ON CONFLICT (oid, partition_key)
DO UPDATE SET blob = excluded.blob, len = excluded.len, encrypt = excluded.encrypt where af_collab.workspace_id = excluded.workspace_id
INSERT INTO af_collab (oid, blob, len, partition_key, owner_uid, workspace_id)
SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::int[], $5::bigint[], $6::uuid[])
ON CONFLICT (oid)
DO UPDATE SET blob = excluded.blob, len = excluded.len where af_collab.workspace_id = excluded.workspace_id
"#,
&object_ids,
&blobs,
&lengths,
&partition_keys,
&vec![encrypt; object_ids.len()],
&uids,
&workspace_ids
)

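For reference, the column-wise UNNEST upsert above can be sketched as a standalone function using the runtime-checked sqlx::query API instead of the compile-time macro. Table and column names are taken from this commit; everything else in the sketch (function name, parameter shapes) is illustrative.

use std::ops::DerefMut;
use sqlx::{Postgres, Transaction};
use uuid::Uuid;

async fn bulk_upsert_af_collab(
  tx: &mut Transaction<'_, Postgres>,
  oids: Vec<Uuid>,
  blobs: Vec<Vec<u8>>,
  lens: Vec<i32>,
  partition_keys: Vec<i32>,
  owner_uids: Vec<i64>,
  workspace_ids: Vec<Uuid>,
) -> Result<(), sqlx::Error> {
  // One round-trip: each bound array becomes a column, and UNNEST zips the
  // columns back into rows, so N collabs cost one statement instead of N.
  sqlx::query(
    r#"
    INSERT INTO af_collab (oid, blob, len, partition_key, owner_uid, workspace_id)
    SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::int[], $5::bigint[], $6::uuid[])
    ON CONFLICT (oid)
    DO UPDATE SET blob = excluded.blob, len = excluded.len
    WHERE af_collab.workspace_id = excluded.workspace_id
    "#,
  )
  .bind(oids)
  .bind(blobs)
  .bind(lens)
  .bind(partition_keys)
  .bind(owner_uids)
  .bind(workspace_ids)
  .execute(tx.deref_mut())
  .await?;
  Ok(())
}

The WHERE af_collab.workspace_id = excluded.workspace_id guard keeps an upsert from silently moving an existing oid into another workspace, which matters more now that oid alone is the primary key.
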
@@ -205,7 +198,7 @@ pub async fn insert_into_af_collab_bulk_for_user(
pub async fn select_blob_from_af_collab<'a, E>(
conn: E,
collab_type: &CollabType,
object_id: &str,
object_id: &Uuid,
) -> Result<Vec<u8>, sqlx::Error>
where
E: Executor<'a, Database = Postgres>,

@@ -227,7 +220,7 @@ where
#[inline]
pub async fn select_collab_meta_from_af_collab<'a, E>(
conn: E,
object_id: &str,
object_id: &Uuid,
collab_type: &CollabType,
) -> Result<Option<AFCollabRowMeta>, sqlx::Error>
where

@@ -252,9 +245,9 @@ where
pub async fn batch_select_collab_blob(
pg_pool: &PgPool,
queries: Vec<QueryCollab>,
results: &mut HashMap<String, QueryCollabResult>,
results: &mut HashMap<Uuid, QueryCollabResult>,
) {
let mut object_ids_by_collab_type: HashMap<CollabType, Vec<String>> = HashMap::new();
let mut object_ids_by_collab_type: HashMap<CollabType, Vec<Uuid>> = HashMap::new();
for params in queries {
object_ids_by_collab_type
.entry(params.collab_type)

@@ -262,17 +255,15 @@ pub async fn batch_select_collab_blob(
.push(params.object_id);
}

for (collab_type, mut object_ids) in object_ids_by_collab_type.into_iter() {
let partition_key = partition_key_from_collab_type(&collab_type);
for (_collab_type, mut object_ids) in object_ids_by_collab_type.into_iter() {
let par_results: Result<Vec<QueryCollabData>, sqlx::Error> = sqlx::query_as!(
QueryCollabData,
r#"
SELECT oid, blob
FROM af_collab
WHERE oid = ANY($1) AND partition_key = $2 AND deleted_at IS NULL;
WHERE oid = ANY($1) AND deleted_at IS NULL;
"#,
&object_ids,
partition_key,
&object_ids
)
.fetch_all(pg_pool)
.await;

@@ -306,7 +297,7 @@ pub async fn batch_select_collab_blob(

#[derive(Debug, sqlx::FromRow)]
struct QueryCollabData {
oid: String,
oid: Uuid,
blob: RawData,
}

@@ -342,7 +333,7 @@ pub async fn create_snapshot(
///
#[inline]
pub async fn latest_snapshot_time<'a, E: Executor<'a, Database = Postgres>>(
oid: &str,
oid: &Uuid,
executor: E,
) -> Result<Option<chrono::DateTime<Utc>>, sqlx::Error> {
let latest_snapshot_time: Option<chrono::DateTime<Utc>> = sqlx::query_scalar(

@@ -356,7 +347,7 @@ pub async fn latest_snapshot_time<'a, E: Executor<'a, Database = Postgres>>(
}
#[inline]
pub async fn should_create_snapshot2<'a, E: Executor<'a, Database = Postgres>>(
oid: &str,
oid: &Uuid,
executor: E,
) -> Result<bool, sqlx::Error> {
let hours = Utc::now() - Duration::hours(SNAPSHOT_PER_HOUR);

@@ -379,12 +370,11 @@ pub async fn should_create_snapshot2<'a, E: Executor<'a, Database = Postgres>>(
///
pub async fn create_snapshot_and_maintain_limit<'a>(
mut transaction: Transaction<'a, Postgres>,
workspace_id: &str,
oid: &str,
workspace_id: &Uuid,
oid: &Uuid,
encoded_collab_v1: &[u8],
snapshot_limit: i64,
) -> Result<AFSnapshotMeta, AppError> {
let workspace_id = Uuid::from_str(workspace_id)?;
let snapshot_meta = sqlx::query_as!(
AFSnapshotMeta,
r#"

@@ -392,7 +382,7 @@ pub async fn create_snapshot_and_maintain_limit<'a>(
VALUES ($1, $2, $3, $4, $5)
RETURNING sid AS snapshot_id, oid AS object_id, created_at
"#,
oid,
oid.to_string(),
encoded_collab_v1,
encoded_collab_v1.len() as i64,
0,

@@ -424,11 +414,10 @@ pub async fn create_snapshot_and_maintain_limit<'a>(
#[inline]
pub async fn select_snapshot(
pg_pool: &PgPool,
workspace_id: &str,
object_id: &str,
workspace_id: &Uuid,
object_id: &Uuid,
snapshot_id: &i64,
) -> Result<Option<AFSnapshotRow>, Error> {
let workspace_id = Uuid::from_str(workspace_id).map_err(|err| Error::Decode(err.into()))?;
let row = sqlx::query_as!(
AFSnapshotRow,
r#"

@@ -436,7 +425,7 @@ pub async fn select_snapshot(
WHERE sid = $1 AND oid = $2 AND workspace_id = $3 AND deleted_at IS NULL;
"#,
snapshot_id,
object_id,
object_id.to_string(),
workspace_id
)
.fetch_optional(pg_pool)

@@ -469,7 +458,7 @@ pub async fn select_latest_snapshot(
/// Returns list of snapshots for given object_id in descending order of creation time.
pub async fn get_all_collab_snapshot_meta(
pg_pool: &PgPool,
object_id: &str,
object_id: &Uuid,
) -> Result<AFSnapshotMetas, Error> {
let snapshots: Vec<AFSnapshotMeta> = sqlx::query_as!(
AFSnapshotMeta,

@@ -479,7 +468,7 @@ pub async fn get_all_collab_snapshot_meta(
WHERE oid = $1 AND deleted_at IS NULL
ORDER BY created_at DESC;
"#,
object_id
object_id.to_string()
)
.fetch_all(pg_pool)
.await?;

@@ -507,7 +496,7 @@ fn transform_record_not_found_error(
/// For a more efficient lookup, especially in frequent checks, consider using the cached method [CollabCache::is_exist].
#[inline]
pub async fn is_collab_exists<'a, E: Executor<'a, Database = Postgres>>(
oid: &str,
oid: &Uuid,
executor: E,
) -> Result<bool, sqlx::Error> {
let result = sqlx::query_scalar!(

@@ -524,7 +513,7 @@ pub async fn is_collab_exists<'a, E: Executor<'a, Database = Postgres>>(
pub async fn select_workspace_database_oid<'a, E: Executor<'a, Database = Postgres>>(
executor: E,
workspace_id: &Uuid,
) -> Result<String, sqlx::Error> {
) -> Result<Uuid, sqlx::Error> {
let partition_key = partition_key_from_collab_type(&CollabType::WorkspaceDatabase);
sqlx::query_scalar!(
r#"

@@ -543,7 +532,7 @@ pub async fn select_workspace_database_oid<'a, E: Executor<'a, Database = Postgr
pub async fn select_last_updated_database_row_ids(
pg_pool: &PgPool,
workspace_id: &Uuid,
row_ids: &[String],
row_ids: &[Uuid],
after: &DateTime<Utc>,
) -> Result<Vec<DatabaseRowUpdatedItem>, sqlx::Error> {
let updated_row_items = sqlx::query_as!(

@@ -552,7 +541,7 @@ pub async fn select_last_updated_database_row_ids(
SELECT
updated_at as updated_at,
oid as row_id
FROM af_collab_database_row
FROM af_collab
WHERE workspace_id = $1
AND oid = ANY($2)
AND updated_at > $3

@@ -568,33 +557,24 @@ pub async fn select_last_updated_database_row_ids(

pub async fn select_collab_embed_info<'a, E>(
tx: E,
object_id: &str,
collab_type: CollabType,
object_id: &Uuid,
) -> Result<Option<AFCollabEmbedInfo>, sqlx::Error>
where
E: Executor<'a, Database = Postgres>,
{
tracing::info!(
"select_collab_embed_info: object_id: {}, collab_type: {:?}",
object_id,
collab_type
);
let partition_key = partition_key_from_collab_type(&collab_type);
tracing::info!("select_collab_embed_info: object_id: {}", object_id);
let record = sqlx::query!(
r#"
SELECT
ac.oid AS object_id,
ac.partition_key,
ac.oid as object_id,
ace.partition_key,
ac.indexed_at,
ace.updated_at
FROM af_collab_embeddings ac
JOIN af_collab ace
ON ac.oid = ace.oid
AND ac.partition_key = ace.partition_key
WHERE ac.oid = $1 AND ac.partition_key = $2
JOIN af_collab ace ON ac.oid = ace.oid
WHERE ac.oid = $1
"#,
object_id,
partition_key
object_id
)
.fetch_optional(tx)
.await?;

@@ -615,37 +595,24 @@ pub async fn batch_select_collab_embed<'a, E>(
where
E: Executor<'a, Database = Postgres>,
{
let collab_types: Vec<CollabType> = embedded_collab
.iter()
.map(|query| query.collab_type)
.collect();
let object_ids: Vec<String> = embedded_collab
let object_ids: Vec<_> = embedded_collab
.into_iter()
.map(|query| query.object_id)
.collect();

// Collect the partition keys for each collab_type
let partition_keys: Vec<i32> = collab_types
.iter()
.map(partition_key_from_collab_type)
.collect();

// Execute the query to fetch all matching rows
let records = sqlx::query!(
r#"
SELECT
ac.oid AS object_id,
ac.partition_key,
ac.oid as object_id,
ace.partition_key,
ac.indexed_at,
ace.updated_at
FROM af_collab_embeddings ac
JOIN af_collab ace
ON ac.oid = ace.oid
AND ac.partition_key = ace.partition_key
WHERE ac.oid = ANY($1) AND ac.partition_key = ANY($2)
JOIN af_collab ace ON ac.oid = ace.oid
WHERE ac.oid = ANY($1)
"#,
&object_ids,
&partition_keys
&object_ids
)
.fetch_all(executor)
.await?;

@@ -654,7 +621,7 @@ where
let mut items = vec![];
for row in records {
let embed_info = AFCollabEmbedInfo {
object_id: row.object_id.clone(),
object_id: row.object_id,
indexed_at: DateTime::<Utc>::from_naive_utc_and_offset(row.indexed_at, Utc),
updated_at: row.updated_at,
};

@@ -11,6 +11,7 @@ use collab::entity::EncodedCollab;
use serde::{Deserialize, Serialize};
use sqlx::Transaction;
use std::collections::HashMap;
use uuid::Uuid;

pub const COLLAB_SNAPSHOT_LIMIT: i64 = 30;
pub const SNAPSHOT_PER_HOUR: i64 = 6;

@@ -21,30 +22,39 @@ pub type AppResult<T, E = AppError> = core::result::Result<T, E>;
#[async_trait]
pub trait CollabStorageAccessControl: Send + Sync + 'static {
/// Updates the cache of the access level of the user for given collab object.
async fn update_policy(&self, uid: &i64, oid: &str, level: AFAccessLevel)
-> Result<(), AppError>;
async fn update_policy(
&self,
uid: &i64,
oid: &Uuid,
level: AFAccessLevel,
) -> Result<(), AppError>;

/// Enforce the user's permission to read the collab object.
async fn enforce_read_collab(
&self,
workspace_id: &str,
workspace_id: &Uuid,
uid: &i64,
oid: &str,
oid: &Uuid,
) -> Result<(), AppError>;

/// Enforce the user's permission to write to the collab object.
async fn enforce_write_collab(
&self,
workspace_id: &str,
workspace_id: &Uuid,
uid: &i64,
oid: &str,
oid: &Uuid,
) -> Result<(), AppError>;

/// Enforce the user's permission to write to the workspace.
async fn enforce_write_workspace(&self, uid: &i64, workspace_id: &str) -> Result<(), AppError>;
async fn enforce_write_workspace(&self, uid: &i64, workspace_id: &Uuid) -> Result<(), AppError>;

/// Enforce the user's permission to delete the collab object.
async fn enforce_delete(&self, workspace_id: &str, uid: &i64, oid: &str) -> Result<(), AppError>;
async fn enforce_delete(
&self,
workspace_id: &Uuid,
uid: &i64,
oid: &Uuid,
) -> Result<(), AppError>;
}

#[derive(Clone)]

@@ -70,7 +80,7 @@ pub trait CollabStorage: Send + Sync + 'static {
///
async fn queue_insert_or_update_collab(
&self,
workspace_id: &str,
workspace_id: Uuid,
uid: &i64,
params: CollabParams,
flush_to_disk: bool,

@@ -78,7 +88,7 @@ pub trait CollabStorage: Send + Sync + 'static {

async fn batch_insert_new_collab(
&self,
workspace_id: &str,
workspace_id: Uuid,
uid: &i64,
params: Vec<CollabParams>,
) -> AppResult<()>;

@@ -94,7 +104,7 @@ pub trait CollabStorage: Send + Sync + 'static {
/// * `Result<()>` - Returns `Ok(())` if the collaboration was created successfully, `Err` otherwise.
async fn upsert_new_collab_with_transaction(
&self,
workspace_id: &str,
workspace_id: Uuid,
uid: &i64,
params: CollabParams,
transaction: &mut Transaction<'_, sqlx::Postgres>,

@@ -120,10 +130,10 @@ pub trait CollabStorage: Send + Sync + 'static {
async fn batch_get_collab(
&self,
uid: &i64,
workspace_id: &str,
workspace_id: Uuid,
queries: Vec<QueryCollab>,
from_editing_collab: bool,
) -> HashMap<String, QueryCollabResult>;
) -> HashMap<Uuid, QueryCollabResult>;

/// Deletes a collaboration from the storage.
///

@@ -134,37 +144,60 @@ pub trait CollabStorage: Send + Sync + 'static {
/// # Returns
///
/// * `Result<()>` - Returns `Ok(())` if the collaboration was deleted successfully, `Err` otherwise.
async fn delete_collab(&self, workspace_id: &str, uid: &i64, object_id: &str) -> AppResult<()>;
async fn delete_collab(&self, workspace_id: &Uuid, uid: &i64, object_id: &Uuid) -> AppResult<()>;

async fn should_create_snapshot(&self, workspace_id: &str, oid: &str) -> Result<bool, AppError>;
async fn should_create_snapshot(&self, workspace_id: &Uuid, oid: &Uuid)
-> Result<bool, AppError>;

async fn create_snapshot(&self, params: InsertSnapshotParams) -> AppResult<AFSnapshotMeta>;
async fn queue_snapshot(&self, params: InsertSnapshotParams) -> AppResult<()>;

async fn get_collab_snapshot(
&self,
workspace_id: &str,
object_id: &str,
workspace_id: Uuid,
object_id: Uuid,
snapshot_id: &i64,
) -> AppResult<SnapshotData>;

async fn get_latest_snapshot(
&self,
workspace_id: &str,
object_id: &str,
workspace_id: Uuid,
object_id: Uuid,
collab_type: CollabType,
) -> AppResult<Option<SnapshotData>>;

/// Returns list of snapshots for given object_id in descending order of creation time.
async fn get_collab_snapshot_list(
&self,
workspace_id: &str,
oid: &str,
workspace_id: &Uuid,
oid: &Uuid,
) -> AppResult<AFSnapshotMetas>;
}

#[derive(Debug, Clone, Deserialize, Serialize)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CollabMetadata {
pub object_id: String,
pub workspace_id: String,
#[serde(with = "uuid_str")]
pub object_id: Uuid,
#[serde(with = "uuid_str")]
pub workspace_id: Uuid,
}

mod uuid_str {
use serde::Deserialize;
use uuid::Uuid;

pub fn serialize<S>(uuid: &Uuid, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&uuid.to_string())
}

pub fn deserialize<'de, D>(deserializer: D) -> Result<Uuid, D::Error>
where
D: serde::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
Uuid::parse_str(&s).map_err(serde::de::Error::custom)
}
}

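A quick round-trip sketch for the uuid_str helper above (struct and test values are illustrative): on the wire the id stays a canonical UUID string, so existing JSON clients are unaffected, while the Rust side gets a typed Uuid.

use serde::{Deserialize, Serialize};
use uuid::Uuid;

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Demo {
  #[serde(with = "uuid_str")]
  object_id: Uuid,
}

#[test]
fn uuid_str_round_trip() {
  let demo = Demo { object_id: Uuid::new_v4() };
  let json = serde_json::to_string(&demo).unwrap();
  // Serialized as a plain string field, e.g. {"object_id":"550e8400-..."}.
  let back: Demo = serde_json::from_str(&json).unwrap();
  assert_eq!(demo, back);
}
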
@@ -11,10 +11,10 @@ use uuid::Uuid;
#[allow(clippy::too_many_arguments)]
pub async fn insert_history<'a>(
workspace_id: &Uuid,
oid: &str,
oid: &Uuid,
doc_state: Vec<u8>,
doc_state_version: i32,
deps_snapshot_id: Option<String>,
deps_snapshot_id: Option<&Uuid>,
collab_type: CollabType,
created_at: i64,
snapshots: Vec<SnapshotMetaPb>,

@@ -61,7 +61,7 @@ pub async fn insert_history<'a>(

async fn insert_snapshot_meta<'a, E: Executor<'a, Database = Postgres>>(
workspace_id: &Uuid,
oid: &str,
oid: &Uuid,
meta: SnapshotMetaPb,
partition_key: i32,
executor: E,

@@ -72,7 +72,7 @@ async fn insert_snapshot_meta<'a, E: Executor<'a, Database = Postgres>>(
VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT DO NOTHING
"#,
oid,
oid.to_string(),
workspace_id,
meta.snapshot,
meta.snapshot_version,

@@ -95,22 +95,24 @@ async fn insert_snapshot_meta<'a, E: Executor<'a, Database = Postgres>>(
/// Returns a vector of `AFSnapshotMetaPbRow` struct instances containing the snapshot data.
/// This vector is empty if no records match the criteria.
pub async fn get_snapshot_meta_list<'a>(
oid: &str,
oid: &Uuid,
collab_type: &CollabType,
pool: &PgPool,
) -> Result<Vec<AFSnapshotMetaPbRow>, sqlx::Error> {
let partition_key = partition_key_from_collab_type(collab_type);
let order_clause = "DESC";
let query = format!(
"SELECT oid, snapshot, snapshot_version, created_at FROM af_snapshot_meta WHERE oid = $1 AND partition_key = $2 ORDER BY created_at {}",
order_clause
);

let rows = sqlx::query_as::<_, AFSnapshotMetaPbRow>(&query)
.bind(oid)
.bind(partition_key)
.fetch_all(pool)
.await?;
let rows: Vec<_> = sqlx::query_as!(
AFSnapshotMetaPbRow,
r#"
SELECT oid, snapshot, snapshot_version, created_at
FROM af_snapshot_meta
WHERE oid = $1 AND partition_key = $2
ORDER BY created_at DESC"#,
oid.to_string(),
partition_key
)
.fetch_all(pool)
.await?;

Ok(rows)
}

@@ -130,24 +132,20 @@ pub async fn get_snapshot_meta_list<'a>(
#[allow(clippy::too_many_arguments)]
async fn insert_snapshot_state<'a, E: Executor<'a, Database = Postgres>>(
workspace_id: &Uuid,
oid: &str,
oid: &Uuid,
doc_state: Vec<u8>,
doc_state_version: i32,
deps_snapshot_id: Option<String>,
deps_snapshot_id: Option<&Uuid>,
partition_key: i32,
created_at: i64,
executor: E,
) -> Result<(), sqlx::Error> {
let deps_snapshot_id = match deps_snapshot_id {
Some(id) => Uuid::parse_str(&id).ok(),
None => None,
};
sqlx::query!(
r#"
INSERT INTO af_snapshot_state (oid, workspace_id, doc_state, doc_state_version, deps_snapshot_id, partition_key, created_at)
VALUES ($1, $2, $3, $4, $5, $6, $7)
"#,
oid,
oid.to_string(),
workspace_id,
doc_state,
doc_state_version,

@@ -164,7 +162,7 @@ async fn insert_snapshot_state<'a, E: Executor<'a, Database = Postgres>>(
/// that has a `created_at` timestamp greater than or equal to the specified timestamp.
///
pub async fn get_latest_snapshot_state<'a, E: Executor<'a, Database = Postgres>>(
oid: &str,
oid: &Uuid,
timestamp: i64,
collab_type: &CollabType,
executor: E,

@@ -179,7 +177,7 @@ pub async fn get_latest_snapshot_state<'a, E: Executor<'a, Database = Postgres>>
ORDER BY created_at ASC
LIMIT 1
"#,
oid,
oid.to_string(),
partition_key,
timestamp,
)

@@ -190,7 +188,7 @@ pub async fn get_latest_snapshot_state<'a, E: Executor<'a, Database = Postgres>>

/// Gets the latest snapshot for the specified object identifier and partition key.
pub async fn get_latest_snapshot(
oid: &str,
oid: &Uuid,
collab_type: &CollabType,
pool: &PgPool,
) -> Result<Option<SingleSnapshotInfoPb>, sqlx::Error> {

@@ -206,7 +204,7 @@ pub async fn get_latest_snapshot(
ORDER BY created_at DESC
LIMIT 1
"#,
oid,
oid.to_string(),
partition_key,
)
.fetch_optional(transaction.deref_mut())

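Note the pattern running through these snapshot queries: af_snapshot_meta and af_snapshot_state still key oid as text, so the typed Uuid is stringified only at the bind site. A minimal sketch of the same pattern (the helper itself is hypothetical; table and column names come from the queries above):

use sqlx::PgPool;
use uuid::Uuid;

async fn snapshot_count(pool: &PgPool, oid: &Uuid) -> Result<i64, sqlx::Error> {
  // The snapshot tables were not migrated to uuid keys, so bind the
  // canonical string form rather than the Uuid itself.
  sqlx::query_scalar("SELECT COUNT(*) FROM af_snapshot_meta WHERE oid = $1")
    .bind(oid.to_string())
    .fetch_one(pool)
    .await
}
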
@@ -15,8 +15,7 @@ use uuid::Uuid;
pub async fn get_index_status<'a, E>(
tx: E,
workspace_id: &Uuid,
object_id: &str,
partition_key: i32,
object_id: &Uuid,
) -> Result<IndexingStatus, sqlx::Error>
where
E: Executor<'a, Database = Postgres>,

@@ -29,13 +28,12 @@ SELECT
WHEN w.settings['disable_search_indexing']::boolean THEN
FALSE
ELSE
EXISTS (SELECT 1 FROM af_collab_embeddings m WHERE m.partition_key = $3 AND m.oid = $2)
EXISTS (SELECT 1 FROM af_collab_embeddings m WHERE m.oid = $2::uuid)
END as has_index
FROM af_workspace w
WHERE w.workspace_id = $1"#,
workspace_id,
object_id,
partition_key
object_id
)
.fetch_one(tx)
.await;

@@ -96,8 +94,7 @@ impl PgHasArrayType for Fragment {
pub async fn upsert_collab_embeddings(
transaction: &mut Transaction<'_, Postgres>,
workspace_id: &Uuid,
object_id: &str,
collab_type: CollabType,
object_id: &Uuid,
tokens_used: u32,
records: Vec<AFCollabEmbeddedChunk>,
) -> Result<(), sqlx::Error> {

@@ -107,10 +104,9 @@ pub async fn upsert_collab_embeddings(
object_id,
fragments.len()
);
sqlx::query(r#"CALL af_collab_embeddings_upsert($1, $2, $3, $4, $5::af_fragment_v3[])"#)
sqlx::query(r#"CALL af_collab_embeddings_upsert($1, $2, $3, $4::af_fragment_v3[])"#)
.bind(*workspace_id)
.bind(object_id)
.bind(crate::collab::partition_key_from_collab_type(&collab_type))
.bind(tokens_used as i32)
.bind(fragments)
.execute(transaction.deref_mut())

@@ -150,7 +146,7 @@ pub async fn stream_collabs_without_embeddings(

pub async fn update_collab_indexed_at<'a, E>(
tx: E,
object_id: &str,
object_id: &Uuid,
collab_type: &CollabType,
indexed_at: DateTime<Utc>,
) -> Result<(), Error>

@@ -176,40 +172,26 @@ where

pub async fn get_collabs_indexed_at<'a, E>(
executor: E,
collab_ids: Vec<(String, CollabType)>,
) -> Result<HashMap<String, DateTime<Utc>>, Error>
oids: Vec<Uuid>,
) -> Result<HashMap<Uuid, DateTime<Utc>>, Error>
where
E: Executor<'a, Database = Postgres>,
{
let (oids, partition_keys): (Vec<String>, Vec<i32>) = collab_ids
.into_iter()
.map(|(object_id, collab_type)| (object_id, partition_key_from_collab_type(&collab_type)))
.unzip();

let result = sqlx::query!(
r#"
SELECT oid, indexed_at
FROM af_collab
WHERE (oid, partition_key) = ANY (
SELECT UNNEST($1::text[]), UNNEST($2::int[])
)
WHERE oid = ANY (SELECT UNNEST($1::uuid[]))
"#,
&oids,
&partition_keys
&oids
)
.fetch_all(executor)
.await?;

let map = result
.into_iter()
.filter_map(|r| {
if let Some(indexed_at) = r.indexed_at {
Some((r.oid, indexed_at))
} else {
None
}
})
.collect::<HashMap<String, DateTime<Utc>>>();
.filter_map(|r| r.indexed_at.map(|indexed_at| (r.oid, indexed_at)))
.collect::<HashMap<Uuid, DateTime<Utc>>>();
Ok(map)
}

@@ -217,13 +199,13 @@ where
pub struct CollabId {
pub collab_type: CollabType,
pub workspace_id: Uuid,
pub object_id: String,
pub object_id: Uuid,
}

impl From<CollabId> for QueryCollabParams {
fn from(value: CollabId) -> Self {
QueryCollabParams {
workspace_id: value.workspace_id.to_string(),
workspace_id: value.workspace_id,
inner: QueryCollab {
object_id: value.object_id,
collab_type: value.collab_type,

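With partition keys gone, callers of get_collabs_indexed_at pass bare object ids and get back a Uuid-keyed map. A hypothetical caller sketch (the needs_indexing helper is illustrative, not from this commit):

use std::collections::HashMap;
use chrono::{DateTime, Utc};
use sqlx::PgPool;
use uuid::Uuid;

async fn needs_indexing(pool: &PgPool, oids: Vec<Uuid>) -> Result<Vec<Uuid>, sqlx::Error> {
  let indexed: HashMap<Uuid, DateTime<Utc>> =
    get_collabs_indexed_at(pool, oids.clone()).await?;
  // Anything absent from the map has no `indexed_at` timestamp yet.
  Ok(oids.into_iter().filter(|oid| !indexed.contains_key(oid)).collect())
}
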
@@ -28,16 +28,16 @@ pub async fn search_documents<'a, E: Executor<'a, Database = Postgres>>(
SELECT
em.oid AS object_id,
collab.workspace_id,
em.partition_key AS collab_type,
collab.partition_key AS collab_type,
em.content_type,
LEFT(em.content, $4) AS content_preview,
u.name AS created_by,
collab.created_at AS created_at,
em.embedding <=> $3 AS score
FROM af_collab_embeddings em
JOIN af_collab collab ON em.oid = collab.oid AND em.partition_key = collab.partition_key
JOIN af_collab collab ON em.oid = collab.oid
JOIN af_user u ON collab.owner_uid = u.uid
WHERE collab.workspace_id = $2 AND (collab.oid = ANY($7::text[]))
WHERE collab.workspace_id = $2 AND (collab.oid = ANY($7::uuid[]))
ORDER BY em.embedding <=> $3
LIMIT $5
"#,

@@ -66,13 +66,13 @@ pub struct SearchDocumentParams {
/// Embedding of the query - generated by OpenAI embedder.
pub embedding: Vec<f32>,
/// List of view ids the search is restricted to; views outside this list are not returned.
pub searchable_view_ids: Vec<String>,
pub searchable_view_ids: Vec<Uuid>,
}

#[derive(Debug, Clone, sqlx::FromRow)]
pub struct SearchDocumentItem {
/// Document identifier.
pub object_id: String,
pub object_id: Uuid,
/// Workspace identifier that the given document belongs to.
pub workspace_id: Uuid,
/// Partition key, which maps directly onto [collab_entity::CollabType].

@@ -1051,7 +1051,6 @@ pub async fn upsert_workspace_settings(
DELETE FROM af_collab_embeddings e
USING af_collab c
WHERE e.oid = c.oid
AND e.partition_key = c.partition_key
AND c.workspace_id = $1
"#,
workspace_id

@@ -25,7 +25,7 @@ impl Indexer for DocumentIndexer {
collab: &Collab,
embedding_model: EmbeddingModel,
) -> Result<Vec<AFCollabEmbeddedChunk>, AppError> {
let object_id = collab.object_id().to_string();
let object_id = collab.object_id().parse()?;
let document = DocumentBody::from_collab(collab).ok_or_else(|| {
anyhow!(
"Failed to get document body from collab `{}`: schema is missing required fields",

@@ -48,7 +48,7 @@ impl Indexer for DocumentIndexer {

fn create_embedded_chunks_from_text(
&self,
object_id: String,
object_id: Uuid,
text: String,
model: EmbeddingModel,
) -> Result<Vec<AFCollabEmbeddedChunk>, AppError> {

@@ -102,7 +102,7 @@ impl Indexer for DocumentIndexer {
}

fn split_text_into_chunks(
object_id: String,
object_id: Uuid,
content: String,
collab_type: CollabType,
embedding_model: &EmbeddingModel,

@@ -118,15 +118,14 @@ fn split_text_into_chunks(
// We assume that every token is ~4 bytes. We're going to split document content into fragments
// of ~2000 tokens each.
let split_contents = split_text_by_max_content_len(content, 8000)?;
let metadata =
json!({"id": object_id, "source": "appflowy", "name": "document", "collab_type": collab_type });
let metadata = json!({"id": object_id.to_string(), "source": "appflowy", "name": "document", "collab_type": collab_type });
Ok(
split_contents
.into_iter()
.enumerate()
.map(|(index, content)| AFCollabEmbeddedChunk {
fragment_id: Uuid::new_v4().to_string(),
object_id: object_id.clone(),
object_id,
content_type: EmbeddingContentType::PlainText,
content,
embedding: None,

@@ -9,6 +9,7 @@ use infra::env_util::get_env_var;
use std::collections::HashMap;
use std::sync::Arc;
use tracing::info;
use uuid::Uuid;

pub trait Indexer: Send + Sync {
fn create_embedded_chunks_from_collab(

@@ -19,7 +20,7 @@ pub trait Indexer: Send + Sync {

fn create_embedded_chunks_from_text(
&self,
object_id: String,
object_id: Uuid,
text: String,
model: EmbeddingModel,
) -> Result<Vec<AFCollabEmbeddedChunk>, AppError>;

@@ -5,21 +5,21 @@ use uuid::Uuid;

pub struct UnindexedCollab {
pub workspace_id: Uuid,
pub object_id: String,
pub object_id: Uuid,
pub collab_type: CollabType,
pub collab: EncodedCollab,
}

pub struct EmbeddingRecord {
pub workspace_id: Uuid,
pub object_id: String,
pub object_id: Uuid,
pub collab_type: CollabType,
pub tokens_used: u32,
pub contents: Vec<AFCollabEmbeddedChunk>,
}

impl EmbeddingRecord {
pub fn empty(workspace_id: Uuid, object_id: String, collab_type: CollabType) -> Self {
pub fn empty(workspace_id: Uuid, object_id: Uuid, collab_type: CollabType) -> Self {
Self {
workspace_id,
object_id,

@@ -241,8 +241,8 @@ impl IndexerScheduler {

pub async fn index_collab_immediately(
&self,
workspace_id: &str,
object_id: &str,
workspace_id: Uuid,
object_id: Uuid,
collab: &Collab,
collab_type: CollabType,
) -> Result<(), AppError> {

@@ -263,8 +263,8 @@ impl IndexerScheduler {
if let Some(text) = text {
if !text.is_empty() {
let pending = UnindexedCollabTask::new(
Uuid::parse_str(workspace_id)?,
object_id.to_string(),
workspace_id,
object_id,
collab_type,
UnindexedData::Text(text),
);

@@ -280,13 +280,12 @@ impl IndexerScheduler {
Ok(())
}

pub async fn can_index_workspace(&self, workspace_id: &str) -> Result<bool, AppError> {
pub async fn can_index_workspace(&self, workspace_id: &Uuid) -> Result<bool, AppError> {
if !self.index_enabled() {
return Ok(false);
}

let uuid = Uuid::parse_str(workspace_id)?;
let settings = select_workspace_settings(&self.pg_pool, &uuid).await?;
let settings = select_workspace_settings(&self.pg_pool, workspace_id).await?;
match settings {
None => Ok(true),
Some(settings) => Ok(!settings.disable_search_indexing),

@@ -343,7 +342,7 @@ async fn spawn_rayon_generate_embeddings(
records.into_par_iter().for_each(|record| {
let result = threads.install(|| {
let indexer = indexer_provider.indexer_for(record.collab_type);
match process_collab(&embedder, indexer, &record.object_id, record.data, &metrics) {
match process_collab(&embedder, indexer, record.object_id, record.data, &metrics) {
Ok(Some((tokens_used, contents))) => {
if let Err(err) = write_embedding_tx.send(EmbeddingRecord {
workspace_id: record.workspace_id,

@@ -448,7 +447,7 @@ pub(crate) async fn batch_insert_records(
let mut seen = HashSet::new();
let records = records
.into_iter()
.filter(|record| seen.insert(record.object_id.clone()))
.filter(|record| seen.insert(record.object_id))
.collect::<Vec<_>>();

let mut txn = pg_pool.begin().await?;

@@ -465,7 +464,6 @@ pub(crate) async fn batch_insert_records(
&mut txn,
&record.workspace_id,
&record.object_id,
record.collab_type,
record.tokens_used,
record.contents,
)

@@ -483,14 +481,14 @@ pub(crate) async fn batch_insert_records(
fn process_collab(
embedder: &Embedder,
indexer: Option<Arc<dyn Indexer>>,
object_id: &str,
object_id: Uuid,
data: UnindexedData,
metrics: &EmbeddingMetrics,
) -> Result<Option<(u32, Vec<AFCollabEmbeddedChunk>)>, AppError> {
if let Some(indexer) = indexer {
let chunks = match data {
UnindexedData::Text(text) => {
indexer.create_embedded_chunks_from_text(object_id.to_string(), text, embedder.model())?
indexer.create_embedded_chunks_from_text(object_id, text, embedder.model())?
},
};

@@ -516,7 +514,7 @@ fn process_collab(
#[derive(Debug, Serialize, Deserialize)]
pub struct UnindexedCollabTask {
pub workspace_id: Uuid,
pub object_id: String,
pub object_id: Uuid,
pub collab_type: CollabType,
pub data: UnindexedData,
pub created_at: i64,

@@ -525,7 +523,7 @@ pub struct UnindexedCollabTask {
impl UnindexedCollabTask {
pub fn new(
workspace_id: Uuid,
object_id: String,
object_id: Uuid,
collab_type: CollabType,
data: UnindexedData,
) -> Self {

@@ -81,7 +81,7 @@ async fn index_then_write_embedding_to_disk(
"[Embedding] process batch {:?} embeddings",
unindexed_collabs
.iter()
.map(|v| v.object_id.clone())
.map(|v| v.object_id)
.collect::<Vec<_>>()
);

@@ -173,7 +173,7 @@ async fn create_embeddings(
let indexer = indexer_provider.indexer_for(unindexed.collab_type)?;
let collab = Collab::new_with_source(
CollabOrigin::Empty,
&unindexed.object_id,
&unindexed.object_id.to_string(),
DataSource::DocStateV1(unindexed.collab.doc_state.into()),
vec![],
false,

@@ -1,12 +1,12 @@
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};

use crate::dto::chat_dto::ChatMessage;
pub use appflowy_ai_client::dto::*;
use serde::{Deserialize, Serialize};
use serde_json::{Map, Value};
use uuid::Uuid;

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SummarizeRowParams {
pub workspace_id: String,
pub workspace_id: Uuid,
pub data: SummarizeRowData,
}

@@ -13,7 +13,7 @@ pub struct CreateChatParams {
#[validate(custom(function = "validate_not_empty_str"))]
pub chat_id: String,
pub name: String,
pub rag_ids: Vec<String>,
pub rag_ids: Vec<Uuid>,
}

#[derive(Debug, Clone, Validate, Serialize, Deserialize)]

@@ -1,8 +1,8 @@
use std::collections::HashMap;

use serde::{Deserialize, Serialize};

use super::workspace_dto::{ViewIcon, ViewLayout};
use serde::{Deserialize, Serialize};
use uuid::Uuid;

/// Copied from AppFlowy-IO/AppFlowy/frontend/rust-lib/flowy-folder-pub/src/entities.rs
/// TODO(zack): make AppFlowy use from this crate instead

@@ -47,20 +47,20 @@ pub struct PublishDatabaseData {

/// The encoded collab data for the database rows
/// Use the row_id as the key
pub database_row_collabs: HashMap<String, Vec<u8>>,
pub database_row_collabs: HashMap<Uuid, Vec<u8>>,

/// The encoded collab data for the documents inside the database rows
/// It's not used for now
pub database_row_document_collabs: HashMap<String, Vec<u8>>,
pub database_row_document_collabs: HashMap<Uuid, Vec<u8>>,

/// Visible view ids
pub visible_database_view_ids: Vec<String>,
pub visible_database_view_ids: Vec<Uuid>,

/// Relation view id map
pub database_relations: HashMap<String, String>,
pub database_relations: HashMap<Uuid, Uuid>,
}

#[derive(Default, Deserialize, Serialize, Clone, Debug, Eq, PartialEq)]
pub struct DuplicatePublishedPageResponse {
pub view_id: String,
pub view_id: Uuid,
}

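One subtlety behind the key-type switch above: serde_json only accepts map keys that serialize as strings, and Uuid does (its canonical hyphenated form), so the JSON wire format is unchanged as long as the old string keys were canonical UUIDs. A small round-trip sketch (test values illustrative):

use std::collections::HashMap;
use uuid::Uuid;

fn uuid_key_round_trip() -> serde_json::Result<()> {
  let mut collabs: HashMap<Uuid, Vec<u8>> = HashMap::new();
  collabs.insert(Uuid::new_v4(), vec![1, 2, 3]);
  let json = serde_json::to_string(&collabs)?;
  // Keys serialize as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" strings.
  let back: HashMap<Uuid, Vec<u8>> = serde_json::from_str(&json)?;
  assert_eq!(collabs, back);
  Ok(())
}
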
@@ -1,6 +1,7 @@
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use serde_repr::{Deserialize_repr, Serialize_repr};
use uuid::Uuid;

/// Parameters used to customize the collab vector search query.
/// In response, a list of [SearchDocumentResponseItem] is returned.

@@ -21,11 +22,12 @@ pub struct SearchDocumentRequest {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct SearchDocumentResponseItem {
/// Unique object identifier.
pub object_id: String,
pub object_id: Uuid,
/// Workspace the result object belongs to.
pub workspace_id: String,
/// Match score of this search result to an original query.
/// The lower the better. List of results is sorted by this value by default.
pub workspace_id: Uuid,
/// Match score of this search result to an original query. Score represents cosine distance
/// between the query and the document embedding [-1.0..1.0]. The higher, the better.
/// List of results is sorted by this value by default.
pub score: f64,
/// Type of the content to be presented in preview field. This is a hint what
/// kind of content was used to match the user query ie. document plain text, pdf attachment etc.

@@ -1,3 +1,4 @@
use app_error::AppError;
use chrono::{DateTime, Utc};
use collab_entity::{CollabType, EncodedCollab};
use database_entity::dto::{AFRole, AFWebUser, AFWorkspaceInvitationStatus, PublishInfo};

@@ -131,7 +132,7 @@ pub struct RepeatedEmbeddedCollabQuery(pub Vec<EmbeddedCollabQuery>);
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct EmbeddedCollabQuery {
pub collab_type: CollabType,
pub object_id: String,
pub object_id: Uuid,
}

#[derive(Debug, Clone, Serialize)]

@@ -150,7 +151,7 @@ pub struct CollabResponse {
///
/// We can remove this 'serde(default)' after the 0325 version is stable.
#[serde(default)]
pub object_id: String,
pub object_id: Uuid,
}

#[derive(Debug, Clone, Serialize, Deserialize)]

@@ -160,7 +161,7 @@ pub struct Space {

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Page {
pub view_id: String,
pub view_id: Uuid,
}

#[derive(Debug, Clone, Serialize, Deserialize)]

@@ -182,7 +183,7 @@ pub struct UpdateSpaceParams {

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CreatePageParams {
pub parent_view_id: String,
pub parent_view_id: Uuid,
pub layout: ViewLayout,
pub name: Option<String>,
pub page_data: Option<serde_json::Value>,

@@ -239,7 +240,7 @@ pub struct CreatePageDatabaseViewParams {
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PageCollabData {
pub encoded_collab: Vec<u8>,
pub row_data: HashMap<String, Vec<u8>>,
pub row_data: HashMap<Uuid, Vec<u8>>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]

@@ -252,8 +253,8 @@ pub struct PageCollab {

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PublishedDuplicate {
pub published_view_id: String,
pub dest_view_id: String,
pub published_view_id: Uuid,
pub dest_view_id: Uuid,
}

#[derive(Default, Debug, Clone, Serialize, Deserialize)]

@@ -295,9 +296,9 @@ pub struct TrashSectionItems {

#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct FolderView {
pub view_id: String,
pub parent_view_id: String,
pub prev_view_id: Option<String>,
pub view_id: Uuid,
pub parent_view_id: Option<Uuid>,
pub prev_view_id: Option<Uuid>,
pub name: String,
pub icon: Option<ViewIcon>,
pub is_space: bool,

@@ -333,7 +334,7 @@ pub struct PublishInfoView {
#[derive(Debug, Serialize, Deserialize)]
pub struct PublishPageParams {
pub publish_name: Option<String>,
pub visible_database_view_ids: Option<Vec<String>>,
pub visible_database_view_ids: Option<Vec<Uuid>>,
pub comments_enabled: Option<bool>,
pub duplicate_enabled: Option<bool>,
}

@@ -422,15 +423,19 @@ impl ListDatabaseRowDetailParam {
with_doc: Some(with_doc),
}
}
pub fn into_ids(&self) -> Vec<&str> {
self.ids.split(',').collect()
pub fn into_ids(&self) -> Result<Vec<Uuid>, AppError> {
let mut res = Vec::new();
for uuid in self.ids.split(',') {
res.push(Uuid::parse_str(uuid)?);
}
Ok(res)
}
}

#[derive(Default, Debug, Deserialize, Serialize)]
pub struct QueryWorkspaceFolder {
pub depth: Option<u32>,
pub root_view_id: Option<String>,
pub root_view_id: Option<Uuid>,
}

#[derive(Default, Debug, Clone, Serialize, Deserialize)]

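The loop in into_ids above can also be written as a fallible collect, which is the idiomatic shape for parse-all-or-fail; a standalone sketch, with uuid::Error in place of AppError to stay self-contained:

use uuid::Uuid;

fn parse_ids(ids: &str) -> Result<Vec<Uuid>, uuid::Error> {
  // Result implements FromIterator, so one bad id short-circuits the collect.
  ids.split(',').map(Uuid::parse_str).collect()
}

Either way, a single malformed id now rejects the whole request up front instead of flowing into the query layer as an opaque string.
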
@@ -6,13 +6,13 @@ use async_trait::async_trait;
use collab::core::origin::CollabOrigin;
use collab::entity::EncodedCollab;

use crate::hierarchy_builder::{FlattedViews, WorkspaceViewBuilder};
use collab::preclude::Collab;
use collab_entity::CollabType;
use collab_folder::{
timestamp, Folder, FolderData, RepeatedViewIdentifier, ViewIdentifier, ViewLayout, Workspace,
};

use crate::hierarchy_builder::{FlattedViews, WorkspaceViewBuilder};
use uuid::Uuid;

pub mod database;
pub mod document;

@@ -63,7 +63,7 @@ pub struct WorkspaceTemplateBuilder {
}

impl WorkspaceTemplateBuilder {
pub fn new(uid: i64, workspace_id: &str) -> Self {
pub fn new(uid: i64, workspace_id: &Uuid) -> Self {
let handlers = WorkspaceTemplateHandlers::default();
Self {
uid,

@@ -162,6 +162,6 @@ impl WorkspaceTemplateBuilder {
}
}

pub fn gen_view_id() -> String {
uuid::Uuid::new_v4().to_string()
pub fn gen_view_id() -> Uuid {
uuid::Uuid::new_v4()
}

migrations/20250318120849_departition_af_collab.sql (new file, 104 lines)

@@ -0,0 +1,104 @@
set statement_timeout to 3600000;

-- create new uniform collab table
create table af_collab_temp
(
oid uuid not null primary key,
workspace_id uuid not null references public.af_workspace on delete cascade,
owner_uid bigint not null,
partition_key integer not null,
len integer,
blob bytea not null,
deleted_at timestamp with time zone,
created_at timestamp with time zone default CURRENT_TIMESTAMP,
updated_at timestamp with time zone default CURRENT_TIMESTAMP not null,
indexed_at timestamp with time zone
);

-- create a table for collabs that have non-UUID object ids
create table af_collab_conflicts
(
oid text not null primary key,
workspace_id uuid not null references public.af_workspace on delete cascade,
owner_uid bigint not null,
partition_key integer not null,
len integer,
blob bytea not null,
deleted_at timestamp with time zone,
created_at timestamp with time zone default CURRENT_TIMESTAMP,
updated_at timestamp with time zone default CURRENT_TIMESTAMP not null,
indexed_at timestamp with time zone
);

-- move non-UUID object ids to the new table (execution time: 31 secs)
insert into af_collab_conflicts(oid, workspace_id, owner_uid, partition_key, len, blob, deleted_at, created_at, updated_at, indexed_at)
select oid, workspace_id, owner_uid, partition_key, len, blob, deleted_at, created_at, updated_at, indexed_at
from af_collab
where oid !~ E'^[[:xdigit:]]{8}-([[:xdigit:]]{4}-){3}[[:xdigit:]]{12}$';

-- copy data from all collab partitions to new collab table (execution time: 7 mins)
insert into af_collab_temp(oid, workspace_id, owner_uid, partition_key, len, blob, deleted_at, created_at, updated_at, indexed_at)
select oid::uuid as oid, workspace_id, owner_uid, partition_key, len, blob, deleted_at, created_at, updated_at, indexed_at
from af_collab
where oid ~ E'^[[:xdigit:]]{8}-([[:xdigit:]]{4}-){3}[[:xdigit:]]{12}$';

-- prune embeddings table
truncate table af_collab_embeddings;
alter table af_collab_embeddings
drop constraint af_collab_embeddings_oid_partition_key_fkey;

-- replace af_collab table
drop table af_collab;
alter table af_collab_temp rename to af_collab;

-- modify embeddings to make use of new uuid columns
alter table af_collab_embeddings
alter column oid type uuid using oid::uuid;
create index if not exists ix_af_collab_embeddings_oid
on af_collab_embeddings(oid);

-- add foreign key constraint to af_collab_embeddings
alter table af_collab_embeddings
add constraint fk_af_collab_embeddings_oid foreign key (oid)
references af_collab (oid) on delete cascade;
alter table af_collab_embeddings drop partition_key;

-- add trigger for af_collab.updated_at
create trigger set_updated_at
before insert or update
on af_collab
for each row
execute procedure update_updated_at_column();

-- add remaining indexes to new af_collab table (execution time: 25 sec + 25 sec)
create index if not exists idx_workspace_id_on_af_collab
on af_collab (workspace_id);
create index if not exists idx_af_collab_updated_at
on af_collab (updated_at);

create or replace procedure af_collab_embeddings_upsert(IN p_workspace_id uuid, IN p_oid uuid, IN p_tokens_used integer, IN p_fragments af_fragment_v3[])
language plpgsql
as
$$
BEGIN
DELETE FROM af_collab_embeddings WHERE oid = p_oid;
INSERT INTO af_collab_embeddings (fragment_id, oid, content_type, content, embedding, indexed_at, metadata, fragment_index, embedder_type)
SELECT
f.fragment_id,
p_oid,
f.content_type,
f.contents,
f.embedding,
NOW(),
f.metadata,
f.fragment_index,
f.embedder_type
FROM UNNEST(p_fragments) as f;

-- Update the usage tracking table
INSERT INTO af_workspace_ai_usage(created_at, workspace_id, search_requests, search_tokens_consumed, index_tokens_consumed)
VALUES (now()::date, p_workspace_id, 0, 0, p_tokens_used)
ON CONFLICT (created_at, workspace_id)
DO UPDATE SET index_tokens_consumed = af_workspace_ai_usage.index_tokens_consumed + p_tokens_used;
END
$$;

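A hedged post-migration sanity check, not part of this commit: if the pre-migration row count was captured, the departitioned table plus the conflict table should account for every row. A sketch in Rust against the tables created above (function name and wiring are illustrative):

use sqlx::PgPool;

async fn verify_departition(pool: &PgPool, expected_total: i64) -> Result<bool, sqlx::Error> {
  // Rows with UUID oids land in af_collab, everything else in af_collab_conflicts.
  let migrated: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM af_collab")
    .fetch_one(pool)
    .await?;
  let conflicts: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM af_collab_conflicts")
    .fetch_one(pool)
    .await?;
  Ok(migrated + conflicts == expected_total)
}
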
@@ -8,6 +8,8 @@ use collab_rt_entity::user::RealtimeUser;
pub use collab_rt_entity::RealtimeMessage;
use serde_repr::{Deserialize_repr, Serialize_repr};
use std::fmt::Debug;
use uuid::Uuid;

#[derive(Debug, Message, Clone)]
#[rtype(result = "Result<(), RealtimeError>")]
pub struct Connect {

@@ -49,8 +51,8 @@ pub struct ClientHttpStreamMessage {
#[rtype(result = "Result<(), AppError>")]
pub struct ClientHttpUpdateMessage {
pub user: RealtimeUser,
pub workspace_id: String,
pub object_id: String,
pub workspace_id: Uuid,
pub object_id: Uuid,
/// Encoded yrs::Update or doc state
pub update: Bytes,
/// If the state_vector is not None, it will calculate missing updates based on

@@ -65,7 +67,7 @@ pub struct ClientHttpUpdateMessage {
#[derive(Message)]
#[rtype(result = "Result<(), AppError>")]
pub struct ClientGenerateEmbeddingMessage {
pub workspace_id: String,
pub object_id: String,
pub workspace_id: Uuid,
pub object_id: Uuid,
pub return_tx: Option<tokio::sync::oneshot::Sender<Result<(), AppError>>>,
}

@@ -1,15 +1,15 @@
 use std::sync::Arc;
 use std::time::Duration;
 
-use async_trait::async_trait;
-use tokio_stream::wrappers::{BroadcastStream, ReceiverStream};
-use tokio_stream::StreamExt;
-use tracing::{error, trace};
-
 use access_control::collab::RealtimeAccessControl;
+use async_trait::async_trait;
 use collab_rt_entity::user::RealtimeUser;
 use collab_rt_entity::ClientCollabMessage;
 use collab_rt_entity::{MessageByObjectId, RealtimeMessage};
+use tokio_stream::wrappers::{BroadcastStream, ReceiverStream};
+use tokio_stream::StreamExt;
+use tracing::{error, trace};
+use uuid::Uuid;
 
 use crate::util::channel_ext::UnboundedSenderSink;
 
@@ -55,9 +55,9 @@ impl ClientMessageRouter {
   ///
   pub fn init_client_communication<T>(
     &mut self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     user: &RealtimeUser,
-    object_id: &str,
+    object_id: Uuid,
     access_control: Arc<dyn RealtimeAccessControl>,
   ) -> (UnboundedSenderSink<T>, ReceiverStream<MessageByObjectId>)
   where
@@ -65,19 +65,17 @@ impl ClientMessageRouter {
   {
     let client_ws_sink = self.sink.clone();
     let mut stream_rx = BroadcastStream::new(self.stream_tx.subscribe());
-    let target_object_id = object_id.to_string();
 
     // Send the message to the connected websocket client. When the client receive the message,
     // it will apply the changes.
     let (client_sink_tx, mut client_sink_rx) = tokio::sync::mpsc::unbounded_channel::<T>();
     let sink_access_control = access_control.clone();
-    let sink_workspace_id = workspace_id.to_string();
     let uid = user.uid;
     let client_sink = UnboundedSenderSink::<T>::new(client_sink_tx);
     tokio::spawn(async move {
      while let Some(msg) = client_sink_rx.recv().await {
        let result = sink_access_control
-          .can_read_collab(&sink_workspace_id, &uid, &target_object_id)
+          .can_read_collab(&workspace_id, &uid, &object_id)
          .await;
        match result {
          Ok(is_allowed) => {
@@ -85,7 +83,7 @@ impl ClientMessageRouter {
              let rt_msg = msg.into();
              client_ws_sink.do_send(rt_msg);
            } else {
-              trace!("user:{} is not allowed to read {}", uid, target_object_id);
+              trace!("user:{} is not allowed to read {}", uid, object_id);
              tokio::time::sleep(Duration::from_secs(2)).await;
            }
          },
@@ -96,14 +94,13 @@ impl ClientMessageRouter {
        }
      }
    });
-    let target_object_id = object_id.to_string();
-    let stream_workspace_id = workspace_id.to_string();
     let user = user.clone();
     // stream_rx continuously receive messages from the websocket client and then
     // forward the message to the subscriber which is the broadcast channel [CollabBroadcast].
     let (client_msg_rx, rx) = tokio::sync::mpsc::channel(100);
     let client_stream = ReceiverStream::new(rx);
     tokio::spawn(async move {
+      let target_object_id = object_id.to_string();
      while let Some(Ok(messages_by_oid)) = stream_rx.next().await {
        for (message_object_id, original_messages) in messages_by_oid.into_inner() {
          // if the message is not for the target object, skip it. The stream_rx receives different
@@ -116,9 +113,9 @@ impl ClientMessageRouter {
          // valid_messages contains the messages that the user is allowed to apply
          // invalid_message contains the messages that the user is not allowed to apply
          let (valid_messages, invalid_message) = Self::access_control(
-            &stream_workspace_id,
+            &workspace_id,
            &user.uid,
-            &message_object_id,
+            &object_id,
            access_control.clone(),
            original_messages,
          )
@@ -164,9 +161,9 @@ impl ClientMessageRouter {
 
   #[inline]
   async fn access_control(
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    object_id: &str,
+    object_id: &Uuid,
     access_control: Arc<dyn RealtimeAccessControl>,
     messages: Vec<ClientCollabMessage>,
   ) -> (Vec<ClientCollabMessage>, Vec<ClientCollabMessage>) {
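
Worth noting why the `to_string()` shuffling above disappears: `Uuid` is `Copy`, so each spawned task can capture the IDs by value without cloning owned `String`s first. A toy illustration of that property (nothing here is from the codebase):

use uuid::Uuid;

#[tokio::main]
async fn main() {
  let object_id = Uuid::new_v4();
  // `Uuid` is `Copy`: the async block gets its own bit-for-bit copy,
  // and `object_id` stays usable afterwards -- no `.clone()` needed.
  let task = tokio::spawn(async move {
    println!("inside task: {}", object_id);
  });
  println!("still usable here: {}", object_id);
  task.await.unwrap();
}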
@@ -1,14 +1,13 @@
-use async_trait::async_trait;
-use std::sync::Arc;
-use uuid::Uuid;
-
 use crate::collab::cache::CollabCache;
 use access_control::act::Action;
 use access_control::collab::CollabAccessControl;
 use access_control::workspace::WorkspaceAccessControl;
 use app_error::AppError;
+use async_trait::async_trait;
 use database::collab::CollabStorageAccessControl;
 use database_entity::dto::AFAccessLevel;
+use std::sync::Arc;
+use uuid::Uuid;
 
 #[derive(Clone)]
 pub struct CollabStorageAccessControlImpl {
@@ -22,7 +21,7 @@ impl CollabStorageAccessControl for CollabStorageAccessControlImpl {
   async fn update_policy(
     &self,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
     level: AFAccessLevel,
   ) -> Result<(), AppError> {
     self
@@ -33,9 +32,9 @@ impl CollabStorageAccessControl for CollabStorageAccessControlImpl {
 
   async fn enforce_read_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
   ) -> Result<(), AppError> {
     let collab_exists = self.cache.is_exist(workspace_id, oid).await?;
     if !collab_exists {
@@ -51,9 +50,9 @@ impl CollabStorageAccessControl for CollabStorageAccessControlImpl {
 
   async fn enforce_write_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    oid: &str,
+    oid: &Uuid,
   ) -> Result<(), AppError> {
     let collab_exists = self.cache.is_exist(workspace_id, oid).await?;
     if !collab_exists {
@@ -67,15 +66,19 @@ impl CollabStorageAccessControl for CollabStorageAccessControlImpl {
       .await
   }
 
-  async fn enforce_write_workspace(&self, uid: &i64, workspace_id: &str) -> Result<(), AppError> {
-    let workspace_id = Uuid::parse_str(workspace_id)?;
+  async fn enforce_write_workspace(&self, uid: &i64, workspace_id: &Uuid) -> Result<(), AppError> {
     self
       .workspace_access_control
       .enforce_action(uid, workspace_id, Action::Write)
       .await
   }
 
-  async fn enforce_delete(&self, workspace_id: &str, uid: &i64, oid: &str) -> Result<(), AppError> {
+  async fn enforce_delete(
+    &self,
+    workspace_id: &Uuid,
+    uid: &i64,
+    oid: &Uuid,
+  ) -> Result<(), AppError> {
     self
       .collab_access_control
       .enforce_access_level(workspace_id, uid, oid, AFAccessLevel::FullAccess)
@@ -1,19 +1,19 @@
-use super::disk_cache::CollabDiskCache;
-use super::mem_cache::{cache_exp_secs_from_collab_type, CollabMemCache};
-use crate::CollabMetrics;
-use app_error::AppError;
 use bytes::Bytes;
 use collab::entity::EncodedCollab;
 use collab_entity::CollabType;
-use database::file::s3_client_impl::AwsS3BucketClientImpl;
-use database_entity::dto::{CollabParams, PendingCollabWrite, QueryCollab, QueryCollabResult};
 use futures_util::{stream, StreamExt};
 use itertools::{Either, Itertools};
 use sqlx::{PgPool, Transaction};
 use std::collections::HashMap;
 use std::sync::Arc;
 use tracing::{error, event, Level};
 
+use super::disk_cache::CollabDiskCache;
+use super::mem_cache::{cache_exp_secs_from_collab_type, CollabMemCache};
+use crate::CollabMetrics;
+use app_error::AppError;
+use database::file::s3_client_impl::AwsS3BucketClientImpl;
+use database_entity::dto::{CollabParams, PendingCollabWrite, QueryCollab, QueryCollabResult};
+use uuid::Uuid;
 
 #[derive(Clone)]
 pub struct CollabCache {
@@ -48,7 +48,7 @@ impl CollabCache {
 
   pub async fn bulk_insert_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     uid: &i64,
     params_list: Vec<CollabParams>,
   ) -> Result<(), AppError> {
@@ -86,7 +86,7 @@ impl CollabCache {
 
   pub async fn get_encode_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     query: QueryCollab,
   ) -> Result<EncodedCollab, AppError> {
     // Attempt to retrieve encoded collab from memory cache, falling back to disk cache if necessary.
@@ -100,7 +100,7 @@ impl CollabCache {
     }
 
     // Retrieve from disk cache as fallback. After retrieval, the value is inserted into the memory cache.
-    let object_id = query.object_id.clone();
+    let object_id = query.object_id;
     let expiration_secs = cache_exp_secs_from_collab_type(&query.collab_type);
     let encode_collab = self
       .disk_cache
@@ -123,9 +123,9 @@ impl CollabCache {
   /// returns a hashmap of the object_id to the encoded collab data.
   pub async fn batch_get_encode_collab<T: Into<QueryCollab>>(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     queries: Vec<T>,
-  ) -> HashMap<String, QueryCollabResult> {
+  ) -> HashMap<Uuid, QueryCollabResult> {
     let queries = queries.into_iter().map(Into::into).collect::<Vec<_>>();
     let mut results = HashMap::new();
     // 1. Processes valid queries against the in-memory cache to retrieve cached values.
@@ -139,7 +139,7 @@ impl CollabCache {
       {
         None => Either::Left(params),
         Some(data) => Either::Right((
-          params.object_id.clone(),
+          params.object_id,
           QueryCollabResult::Success {
             encode_collab_v1: data,
           },
@@ -166,13 +166,13 @@ impl CollabCache {
   /// The data is inserted into both the memory and disk cache.
   pub async fn insert_encode_collab_data(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
     params: CollabParams,
     transaction: &mut Transaction<'_, sqlx::Postgres>,
   ) -> Result<(), AppError> {
     let collab_type = params.collab_type;
-    let object_id = params.object_id.clone();
+    let object_id = params.object_id;
     let encode_collab_data = params.encoded_collab_v1.clone();
     let s3 = self.disk_cache.s3_client();
     CollabDiskCache::upsert_collab_with_transaction(
@@ -192,7 +192,7 @@ impl CollabCache {
     Ok(())
   }
 
-  fn cache_collab(&self, object_id: String, collab_type: CollabType, encode_collab_data: Bytes) {
+  fn cache_collab(&self, object_id: Uuid, collab_type: CollabType, encode_collab_data: Bytes) {
     let mem_cache = self.mem_cache.clone();
     tokio::spawn(async move {
       if let Err(err) = mem_cache
@@ -214,7 +214,7 @@ impl CollabCache {
 
   pub async fn insert_encode_collab_to_disk(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
     params: CollabParams,
   ) -> Result<(), AppError> {
@@ -227,7 +227,7 @@ impl CollabCache {
     Ok(())
   }
 
-  pub async fn delete_collab(&self, workspace_id: &str, object_id: &str) -> Result<(), AppError> {
+  pub async fn delete_collab(&self, workspace_id: &Uuid, object_id: &Uuid) -> Result<(), AppError> {
     self.mem_cache.remove_encode_collab(object_id).await?;
     self
       .disk_cache
@@ -236,7 +236,7 @@ impl CollabCache {
     Ok(())
   }
 
-  pub async fn is_exist(&self, workspace_id: &str, oid: &str) -> Result<bool, AppError> {
+  pub async fn is_exist(&self, workspace_id: &Uuid, oid: &Uuid) -> Result<bool, AppError> {
     if let Ok(value) = self.mem_cache.is_exist(oid).await {
       if value {
         return Ok(value);
@@ -255,7 +255,7 @@ impl CollabCache {
       .iter()
       .map(|r| {
         (
-          r.params.object_id.clone(),
+          r.params.object_id,
           r.params.encoded_collab_v1.clone(),
           cache_exp_secs_from_collab_type(&r.params.collab_type),
         )
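
The cache layering above follows a standard read-through pattern: try memory, fall back to disk, then backfill memory on the way out. A stripped-down sketch of that shape, with invented `MemCache`/`DiskCache` stand-ins rather than the real types:

use std::collections::HashMap;
use uuid::Uuid;

// Invented stand-ins for illustration only.
struct MemCache(HashMap<Uuid, Vec<u8>>);
struct DiskCache(HashMap<Uuid, Vec<u8>>);

fn get_read_through(mem: &mut MemCache, disk: &DiskCache, oid: Uuid) -> Option<Vec<u8>> {
  // 1. Fast path: in-memory hit.
  if let Some(v) = mem.0.get(&oid) {
    return Some(v.clone());
  }
  // 2. Fall back to the slower tier, then backfill memory for next time.
  let v = disk.0.get(&oid)?.clone();
  mem.0.insert(oid, v.clone());
  Some(v)
}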
@@ -1,18 +1,9 @@
-use anyhow::{anyhow, Context};
-use bytes::Bytes;
-use collab::entity::{EncodedCollab, EncoderVersion};
-use sqlx::{Error, PgPool, Transaction};
-use std::collections::HashMap;
-use std::ops::DerefMut;
-use std::sync::Arc;
-use std::time::{Duration, Instant};
-use tokio::task::JoinSet;
-use tokio::time::sleep;
-use tracing::{error, instrument};
-
 use crate::collab::cache::encode_collab_from_bytes;
 use crate::CollabMetrics;
+use anyhow::{anyhow, Context};
 use app_error::AppError;
+use bytes::Bytes;
+use collab::entity::{EncodedCollab, EncoderVersion};
 use database::collab::{
   batch_select_collab_blob, insert_into_af_collab, insert_into_af_collab_bulk_for_user,
   is_collab_exists, select_blob_from_af_collab, AppResult,
@@ -22,6 +13,15 @@ use database::file::{BucketClient, ResponseBlob};
 use database_entity::dto::{
   CollabParams, PendingCollabWrite, QueryCollab, QueryCollabResult, ZSTD_COMPRESSION_LEVEL,
 };
+use sqlx::{Error, PgPool, Transaction};
+use std::collections::HashMap;
+use std::ops::DerefMut;
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+use tokio::task::JoinSet;
+use tokio::time::sleep;
+use tracing::{error, instrument};
+use uuid::Uuid;
 
 #[derive(Clone)]
 pub struct CollabDiskCache {
@@ -46,7 +46,7 @@ impl CollabDiskCache {
     }
   }
 
-  pub async fn is_exist(&self, workspace_id: &str, object_id: &str) -> AppResult<bool> {
+  pub async fn is_exist(&self, workspace_id: &Uuid, object_id: &Uuid) -> AppResult<bool> {
     let dir = collab_key_prefix(workspace_id, object_id);
     let resp = self.s3.list_dir(&dir, 1).await?;
     if resp.is_empty() {
@@ -59,7 +59,7 @@ impl CollabDiskCache {
 
   pub async fn upsert_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
     params: CollabParams,
   ) -> AppResult<()> {
@@ -100,7 +100,7 @@ impl CollabDiskCache {
   }
 
   pub async fn upsert_collab_with_transaction(
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
     mut params: CollabParams,
     transaction: &mut Transaction<'_, sqlx::Postgres>,
@@ -133,7 +133,7 @@ impl CollabDiskCache {
   #[instrument(level = "trace", skip_all)]
   pub async fn get_collab_encoded_from_disk(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     query: QueryCollab,
   ) -> Result<EncodedCollab, AppError> {
     tracing::debug!("try get {}:{} from s3", query.collab_type, query.object_id);
@@ -205,7 +205,7 @@ impl CollabDiskCache {
   //FIXME: this and `batch_insert_collab` duplicate similar logic.
   pub async fn bulk_insert_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     uid: &i64,
     mut params_list: Vec<CollabParams>,
   ) -> Result<(), AppError> {
@@ -216,7 +216,7 @@ impl CollabDiskCache {
     let mut delete_from_s3 = Vec::new();
     let mut blobs = HashMap::new();
     for param in params_list.iter_mut() {
-      let key = collab_key(workspace_id, &param.object_id);
+      let key = collab_key(&workspace_id, &param.object_id);
       if param.encoded_collab_v1.len() > self.s3_collab_threshold {
         let blob = std::mem::take(&mut param.encoded_collab_v1);
         blobs.insert(key, blob);
@@ -315,9 +315,9 @@ impl CollabDiskCache {
 
   pub async fn batch_get_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     queries: Vec<QueryCollab>,
-  ) -> HashMap<String, QueryCollabResult> {
+  ) -> HashMap<Uuid, QueryCollabResult> {
     let mut results = HashMap::new();
     let not_found = batch_get_collab_from_s3(&self.s3, workspace_id, queries, &mut results).await;
     let s3_fetch = results.len() as u64;
@@ -328,7 +328,7 @@ impl CollabDiskCache {
     results
   }
 
-  pub async fn delete_collab(&self, workspace_id: &str, object_id: &str) -> AppResult<()> {
+  pub async fn delete_collab(&self, workspace_id: &Uuid, object_id: &Uuid) -> AppResult<()> {
     sqlx::query!(
       r#"
         UPDATE af_collab
@@ -420,19 +420,19 @@ async fn batch_put_collab_to_s3(
 
 async fn batch_get_collab_from_s3(
   s3: &AwsS3BucketClientImpl,
-  workspace_id: &str,
+  workspace_id: &Uuid,
   params: Vec<QueryCollab>,
-  results: &mut HashMap<String, QueryCollabResult>,
+  results: &mut HashMap<Uuid, QueryCollabResult>,
 ) -> Vec<QueryCollab> {
   enum GetResult {
-    Found(String, Vec<u8>),
+    Found(Uuid, Vec<u8>),
     NotFound(QueryCollab),
-    Error(String, String),
+    Error(Uuid, String),
   }
 
   async fn gather(
     join_set: &mut JoinSet<GetResult>,
-    results: &mut HashMap<String, QueryCollabResult>,
+    results: &mut HashMap<Uuid, QueryCollabResult>,
     not_found: &mut Vec<QueryCollab>,
   ) {
     while let Some(result) = join_set.join_next().await {
@@ -499,11 +499,11 @@ async fn batch_get_collab_from_s3(
   not_found
 }
 
-fn collab_key_prefix(workspace_id: &str, object_id: &str) -> String {
+fn collab_key_prefix(workspace_id: &Uuid, object_id: &Uuid) -> String {
   format!("collabs/{}/{}/", workspace_id, object_id)
 }
 
-fn collab_key(workspace_id: &str, object_id: &str) -> String {
+fn collab_key(workspace_id: &Uuid, object_id: &Uuid) -> String {
   format!(
     "collabs/{}/{}/encoded_collab.v1.zstd",
     workspace_id, object_id
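
Because `Uuid` implements `Display` with the canonical hyphenated form, switching the key helpers from `&str` to `&Uuid` should leave the S3 object layout unchanged. A quick standalone check of that assumption:

use uuid::Uuid;

fn main() {
  let ws: Uuid = "550e8400-e29b-41d4-a716-446655440000".parse().unwrap();
  let oid = Uuid::new_v4();
  // Same "collabs/{workspace}/{object}/..." layout as before the migration.
  let key = format!("collabs/{}/{}/encoded_collab.v1.zstd", ws, oid);
  assert!(key.starts_with("collabs/550e8400-e29b-41d4-a716-446655440000/"));
  println!("{key}");
}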
@@ -1,14 +1,14 @@
-use crate::collab::cache::encode_collab_from_bytes;
-use crate::CollabMetrics;
-use anyhow::anyhow;
-use app_error::AppError;
 use collab::entity::EncodedCollab;
 use collab_entity::CollabType;
-use database::collab::CollabMetadata;
 use redis::{pipe, AsyncCommands};
 use std::sync::Arc;
 use tracing::{error, instrument, trace};
 
+use crate::collab::cache::encode_collab_from_bytes;
+use crate::CollabMetrics;
+use app_error::AppError;
+use database::collab::CollabMetadata;
+use uuid::Uuid;
 
 const SEVEN_DAYS: u64 = 604800;
 const ONE_MONTH: u64 = 2592000;
@@ -43,7 +43,7 @@ impl CollabMemCache {
     Ok(())
   }
 
-  pub async fn get_collab_meta(&self, object_id: &str) -> Result<CollabMetadata, AppError> {
+  pub async fn get_collab_meta(&self, object_id: &Uuid) -> Result<CollabMetadata, AppError> {
     let key = collab_meta_key(object_id);
     let value: Option<String> = self
       .connection_manager
@@ -66,7 +66,7 @@ impl CollabMemCache {
   }
 
   /// Checks if an object with the given ID exists in the cache.
-  pub async fn is_exist(&self, object_id: &str) -> Result<bool, AppError> {
+  pub async fn is_exist(&self, object_id: &Uuid) -> Result<bool, AppError> {
     let cache_object_id = encode_collab_key(object_id);
     let exists: bool = self
       .connection_manager
@@ -77,7 +77,7 @@ impl CollabMemCache {
     Ok(exists)
   }
 
-  pub async fn remove_encode_collab(&self, object_id: &str) -> Result<(), AppError> {
+  pub async fn remove_encode_collab(&self, object_id: &Uuid) -> Result<(), AppError> {
     let cache_object_id = encode_collab_key(object_id);
     self
       .connection_manager
@@ -92,7 +92,7 @@ impl CollabMemCache {
     })
   }
 
-  pub async fn get_encode_collab_data(&self, object_id: &str) -> Option<Vec<u8>> {
+  pub async fn get_encode_collab_data(&self, object_id: &Uuid) -> Option<Vec<u8>> {
     match self.get_data_with_timestamp(object_id).await {
       Ok(None) => None,
       Ok(Some((_, bytes))) => Some(bytes),
@@ -104,7 +104,7 @@ impl CollabMemCache {
   }
 
   #[instrument(level = "trace", skip_all)]
-  pub async fn get_encode_collab(&self, object_id: &str) -> Option<EncodedCollab> {
+  pub async fn get_encode_collab(&self, object_id: &Uuid) -> Option<EncodedCollab> {
     match self.get_encode_collab_data(object_id).await {
       Some(bytes) => encode_collab_from_bytes(bytes).await.ok(),
       None => {
@@ -120,7 +120,7 @@ impl CollabMemCache {
   #[instrument(level = "trace", skip_all, fields(object_id=%object_id))]
   pub async fn insert_encode_collab(
     &self,
-    object_id: &str,
+    object_id: &Uuid,
     encoded_collab: EncodedCollab,
     timestamp: i64,
     expiration_seconds: u64,
@@ -149,7 +149,7 @@ impl CollabMemCache {
   /// if the expiration_seconds is None, the data will be expired after 7 days.
   pub async fn insert_encode_collab_data(
     &self,
-    object_id: &str,
+    object_id: &Uuid,
     data: &[u8],
     timestamp: i64,
     expiration_seconds: Option<u64>,
@@ -175,7 +175,7 @@ impl CollabMemCache {
   /// A Redis result indicating the success or failure of the operation.
   async fn insert_data_with_timestamp(
     &self,
-    object_id: &str,
+    object_id: &Uuid,
     data: &[u8],
     timestamp: i64,
     expiration_seconds: Option<u64>,
@@ -257,7 +257,7 @@ impl CollabMemCache {
   /// The function returns `Ok(None)` if no data is found for the given `object_id`.
   async fn get_data_with_timestamp(
     &self,
-    object_id: &str,
+    object_id: &Uuid,
   ) -> redis::RedisResult<Option<(i64, Vec<u8>)>> {
     let cache_object_id = encode_collab_key(object_id);
     let mut conn = self.connection_manager.clone();
@@ -296,12 +296,12 @@ impl CollabMemCache {
 /// changing the prefix, allowing the old data to expire naturally.
 ///
 #[inline]
-fn encode_collab_key(object_id: &str) -> String {
+fn encode_collab_key(object_id: &Uuid) -> String {
   format!("encode_collab_v0:{}", object_id)
 }
 
 #[inline]
-fn collab_meta_key(object_id: &str) -> String {
+fn collab_meta_key(object_id: &Uuid) -> String {
   format!("collab_meta_v0:{}", object_id)
 }
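
The `insert_data_with_timestamp`/`get_data_with_timestamp` pair implements last-write-wins by timestamp. A minimal in-process sketch of the same idea (the real code does this against Redis; the `HashMap` here is only illustrative):

use std::collections::HashMap;
use uuid::Uuid;

/// Keep a value only if it is newer than what is already cached.
fn insert_if_newer(
  cache: &mut HashMap<Uuid, (i64, Vec<u8>)>,
  oid: Uuid,
  timestamp: i64,
  data: Vec<u8>,
) -> bool {
  match cache.get(&oid) {
    // An equal-or-newer entry already exists: drop the stale write.
    Some((existing, _)) if *existing >= timestamp => false,
    _ => {
      cache.insert(oid, (timestamp, data));
      true
    },
  }
}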
@@ -93,7 +93,7 @@ where
 
   async fn insert_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
     params: CollabParams,
   ) -> AppResult<()> {
@@ -106,7 +106,7 @@ where
 
   async fn check_write_workspace_permission(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
   ) -> Result<(), AppError> {
     // If the collab doesn't exist, check if the user has enough permissions to create collab.
@@ -120,9 +120,9 @@ where
 
   async fn check_write_collab_permission(
     &self,
-    workspace_id: &str,
+    workspace_id: &Uuid,
     uid: &i64,
-    object_id: &str,
+    object_id: &Uuid,
   ) -> Result<(), AppError> {
     // If the collab already exists, check if the user has enough permissions to update collab
     self
@@ -131,8 +131,7 @@ where
       .await?;
     Ok(())
   }
-  async fn get_encode_collab_from_editing(&self, oid: &str) -> Option<EncodedCollab> {
-    let object_id = oid.to_string();
+  async fn get_encode_collab_from_editing(&self, object_id: Uuid) -> Option<EncodedCollab> {
     let (ret, rx) = tokio::sync::oneshot::channel();
     let timeout_duration = Duration::from_secs(5);
 
@@ -153,20 +152,20 @@ where
     match timeout(timeout_duration, rx).await {
       Ok(Ok(Some(encode_collab))) => Some(encode_collab),
       Ok(Ok(None)) => {
-        trace!("Editing collab not found: `{}`", oid);
+        trace!("Editing collab not found: `{}`", object_id);
         None
       },
       Ok(Err(err)) => {
         error!(
           "Failed to get collab from realtime server `{}`: {}",
-          oid, err
+          object_id, err
         );
         None
       },
       Err(_) => {
         error!(
           "Timeout trying to read collab `{}` from realtime server",
-          oid
+          object_id
         );
         None
       },
@@ -175,8 +174,8 @@ where
 
   async fn batch_get_encode_collab_from_editing(
     &self,
-    object_ids: Vec<String>,
-  ) -> HashMap<String, EncodedCollab> {
+    object_ids: Vec<Uuid>,
+  ) -> HashMap<Uuid, EncodedCollab> {
     let (ret, rx) = tokio::sync::oneshot::channel();
     let timeout_duration = Duration::from_secs(10);
 
@@ -209,7 +208,7 @@ where
 
   async fn queue_insert_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     uid: &i64,
     params: CollabParams,
   ) -> Result<(), AppError> {
@@ -225,7 +224,7 @@ where
       )));
     }
 
-    let pending = PendingCollabWrite::new(workspace_id.into(), *uid, params);
+    let pending = PendingCollabWrite::new(workspace_id, *uid, params);
     if let Err(e) = self.queue.send(pending).await {
       error!("Failed to queue insert collab doc state: {}", e);
     }
@@ -234,7 +233,7 @@ where
 
   async fn batch_insert_collabs(
     &self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     uid: &i64,
     params_list: Vec<CollabParams>,
   ) -> Result<(), AppError> {
@@ -250,7 +249,7 @@ where
   /// * `collab_messages` - The list of collab messages to broadcast.
   pub async fn broadcast_encode_collab(
     &self,
-    object_id: String,
+    object_id: Uuid,
     collab_messages: Vec<ClientCollabMessage>,
   ) -> Result<(), AppError> {
     let (sender, recv) = tokio::sync::oneshot::channel();
@@ -291,22 +290,25 @@ where
 {
   async fn queue_insert_or_update_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     uid: &i64,
     params: CollabParams,
     flush_to_disk: bool,
   ) -> AppResult<()> {
     params.validate()?;
-    let is_exist = self.cache.is_exist(workspace_id, &params.object_id).await?;
+    let is_exist = self
+      .cache
+      .is_exist(&workspace_id, &params.object_id)
+      .await?;
     // If the collab already exists, check if the user has enough permissions to update collab
     // Otherwise, check if the user has enough permissions to create collab.
     if is_exist {
       self
-        .check_write_collab_permission(workspace_id, uid, &params.object_id)
+        .check_write_collab_permission(&workspace_id, uid, &params.object_id)
         .await?;
     } else {
       self
-        .check_write_workspace_permission(workspace_id, uid)
+        .check_write_workspace_permission(&workspace_id, uid)
        .await?;
       trace!(
         "Update policy for user:{} to create collab:{}",
@@ -319,7 +321,7 @@ where
         .await?;
     }
     if flush_to_disk {
-      self.insert_collab(workspace_id, uid, params).await?;
+      self.insert_collab(&workspace_id, uid, params).await?;
     } else {
       self.queue_insert_collab(workspace_id, uid, params).await?;
     }
@@ -328,12 +330,12 @@ where
 
   async fn batch_insert_new_collab(
     &self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     uid: &i64,
     params_list: Vec<CollabParams>,
   ) -> AppResult<()> {
     self
-      .check_write_workspace_permission(workspace_id, uid)
+      .check_write_workspace_permission(&workspace_id, uid)
       .await?;
 
     // TODO(nathan): batch insert permission
@@ -362,7 +364,7 @@ where
   #[allow(clippy::blocks_in_conditions)]
   async fn upsert_new_collab_with_transaction(
     &self,
-    workspace_id: &str,
+    workspace_id: Uuid,
     uid: &i64,
     params: CollabParams,
     transaction: &mut Transaction<'_, sqlx::Postgres>,
@@ -370,7 +372,7 @@ where
   ) -> AppResult<()> {
     params.validate()?;
     self
-      .check_write_workspace_permission(workspace_id, uid)
+      .check_write_workspace_permission(&workspace_id, uid)
       .await?;
     self
       .access_control
@@ -381,7 +383,7 @@ where
       Duration::from_secs(120),
       self
         .cache
-        .insert_encode_collab_data(workspace_id, uid, params, transaction),
+        .insert_encode_collab_data(&workspace_id, uid, params, transaction),
     )
     .await
     {
@@ -419,7 +421,7 @@ where
     // Early return if editing collab is initialized, as it indicates no need to query further.
     if from_editing_collab {
       // Attempt to retrieve encoded collab from the editing collab
-      if let Some(value) = self.get_encode_collab_from_editing(&params.object_id).await {
+      if let Some(value) = self.get_encode_collab_from_editing(params.object_id).await {
        trace!(
          "Did get encode collab {} from editing collab",
          params.object_id
@@ -438,10 +440,10 @@ where
   async fn batch_get_collab(
     &self,
     _uid: &i64,
-    workspace_id: &str,
+    workspace_id: Uuid,
     queries: Vec<QueryCollab>,
     from_editing_collab: bool,
-  ) -> HashMap<String, QueryCollabResult> {
+  ) -> HashMap<Uuid, QueryCollabResult> {
     if queries.is_empty() {
       return HashMap::new();
     }
@@ -462,14 +464,9 @@ where
     let cache_queries = if from_editing_collab {
       let editing_queries = valid_queries.clone();
       let editing_results = self
-        .batch_get_encode_collab_from_editing(
-          editing_queries
-            .iter()
-            .map(|q| q.object_id.clone())
-            .collect(),
-        )
+        .batch_get_encode_collab_from_editing(editing_queries.iter().map(|q| q.object_id).collect())
        .await;
-      let editing_query_collab_results: HashMap<String, QueryCollabResult> =
+      let editing_query_collab_results: HashMap<Uuid, QueryCollabResult> =
        tokio::task::spawn_blocking(move || {
          let par_iter = editing_results.into_par_iter();
          par_iter
@@ -484,13 +481,13 @@ where
              },
            };
 
-            (object_id.clone(), query_collab_result)
+            (object_id, query_collab_result)
          })
          .collect()
        })
        .await
        .unwrap();
-      let editing_object_ids: Vec<String> = editing_query_collab_results.keys().cloned().collect();
+      let editing_object_ids: Vec<_> = editing_query_collab_results.keys().cloned().collect();
      results.extend(editing_query_collab_results);
      valid_queries
        .into_iter()
@@ -503,13 +500,13 @@ where
     results.extend(
       self
         .cache
-        .batch_get_encode_collab(workspace_id, cache_queries)
+        .batch_get_encode_collab(&workspace_id, cache_queries)
         .await,
     );
     results
   }
 
-  async fn delete_collab(&self, workspace_id: &str, uid: &i64, object_id: &str) -> AppResult<()> {
+  async fn delete_collab(&self, workspace_id: &Uuid, uid: &i64, object_id: &Uuid) -> AppResult<()> {
     self
       .access_control
       .enforce_delete(workspace_id, uid, object_id)
@@ -518,7 +515,11 @@ where
     Ok(())
   }
 
-  async fn should_create_snapshot(&self, workspace_id: &str, oid: &str) -> Result<bool, AppError> {
+  async fn should_create_snapshot(
+    &self,
+    workspace_id: &Uuid,
+    oid: &Uuid,
+  ) -> Result<bool, AppError> {
     self
       .snapshot_control
       .should_create_snapshot(workspace_id, oid)
@@ -535,8 +536,8 @@ where
 
   async fn get_collab_snapshot(
     &self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     snapshot_id: &i64,
   ) -> AppResult<SnapshotData> {
     self
@@ -547,8 +548,8 @@ where
 
   async fn get_latest_snapshot(
     &self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
   ) -> AppResult<Option<SnapshotData>> {
     self
@@ -559,8 +560,8 @@ where
 
   async fn get_collab_snapshot_list(
     &self,
-    workspace_id: &str,
-    oid: &str,
+    workspace_id: &Uuid,
+    oid: &Uuid,
   ) -> AppResult<AFSnapshotMetas> {
     self
       .snapshot_control
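
The write path above hinges on one branch: if the collab already exists the caller needs update rights on that object, otherwise it needs create rights on the workspace. A condensed sketch of the decision (the trait and names are invented for illustration, not the real access-control API):

use uuid::Uuid;

// Invented stand-in for the real access-control service.
trait Acl {
  fn can_write_collab(&self, ws: &Uuid, uid: i64, oid: &Uuid) -> bool;
  fn can_write_workspace(&self, ws: &Uuid, uid: i64) -> bool;
}

fn may_upsert(acl: &dyn Acl, exists: bool, ws: &Uuid, uid: i64, oid: &Uuid) -> bool {
  if exists {
    // Updating an existing collab: needs write access to that object.
    acl.can_write_collab(ws, uid, oid)
  } else {
    // Creating a new collab: needs write access to the workspace.
    acl.can_write_workspace(ws, uid)
  }
}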
@@ -15,22 +15,24 @@ use std::{
   sync::{Arc, Weak},
 };
 use tracing::error;
+use uuid::Uuid;
 
 pub type CLCommandSender = tokio::sync::mpsc::Sender<CollaborationCommand>;
 pub type CLCommandReceiver = tokio::sync::mpsc::Receiver<CollaborationCommand>;
 
 pub type EncodeCollabSender = tokio::sync::oneshot::Sender<Option<EncodedCollab>>;
-pub type BatchEncodeCollabSender = tokio::sync::oneshot::Sender<HashMap<String, EncodedCollab>>;
+pub type BatchEncodeCollabSender = tokio::sync::oneshot::Sender<HashMap<Uuid, EncodedCollab>>;
 pub enum CollaborationCommand {
   GetEncodeCollab {
-    object_id: String,
+    object_id: Uuid,
     ret: EncodeCollabSender,
   },
   BatchGetEncodeCollab {
-    object_ids: Vec<String>,
+    object_ids: Vec<Uuid>,
     ret: BatchEncodeCollabSender,
   },
   ServerSendCollabMessage {
-    object_id: String,
+    object_id: Uuid,
     collab_messages: Vec<ClientCollabMessage>,
     ret: tokio::sync::oneshot::Sender<Result<(), RealtimeError>>,
   },
@@ -40,7 +42,7 @@ const BATCH_GET_ENCODE_COLLAB_CONCURRENCY: usize = 10;
 
 pub(crate) fn spawn_collaboration_command<S>(
   mut command_recv: CLCommandReceiver,
-  group_sender_by_object_id: &Arc<DashMap<String, GroupCommandSender>>,
+  group_sender_by_object_id: &Arc<DashMap<Uuid, GroupCommandSender>>,
   weak_groups: Weak<GroupManager<S>>,
 ) where
   S: CollabStorage,
@@ -53,10 +55,7 @@ pub(crate) fn spawn_collaboration_command<S>(
         match group_sender_by_object_id.get(&object_id) {
           Some(sender) => {
             if let Err(err) = sender
-              .send(GroupCommand::EncodeCollab {
-                object_id: object_id.clone(),
-                ret,
-              })
+              .send(GroupCommand::EncodeCollab { object_id, ret })
              .await
            {
              error!("Send group command error: {}", err);
@@ -85,7 +84,7 @@ pub(crate) fn spawn_collaboration_command<S>(
           .collect::<Vec<_>>()
           .await;
 
-        let mut outputs: HashMap<String, EncodedCollab> = HashMap::new();
+        let mut outputs: HashMap<_, EncodedCollab> = HashMap::new();
         for (object_id, encoded_collab) in tasks.into_iter().flatten() {
           if let Some(encoded_collab) = encoded_collab {
             outputs.insert(object_id, encoded_collab);
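
Each command variant pairs a request with a `oneshot` channel for the reply -- the usual actor-style request/response pattern over an `mpsc` queue. A self-contained sketch of that round trip (simplified types, not the real `CollaborationCommand`):

use tokio::sync::{mpsc, oneshot};
use uuid::Uuid;

// Simplified command: ask for a value by id, reply on a oneshot channel.
enum Command {
  Get { object_id: Uuid, ret: oneshot::Sender<Option<String>> },
}

#[tokio::main]
async fn main() {
  let (tx, mut rx) = mpsc::channel::<Command>(16);

  // Actor task: owns the state and answers requests one at a time.
  tokio::spawn(async move {
    while let Some(Command::Get { object_id, ret }) = rx.recv().await {
      let _ = ret.send(Some(format!("payload for {object_id}")));
    }
  });

  // Caller: send the request, then await the reply.
  let (ret, reply) = oneshot::channel();
  tx.send(Command::Get { object_id: Uuid::new_v4(), ret }).await.unwrap();
  println!("{:?}", reply.await.unwrap());
}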
@@ -81,6 +81,9 @@ pub enum RealtimeError {
 
   #[error("failed to send ws message: {0}")]
   SendWSMessageFailed(String),
+
+  #[error("failed to parse UUID: {0}")]
+  Uuid(#[from] uuid::Error),
 }
 
 #[derive(Debug)]
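
The new `Uuid` variant uses `#[from]`, which is what lets the handlers elsewhere in this commit write `Uuid::parse_str(...)?` and have `uuid::Error` converted into `RealtimeError` automatically. A minimal standalone reproduction of the mechanism, assuming the `thiserror` crate:

use thiserror::Error;
use uuid::Uuid;

#[derive(Debug, Error)]
enum MyError {
  // `#[from]` generates `impl From<uuid::Error> for MyError`,
  // so `?` converts parse failures implicitly.
  #[error("failed to parse UUID: {0}")]
  Uuid(#[from] uuid::Error),
}

fn parse(raw: &str) -> Result<Uuid, MyError> {
  Ok(Uuid::parse_str(raw)?)
}

fn main() {
  assert!(parse("not-a-uuid").is_err());
  assert!(parse("550e8400-e29b-41d4-a716-446655440000").is_ok());
}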
@@ -20,6 +20,7 @@ use collab_rt_entity::{
 use collab_rt_protocol::{Message, SyncMessage};
 use database::collab::CollabStorage;
 use tracing::{error, instrument, trace, warn};
+use uuid::Uuid;
 use yrs::updates::encoder::Encode;
 use yrs::StateVector;
 
@@ -30,32 +31,32 @@ use yrs::StateVector;
 pub enum GroupCommand {
   HandleClientCollabMessage {
     user: RealtimeUser,
-    object_id: String,
+    object_id: Uuid,
     collab_messages: Vec<ClientCollabMessage>,
     ret: tokio::sync::oneshot::Sender<Result<(), RealtimeError>>,
   },
   HandleClientHttpUpdate {
     user: RealtimeUser,
-    workspace_id: String,
-    object_id: String,
+    workspace_id: Uuid,
+    object_id: Uuid,
     update: Bytes,
     collab_type: CollabType,
     ret: tokio::sync::oneshot::Sender<Result<(), RealtimeError>>,
   },
   EncodeCollab {
-    object_id: String,
+    object_id: Uuid,
     ret: tokio::sync::oneshot::Sender<Option<EncodedCollab>>,
   },
   HandleServerCollabMessage {
-    object_id: String,
+    object_id: Uuid,
     collab_messages: Vec<ClientCollabMessage>,
     ret: tokio::sync::oneshot::Sender<Result<(), RealtimeError>>,
   },
   GenerateCollabEmbedding {
-    object_id: String,
+    object_id: Uuid,
   },
   CalculateMissingUpdate {
-    object_id: String,
+    object_id: Uuid,
     state_vector: StateVector,
     ret: tokio::sync::oneshot::Sender<Result<Vec<u8>, RealtimeError>>,
   },
@@ -81,7 +82,7 @@ impl<S> GroupCommandRunner<S>
 where
   S: CollabStorage,
 {
-  pub async fn run(mut self, object_id: String) {
+  pub async fn run(mut self, object_id: Uuid) {
     let mut receiver = self.recv.take().expect("Only take once");
     let stream = stream! {
       while let Some(msg) = receiver.recv().await {
@@ -135,13 +136,7 @@ where
         ret,
       } => {
         let result = self
-          .handle_client_posted_http_update(
-            &user,
-            &workspace_id,
-            &object_id,
-            collab_type,
-            update,
-          )
+          .handle_client_posted_http_update(&user, workspace_id, object_id, collab_type, update)
          .await;
        if let Err(err) = ret.send(result) {
          warn!("Send handle client update message result fail: {:?}", err);
@@ -163,7 +158,7 @@ where
         let group = self.group_manager.get_group(&object_id).await;
         match group {
           None => {
-            let _ = ret.send(Err(RealtimeError::GroupNotFound(object_id.clone())));
+            let _ = ret.send(Err(RealtimeError::GroupNotFound(object_id.to_string())));
          },
          Some(group) => {
            let result = group.calculate_missing_update(state_vector).await;
@@ -191,7 +186,7 @@ where
   async fn handle_client_collab_message(
     &self,
     user: &RealtimeUser,
-    object_id: String,
+    object_id: Uuid,
     messages: Vec<ClientCollabMessage>,
   ) -> Result<(), RealtimeError> {
     if messages.is_empty() {
@@ -260,8 +255,8 @@ where
   async fn handle_client_posted_http_update(
     &self,
     user: &RealtimeUser,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: collab_entity::CollabType,
     update: Bytes,
   ) -> Result<(), RealtimeError> {
@@ -281,7 +276,7 @@ where
     }
 
     // Create group if it's not exist
-    let is_group_exist = self.group_manager.contains_group(object_id);
+    let is_group_exist = self.group_manager.contains_group(&object_id);
     if !is_group_exist {
       trace!("The group:{} is not found, create a new group", object_id);
       self
@@ -290,7 +285,7 @@ where
     }
 
     // Only subscribe when the user is not subscribed to the group
-    if !self.group_manager.contains_user(object_id, user) {
+    if !self.group_manager.contains_user(&object_id, user) {
       self.subscribe_group(user, object_id, &origin).await?;
     }
     if let Some(client_stream) = self.msg_router_by_user.get(user) {
@@ -322,7 +317,7 @@ where
   #[instrument(level = "trace", skip_all)]
   async fn handle_server_collab_messages(
     &self,
-    object_id: String,
+    object_id: Uuid,
     messages: Vec<ClientCollabMessage>,
   ) -> Result<(), RealtimeError> {
     if messages.is_empty() {
@@ -346,7 +341,7 @@ where
       NullSender::default(),
       message_by_oid_receiver,
     );
-    let message = HashMap::from([(object_id.clone(), messages)]);
+    let message = HashMap::from([(object_id.to_string(), messages)]);
     if let Err(err) = message_by_oid_sender.try_send(MessageByObjectId(message)) {
       error!(
         "failed to send message to group: {}, object_id: {}",
@@ -363,7 +358,7 @@ where
     user: &RealtimeUser,
     collab_message: &ClientCollabMessage,
   ) -> Result<(), RealtimeError> {
-    let object_id = collab_message.object_id();
+    let object_id = Uuid::parse_str(collab_message.object_id())?;
     let message_origin = collab_message.origin();
     self.subscribe_group(user, object_id, message_origin).await
   }
@@ -371,7 +366,7 @@ where
   async fn subscribe_group(
     &self,
     user: &RealtimeUser,
-    object_id: &str,
+    object_id: Uuid,
     collab_origin: &CollabOrigin,
   ) -> Result<(), RealtimeError> {
     match self.msg_router_by_user.get_mut(user) {
@@ -399,11 +394,12 @@ where
     user: &RealtimeUser,
     collab_message: &ClientCollabMessage,
   ) -> Result<(), RealtimeError> {
-    let object_id = collab_message.object_id();
+    let object_id = Uuid::parse_str(collab_message.object_id())?;
     match collab_message {
       ClientCollabMessage::ClientInitSync { data, .. } => {
+        let workspace_id = Uuid::parse_str(&data.workspace_id)?;
         self
-          .create_group(user, &data.workspace_id, object_id, data.collab_type)
+          .create_group(user, workspace_id, object_id, data.collab_type)
          .await?;
        Ok(())
      },
@@ -415,8 +411,8 @@ where
   async fn create_group(
     &self,
     user: &RealtimeUser,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: collab_entity::CollabType,
   ) -> Result<(), RealtimeError> {
     self
@@ -433,7 +429,7 @@ where
 #[inline]
 pub async fn forward_message_to_group(
   user: &RealtimeUser,
-  object_id: String,
+  object_id: Uuid,
   collab_messages: Vec<ClientCollabMessage>,
   client_msg_router: &Arc<DashMap<RealtimeUser, ClientMessageRouter>>,
 ) {
@@ -448,7 +444,7 @@ pub async fn forward_message_to_group(
       .map(|v| v.msg_id())
       .collect::<Vec<_>>()
   );
-  let message = MessageByObjectId::new_with_message(object_id, collab_messages);
+  let message = MessageByObjectId::new_with_message(object_id.to_string(), collab_messages);
   let err = client_stream.stream_tx.send(message);
   if let Err(err) = err {
     warn!("Send user:{} message to group:{}", user.uid, err);
@@ -49,8 +49,8 @@ pub struct CollabGroup {
 /// Inner state of [CollabGroup] that's private and hidden behind Arc, so that it can be moved into
 /// tasks.
 struct CollabGroupState {
-  workspace_id: String,
-  object_id: String,
+  workspace_id: Uuid,
+  object_id: Uuid,
   collab_type: CollabType,
   /// A list of subscribers to this group. Each subscriber will receive updates from the
   /// broadcast.
@@ -77,8 +77,8 @@ impl CollabGroup {
   #[allow(clippy::too_many_arguments)]
   pub async fn new<S>(
     uid: i64,
-    workspace_id: String,
-    object_id: String,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
     metrics: Arc<CollabRealtimeMetrics>,
     storage: Arc<S>,
@@ -94,8 +94,8 @@ impl CollabGroup {
     let is_new_collab = state_vector.is_empty();
     let persister = CollabPersister::new(
       uid,
-      workspace_id.clone(),
-      object_id.clone(),
+      workspace_id,
+      object_id,
       collab_type,
       storage,
       collab_redis_stream,
@@ -160,13 +160,13 @@ impl CollabGroup {
   }
 
   #[inline]
-  pub fn workspace_id(&self) -> &str {
+  pub fn workspace_id(&self) -> &Uuid {
     &self.state.workspace_id
   }
 
   #[inline]
   #[allow(dead_code)]
-  pub fn object_id(&self) -> &str {
+  pub fn object_id(&self) -> &Uuid {
     &self.state.object_id
   }
 
@@ -233,7 +233,7 @@ impl CollabGroup {
         seq_num
       );
       let payload = Message::Sync(SyncMessage::Update(update.data)).encode_v1();
-      let message = BroadcastSync::new(update.sender, state.object_id.clone(), payload, seq_num);
+      let message = BroadcastSync::new(update.sender, state.object_id.to_string(), payload, seq_num);
      for mut e in state.subscribers.iter_mut() {
        let subscription = e.value_mut();
        if message.origin == subscription.collab_origin {
@@ -255,8 +255,7 @@ impl CollabGroup {
 
   /// Task used to receive awareness updates from Redis.
   async fn inbound_awareness_task(state: Arc<CollabGroupState>) -> Result<(), RealtimeError> {
-    let object_id = Uuid::parse_str(&state.object_id)
-      .map_err(|e| RealtimeError::CollabSchemaError(format!("invalid uuid: {}", e)))?;
+    let object_id = state.object_id;
     let updates = state
       .persister
       .collab_redis_stream
@@ -290,7 +289,7 @@ impl CollabGroup {
       );
       let sender = update.sender;
       let message = AwarenessSync::new(
-        state.object_id.clone(),
+        state.object_id.to_string(),
         Message::Awareness(update.data.encode_v1()).encode_v1(),
         CollabOrigin::Empty,
       );
@@ -355,14 +354,14 @@ impl CollabGroup {
       .map_err(|e| AppError::Internal(e.into()))?;
     let collab = Collab::new_with_source(
       CollabOrigin::Server,
-      self.object_id(),
+      &self.object_id().to_string(),
       DataSource::DocStateV1(collab.doc_state.into()),
       vec![],
       false,
     )
     .map_err(|e| AppError::Internal(e.into()))?;
-    let workspace_id = &self.state.workspace_id;
-    let object_id = &self.state.object_id;
+    let workspace_id = self.state.workspace_id;
+    let object_id = self.state.object_id;
     let collab_type = self.state.collab_type;
     self
       .state
@@ -387,7 +386,7 @@ impl CollabGroup {
     let encoded_collab = self.encode_collab().await?;
     let collab = Collab::new_with_source(
       CollabOrigin::Server,
-      self.object_id(),
+      &self.object_id().to_string(),
       DataSource::DocStateV1(encoded_collab.doc_state.into()),
       vec![],
       false,
@@ -519,8 +518,9 @@ impl CollabGroup {
   where
     Sink: SubscriptionSink + 'static,
   {
+    let object_id = state.object_id.to_string();
     for (message_object_id, messages) in msg.0 {
-      if state.object_id != message_object_id {
+      if object_id != message_object_id {
        error!(
          "Expect object id:{} but got:{}",
          state.object_id, message_object_id
@@ -869,8 +869,8 @@ impl Drop for Subscription {
 
 struct CollabPersister {
   uid: i64,
-  workspace_id: String,
-  object_id: String,
+  workspace_id: Uuid,
+  object_id: Uuid,
   collab_type: CollabType,
   storage: Arc<dyn CollabStorage>,
   collab_redis_stream: Arc<CollabRedisStream>,
@@ -887,8 +887,8 @@ impl CollabPersister {
   #[allow(clippy::too_many_arguments)]
   pub async fn new(
     uid: i64,
-    workspace_id: String,
-    object_id: String,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
     storage: Arc<dyn CollabStorage>,
     collab_redis_stream: Arc<CollabRedisStream>,
@@ -954,7 +954,12 @@ impl CollabPersister {
     let start = Instant::now();
     let mut collab = match self.load_collab_full().await? {
       Some(collab) => collab,
-      None => Collab::new_with_origin(CollabOrigin::Server, self.object_id.clone(), vec![], false),
+      None => Collab::new_with_origin(
+        CollabOrigin::Server,
+        self.object_id.to_string(),
+        vec![],
+        false,
+      ),
     };
     self.metrics.load_collab_count.inc();
 
@@ -1016,9 +1021,12 @@ impl CollabPersister {
     if collab.is_none() {
       collab = Some(match self.load_collab_full().await? {
         Some(collab) => collab,
-        None => {
-          Collab::new_with_origin(CollabOrigin::Server, self.object_id.clone(), vec![], false)
-        },
+        None => Collab::new_with_origin(
+          CollabOrigin::Server,
+          self.object_id.to_string(),
+          vec![],
+          false,
+        ),
       })
     };
     let collab = collab.as_mut().unwrap();
@@ -1092,7 +1100,7 @@ impl CollabPersister {
     // perform snapshot at the same time, so we'll use lease to let only one of them atm.
     if let Some(mut lease) = self
       .collab_redis_stream
-      .lease(&self.workspace_id, &self.object_id)
+      .lease(&self.workspace_id.to_string(), &self.object_id.to_string())
       .await?
     {
       let doc_state_light = collab
@@ -1149,42 +1157,36 @@ impl CollabPersister {
       .metrics
       .collab_size
       .observe(encoded_collab.len() as f64);
-    let params = CollabParams::new(&self.object_id, self.collab_type, encoded_collab);
+    let params = CollabParams::new(self.object_id, self.collab_type, encoded_collab);
     self
       .storage
-      .queue_insert_or_update_collab(&self.workspace_id, &self.uid, params, true)
+      .queue_insert_or_update_collab(self.workspace_id, &self.uid, params, true)
       .await
       .map_err(|err| RealtimeError::Internal(err.into()))?;
     Ok(())
   }
 
   fn index_collab_content(&self, text: String) {
-    if let Ok(workspace_id) = Uuid::parse_str(&self.workspace_id) {
-      let indexed_collab = UnindexedCollabTask::new(
-        workspace_id,
-        self.object_id.clone(),
-        self.collab_type,
-        UnindexedData::Text(text),
+    let indexed_collab = UnindexedCollabTask::new(
+      self.workspace_id,
+      self.object_id,
+      self.collab_type,
+      UnindexedData::Text(text),
     );
-      if let Err(err) = self
-        .indexer_scheduler
-        .index_pending_collab_one(indexed_collab, false)
-      {
-        warn!(
-          "failed to index collab `{}/{}`: {}",
-          self.workspace_id, self.object_id, err
-        );
+    if let Err(err) = self
+      .indexer_scheduler
+      .index_pending_collab_one(indexed_collab, false)
+    {
+      warn!(
+        "failed to index collab `{}/{}`: {}",
+        self.workspace_id, self.object_id, err
+      );
-      }
     }
   }
 
   async fn load_collab_full(&self) -> Result<Option<Collab>, RealtimeError> {
     // we didn't find a snapshot, or we want a lightweight collab version
-    let params = QueryCollabParams::new(
-      self.object_id.clone(),
-      self.collab_type,
-      self.workspace_id.clone(),
-    );
+    let params = QueryCollabParams::new(self.object_id, self.collab_type, self.workspace_id);
     let result = self
       .storage
       .get_encode_collab(GetCollabOrigin::Server, params, false)
@@ -1197,7 +1199,7 @@ impl CollabPersister {
 
     let collab: Collab = Collab::new_with_source(
       CollabOrigin::Server,
-      &self.object_id,
+      &self.object_id.to_string(),
       DataSource::DocStateV1(doc_state.into()),
       vec![],
       false,
@@ -14,6 +14,7 @@ use collab_stream::client::CollabRedisStream;
 use database::collab::{CollabStorage, GetCollabOrigin};
 use database_entity::dto::QueryCollabParams;
 use tracing::{instrument, trace};
+use uuid::Uuid;
 use yrs::{ReadTxn, StateVector};
 
 use crate::client::client_msg_router::ClientMessageRouter;
@@ -61,11 +62,11 @@ where
     })
   }
 
-  pub fn get_inactive_groups(&self) -> Vec<String> {
+  pub fn get_inactive_groups(&self) -> Vec<Uuid> {
     self.state.remove_inactive_groups()
   }
 
-  pub fn contains_user(&self, object_id: &str, user: &RealtimeUser) -> bool {
+  pub fn contains_user(&self, object_id: &Uuid, user: &RealtimeUser) -> bool {
     self.state.contains_user(object_id, user)
   }
 
@@ -73,27 +74,27 @@ where
     self.state.remove_user(user);
   }
 
-  pub fn contains_group(&self, object_id: &str) -> bool {
+  pub fn contains_group(&self, object_id: &Uuid) -> bool {
     self.state.contains_group(object_id)
   }
 
-  pub async fn get_group(&self, object_id: &str) -> Option<Arc<CollabGroup>> {
+  pub async fn get_group(&self, object_id: &Uuid) -> Option<Arc<CollabGroup>> {
     self.state.get_group(object_id).await
   }
 
   pub async fn subscribe_group(
     &self,
     user: &RealtimeUser,
-    object_id: &str,
+    object_id: Uuid,
     message_origin: &CollabOrigin,
     client_msg_router: &mut ClientMessageRouter,
   ) -> Result<(), RealtimeError> {
     // Lock the group and subscribe the user to the group.
-    if let Some(mut e) = self.state.get_mut_group(object_id).await {
+    if let Some(mut e) = self.state.get_mut_group(&object_id).await {
       let group = e.value_mut();
       trace!("[realtime]: {} subscribe group:{}", user, object_id,);
       let (sink, stream) = client_msg_router.init_client_communication::<CollabMessage>(
-        group.workspace_id(),
+        *group.workspace_id(),
        user,
        object_id,
        self.access_control.clone(),
@@ -114,8 +115,8 @@ where
   pub async fn create_group(
     &self,
     user: &RealtimeUser,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
   ) -> Result<(), RealtimeError> {
     let params = QueryCollabParams::new(object_id, collab_type, workspace_id);
@@ -126,7 +127,7 @@ where
     let state_vector = match res {
       Ok(collab) => Collab::new_with_source(
         CollabOrigin::Server,
-        object_id,
+        &object_id.to_string(),
        DataSource::DocStateV1(collab.doc_state.into()),
        vec![],
        false,
@@ -147,8 +148,8 @@ where
 
     let group = CollabGroup::new(
       user.uid,
-      workspace_id.to_string(),
-      object_id.to_string(),
+      workspace_id,
+      object_id,
       collab_type,
       self.metrics_calculate.clone(),
       self.storage.clone(),
@@ -168,7 +169,7 @@ where
 #[instrument(level = "trace", skip_all)]
 async fn load_collab<S>(
   uid: i64,
-  object_id: &str,
+  object_id: &Uuid,
   params: QueryCollabParams,
   storage: Arc<S>,
 ) -> Result<(Collab, EncodedCollab), AppError>
@@ -180,7 +181,7 @@ where
     .await?;
   let result = Collab::new_with_source(
     CollabOrigin::Server,
-    object_id,
+    &object_id.to_string(),
     DataSource::DocStateV1(encode_collab.doc_state.to_vec()),
     vec![],
     false,
@@ -194,7 +195,7 @@ where
 }
 
 async fn load_collab_from_snapshot<S>(
-  object_id: &str,
+  object_id: &Uuid,
   params: QueryCollabParams,
   storage: Arc<S>,
 ) -> Option<(Collab, EncodedCollab)>
@@ -210,7 +211,7 @@ where
     .await?;
   let collab = Collab::new_with_source(
     CollabOrigin::Server,
-    object_id,
+    &object_id.to_string(),
     DataSource::DocStateV1(encode_collab.doc_state.to_vec()),
     vec![],
     false,
@@ -220,8 +221,8 @@ where
 }
 
 async fn get_latest_snapshot<S>(
-  workspace_id: &str,
-  object_id: &str,
+  workspace_id: &Uuid,
+  object_id: &Uuid,
   storage: &S,
   collab_type: &CollabType,
 ) -> Option<EncodedCollab>
@@ -234,14 +235,15 @@ where
     .ok()?
     .0;
   for meta in metas {
+    let object_id = Uuid::parse_str(&meta.object_id).ok()?;
     let snapshot_data = storage
-      .get_collab_snapshot(workspace_id, &meta.object_id, &meta.snapshot_id)
+      .get_collab_snapshot(*workspace_id, object_id, &meta.snapshot_id)
      .await
      .ok()?;
    if let Ok(encoded_collab) = EncodedCollab::decode_from_bytes(&snapshot_data.encoded_collab_v1) {
      if let Ok(collab) = Collab::new_with_source(
        CollabOrigin::Empty,
-        object_id,
+        &object_id.to_string(),
        DataSource::DocStateV1(encoded_collab.doc_state.to_vec()),
        vec![],
        false,
@@ -6,6 +6,7 @@ use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
 use std::sync::{Arc, Weak};
 use tokio::time::sleep;
 use tracing::{error, trace};
+use uuid::Uuid;
 use yrs::TransactionMut;

 use database::collab::CollabStorage;

@@ -13,8 +14,8 @@ use database_entity::dto::InsertSnapshotParams;

 /// [HistoryPlugin] will be moved to history collab server. For now, it's temporarily placed here.
 pub struct HistoryPlugin<S> {
-  workspace_id: String,
-  object_id: String,
+  workspace_id: Uuid,
+  object_id: Uuid,
   collab_type: CollabType,
   storage: Arc<S>,
   did_create_snapshot: AtomicBool,

@@ -29,8 +30,8 @@ where
 {
   #[allow(dead_code)]
   pub fn new(
-    workspace_id: String,
-    object_id: String,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
     weak_collab: Weak<RwLock<Collab>>,
     storage: Arc<S>,

@@ -51,8 +52,8 @@ where
   async fn enqueue_snapshot(
     weak_collab: Weak<RwLock<Collab>>,
     storage: Arc<S>,
-    workspace_id: String,
-    object_id: String,
+    workspace_id: Uuid,
+    object_id: Uuid,
     collab_type: CollabType,
   ) -> Result<(), anyhow::Error> {
     trace!("trying to enqueue snapshot for object_id: {}", object_id);

@@ -97,9 +98,9 @@ where
     let storage = self.storage.clone();
     let weak_collab = self.weak_collab.clone();
     let collab_type = self.collab_type;
-    let object_id = self.object_id.clone();
-    let workspace_id = self.workspace_id.clone();
-
+    let workspace_id = self.workspace_id;
+    let object_id = self.object_id;
     tokio::spawn(async move {
       sleep(std::time::Duration::from_secs(2)).await;
       match storage
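The `.clone()` calls disappear above because `Uuid` is a 16-byte `Copy` type, unlike the old `String` fields. A sketch of why a plain binding suffices before moving ids into a spawned task (`std::thread::spawn` stands in for `tokio::spawn` to keep the example dependency-free):

```rust
use uuid::Uuid;

struct HistoryState {
    workspace_id: Uuid, // previously String, which had to be cloned
    object_id: Uuid,
}

fn main() {
    let state = HistoryState {
        workspace_id: Uuid::new_v4(),
        object_id: Uuid::new_v4(),
    };
    // Uuid implements Copy, so a plain binding copies the 16 bytes;
    // no allocation and no `.clone()` needed before moving into a task.
    let workspace_id = state.workspace_id;
    let object_id = state.object_id;
    std::thread::spawn(move || {
        println!("snapshot {workspace_id}/{object_id}");
    })
    .join()
    .unwrap();
}
```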
@@ -1,3 +1,8 @@
+use crate::config::get_env_var;
+use crate::error::RealtimeError;
+use crate::group::group_init::CollabGroup;
+use crate::metrics::CollabRealtimeMetrics;
+use collab_rt_entity::user::RealtimeUser;
 use dashmap::mapref::one::RefMut;
 use dashmap::try_result::TryResult;
 use dashmap::DashMap;

@@ -6,16 +11,11 @@ use std::sync::Arc;
 use std::time::Duration;
 use tokio::time::sleep;
 use tracing::{error, event, trace, warn};
-
-use crate::config::get_env_var;
-use crate::error::RealtimeError;
-use crate::group::group_init::CollabGroup;
-use crate::metrics::CollabRealtimeMetrics;
-use collab_rt_entity::user::RealtimeUser;
+use uuid::Uuid;

 #[derive(Clone)]
 pub(crate) struct GroupManagementState {
-  group_by_object_id: Arc<DashMap<String, Arc<CollabGroup>>>,
+  group_by_object_id: Arc<DashMap<Uuid, Arc<CollabGroup>>>,
   /// Keep track of all [Collab] objects that a user is subscribed to.
   editing_by_user: Arc<DashMap<RealtimeUser, HashSet<Editing>>>,
   metrics_calculate: Arc<CollabRealtimeMetrics>,

@@ -37,12 +37,12 @@ impl GroupManagementState {
   }

   /// Returns group ids of inactive groups.
-  pub fn remove_inactive_groups(&self) -> Vec<String> {
+  pub fn remove_inactive_groups(&self) -> Vec<Uuid> {
     let mut inactive_group_ids = vec![];
     for entry in self.group_by_object_id.iter() {
       let (object_id, group) = (entry.key(), entry.value());
       if group.is_inactive() {
-        inactive_group_ids.push(object_id.clone());
+        inactive_group_ids.push(*object_id);
         if inactive_group_ids.len() > self.remove_batch_size {
           break;
         }

@@ -57,7 +57,7 @@ impl GroupManagementState {
     inactive_group_ids
   }

-  pub async fn get_group(&self, object_id: &str) -> Option<Arc<CollabGroup>> {
+  pub async fn get_group(&self, object_id: &Uuid) -> Option<Arc<CollabGroup>> {
     let mut attempts = 0;
     let max_attempts = 3;
     let retry_delay = Duration::from_millis(100);

@@ -85,8 +85,8 @@ impl GroupManagementState {
   /// may deadlock when holding the RefMut and trying to read group_by_object_id.
   pub(crate) async fn get_mut_group(
     &self,
-    object_id: &str,
-  ) -> Option<RefMut<String, Arc<CollabGroup>>> {
+    object_id: &Uuid,
+  ) -> Option<RefMut<Uuid, Arc<CollabGroup>>> {
     let mut attempts = 0;
     let max_attempts = 3;
     let retry_delay = Duration::from_millis(300);

@@ -108,14 +108,12 @@ impl GroupManagementState {
     }
   }

-  pub(crate) fn insert_group(&self, object_id: &str, group: CollabGroup) {
-    self
-      .group_by_object_id
-      .insert(object_id.to_string(), group.into());
+  pub(crate) fn insert_group(&self, object_id: Uuid, group: CollabGroup) {
+    self.group_by_object_id.insert(object_id, group.into());
     self.metrics_calculate.opening_collab_count.inc();
   }

-  pub(crate) fn contains_group(&self, object_id: &str) -> bool {
+  pub(crate) fn contains_group(&self, object_id: &Uuid) -> bool {
     if let Some(group) = self.group_by_object_id.get(object_id) {
       let cancelled = group.is_cancelled();
       !cancelled

@@ -124,7 +122,7 @@ impl GroupManagementState {
     }
   }

-  pub(crate) fn remove_group(&self, object_id: &str) {
+  pub(crate) fn remove_group(&self, object_id: &Uuid) {
     let group_not_found = self.group_by_object_id.remove(object_id).is_none();
     if group_not_found {
       // Log error if the group doesn't exist

@@ -139,11 +137,9 @@ impl GroupManagementState {
   pub(crate) fn insert_user(
     &self,
     user: &RealtimeUser,
-    object_id: &str,
+    object_id: Uuid,
   ) -> Result<(), RealtimeError> {
-    let editing = Editing {
-      object_id: object_id.to_string(),
-    };
+    let editing = Editing { object_id };

     let entry = self.editing_by_user.entry(user.clone());
     match entry {

@@ -189,7 +185,7 @@ impl GroupManagementState {
     }
   }

-  pub fn contains_user(&self, object_id: &str, user: &RealtimeUser) -> bool {
+  pub fn contains_user(&self, object_id: &Uuid, user: &RealtimeUser) -> bool {
     match self.group_by_object_id.try_get(object_id) {
       TryResult::Present(entry) => entry.value().contains_user(user),
       TryResult::Absent => false,

@@ -203,5 +199,5 @@ impl GroupManagementState {

 #[derive(Debug, Hash, PartialEq, Eq, Clone)]
 struct Editing {
-  pub object_id: String,
+  pub object_id: Uuid,
 }
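Keying the `DashMap` by `Uuid` instead of `String` works because `Uuid` is `Hash + Eq + Copy`, so inserts take the id by value and key collection is a plain copy, the pattern `remove_inactive_groups` uses above. A self-contained sketch:

```rust
use dashmap::DashMap;
use uuid::Uuid;

fn main() {
    // Uuid implements Hash + Eq, so it can key a DashMap directly;
    // lookups take &Uuid and inserts take Uuid by value (it is Copy).
    let groups: DashMap<Uuid, &'static str> = DashMap::new();
    let id = Uuid::new_v4();
    groups.insert(id, "group state");
    assert!(groups.get(&id).is_some());

    // Collecting keys copies them out, as remove_inactive_groups does,
    // so the map is not borrowed while entries are removed.
    let ids: Vec<Uuid> = groups.iter().map(|e| *e.key()).collect();
    for id in ids {
        groups.remove(&id);
    }
    assert!(groups.is_empty());
}
```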
@@ -26,6 +26,7 @@ use tokio::sync::mpsc::Sender;
 use tokio::task::yield_now;
 use tokio::time::interval;
 use tracing::{error, info, trace, warn};
+use uuid::Uuid;
 use yrs::updates::decoder::Decode;
 use yrs::StateVector;

@@ -37,7 +38,7 @@ pub struct CollaborationServer<S> {
   /// Keep track of all collab groups
   group_manager: Arc<GroupManager<S>>,
   connect_state: ConnectState,
-  group_sender_by_object_id: Arc<DashMap<String, GroupCommandSender>>,
+  group_sender_by_object_id: Arc<DashMap<Uuid, GroupCommandSender>>,
   #[allow(dead_code)]
   metrics: Arc<CollabRealtimeMetrics>,
   enable_custom_runtime: bool,

@@ -88,7 +89,7 @@ where
       )
       .await?,
     );
-    let group_sender_by_object_id: Arc<DashMap<String, GroupCommandSender>> =
+    let group_sender_by_object_id: Arc<DashMap<_, GroupCommandSender>> =
       Arc::new(Default::default());

     spawn_period_check_inactive_group(Arc::downgrade(&group_manager), &group_sender_by_object_id);

@@ -164,7 +165,8 @@ where
     message_by_oid: MessageByObjectId,
   ) -> Result<(), RealtimeError> {
     for (object_id, collab_messages) in message_by_oid.into_inner() {
-      let group_cmd_sender = self.create_group_if_not_exist(&object_id);
+      let object_id = Uuid::parse_str(&object_id)?;
+      let group_cmd_sender = self.create_group_if_not_exist(object_id);
       let cloned_user = user.clone();
       // Create a new task to send a message to the group command runner without waiting for the
       // result. This approach is used to prevent potential issues with the actor's mailbox in

@@ -210,9 +212,9 @@ where
     &self,
     message: ClientHttpUpdateMessage,
   ) -> Result<(), RealtimeError> {
-    let group_cmd_sender = self.create_group_if_not_exist(&message.object_id);
+    let group_cmd_sender = self.create_group_if_not_exist(message.object_id);
     tokio::spawn(async move {
-      let object_id = message.object_id.clone();
+      let object_id = message.object_id;
       let (tx, rx) = tokio::sync::oneshot::channel();
       let result = group_cmd_sender
         .send(GroupCommand::HandleClientHttpUpdate {

@@ -259,7 +261,7 @@ where
       let (tx, rx) = tokio::sync::oneshot::channel();
       let _ = group_cmd_sender
         .send(GroupCommand::CalculateMissingUpdate {
-          object_id: object_id.clone(),
+          object_id,
           state_vector,
           ret: tx,
         })

@@ -316,15 +318,15 @@ where
   }

   #[inline]
-  fn create_group_if_not_exist(&self, object_id: &str) -> Sender<GroupCommand> {
+  fn create_group_if_not_exist(&self, object_id: Uuid) -> Sender<GroupCommand> {
     let old_sender = self
       .group_sender_by_object_id
-      .get(object_id)
+      .get(&object_id)
      .map(|entry| entry.value().clone());

     let sender = match old_sender {
       Some(sender) => sender,
-      None => match self.group_sender_by_object_id.entry(object_id.to_string()) {
+      None => match self.group_sender_by_object_id.entry(object_id) {
         Entry::Occupied(entry) => entry.get().clone(),
         Entry::Vacant(entry) => {
           let (new_sender, recv) = tokio::sync::mpsc::channel(2000);

@@ -334,7 +336,7 @@ where
             recv: Some(recv),
           };

-          let object_id = entry.key().clone();
+          let object_id = *entry.key();
           if self.enable_custom_runtime {
             COLLAB_RUNTIME.spawn(runner.run(object_id));
           } else {

@@ -354,7 +356,7 @@ where
     &self,
     message: ClientGenerateEmbeddingMessage,
   ) -> Result<(), RealtimeError> {
-    let group_cmd_sender = self.create_group_if_not_exist(&message.object_id);
+    let group_cmd_sender = self.create_group_if_not_exist(message.object_id);
     tokio::spawn(async move {
       let result = group_cmd_sender
         .send(GroupCommand::GenerateCollabEmbedding {

@@ -387,7 +389,7 @@ where

 fn spawn_period_check_inactive_group<S>(
   weak_groups: Weak<GroupManager<S>>,
-  group_sender_by_object_id: &Arc<DashMap<String, GroupCommandSender>>,
+  group_sender_by_object_id: &Arc<DashMap<Uuid, GroupCommandSender>>,
 ) where
   S: CollabStorage,
 {
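Wire messages still carry string object ids, so the server parses once at the entry point (`Uuid::parse_str(&object_id)?`) and passes typed ids from there on, with the parse failure converted into the realtime error type via `From`. A minimal sketch of that boundary (the `RealtimeError` enum here is a simplified stand-in, not the crate's real definition):

```rust
use uuid::Uuid;

#[derive(Debug)]
enum RealtimeError {
    InvalidId(uuid::Error),
}

impl From<uuid::Error> for RealtimeError {
    fn from(err: uuid::Error) -> Self {
        RealtimeError::InvalidId(err)
    }
}

// Parse once on entry so the rest of the server can pass Uuid around,
// as handle_client_message does above with `Uuid::parse_str(&object_id)?`.
fn route_message(raw_object_id: &str) -> Result<Uuid, RealtimeError> {
    let object_id = Uuid::parse_str(raw_object_id)?;
    Ok(object_id)
}

fn main() {
    assert!(route_message("not-a-uuid").is_err());
    assert!(route_message("67e55044-10b1-426f-9247-bb680e5fe0c8").is_ok());
}
```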
@@ -5,7 +5,8 @@ use chrono::{DateTime, Utc};
 use collab::entity::{EncodedCollab, EncoderVersion};
 use collab_entity::CollabType;
 use sqlx::PgPool;
-use tracing::{debug, error, trace, warn};
+use tracing::{debug, error, trace};
+use uuid::Uuid;
 use validator::Validate;

 use app_error::AppError;

@@ -24,7 +25,7 @@ use crate::metrics::CollabMetrics;

 pub const SNAPSHOT_TICK_INTERVAL: Duration = Duration::from_secs(2);

-fn collab_snapshot_key(workspace_id: &str, object_id: &str, snapshot_id: i64) -> String {
+fn collab_snapshot_key(workspace_id: &Uuid, object_id: &Uuid, snapshot_id: i64) -> String {
   let snapshot_id = u64::MAX - snapshot_id as u64;
   format!(
     "collabs/{}/{}/snapshot_{:16x}.v1.zstd",

@@ -32,7 +33,7 @@ fn collab_snapshot_key(
   )
 }

-fn collab_snapshot_prefix(workspace_id: &str, object_id: &str) -> String {
+fn collab_snapshot_prefix(workspace_id: &Uuid, object_id: &Uuid) -> String {
   format!("collabs/{}/{}/snapshot_", workspace_id, object_id)
 }

@@ -83,14 +84,9 @@ impl SnapshotControl {
   pub async fn should_create_snapshot(
     &self,
-    workspace_id: &str,
-    oid: &str,
+    workspace_id: &Uuid,
+    oid: &Uuid,
   ) -> Result<bool, AppError> {
-    if oid.is_empty() {
-      warn!("unexpected empty object id when checking should_create_snapshot");
-      return Ok(false);
-    }
-
     let latest_created_at = self.latest_snapshot_time(workspace_id, oid).await?;
     // Subtracting a fixed duration that is known not to cause underflow. If `checked_sub_signed` returns `None`,
     // it indicates an error in calculation, thus defaulting to creating a snapshot just in case.

@@ -151,18 +147,18 @@ impl SnapshotControl {
     Ok(AFSnapshotMeta {
       snapshot_id,
-      object_id: params.object_id,
+      object_id: params.object_id.to_string(),
       created_at: timestamp,
     })
   }

   pub async fn get_collab_snapshot(
     &self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     snapshot_id: &i64,
   ) -> AppResult<SnapshotData> {
-    let key = collab_snapshot_key(workspace_id, object_id, *snapshot_id);
+    let key = collab_snapshot_key(&workspace_id, &object_id, *snapshot_id);
     match self.s3.get_blob(&key).await {
       Ok(resp) => {
         self.collab_metrics.read_snapshot.inc();

@@ -173,9 +169,9 @@ impl SnapshotControl {
           version: EncoderVersion::V1,
         };
         Ok(SnapshotData {
-          object_id: object_id.to_string(),
+          object_id,
           encoded_collab_v1: encoded_collab.encode_to_bytes()?,
-          workspace_id: workspace_id.to_string(),
+          workspace_id,
         })
       },
       Err(AppError::RecordNotFound(_)) => {

@@ -183,15 +179,15 @@ impl SnapshotControl {
           "snapshot {} for `{}` not found in s3: fallback to postgres",
           snapshot_id, object_id
         );
-        match select_snapshot(&self.pg_pool, workspace_id, object_id, snapshot_id).await? {
+        match select_snapshot(&self.pg_pool, &workspace_id, &object_id, snapshot_id).await? {
           None => Err(AppError::RecordNotFound(format!(
             "Can't find the snapshot with id:{}",
             snapshot_id
           ))),
           Some(row) => Ok(SnapshotData {
-            object_id: object_id.to_string(),
+            object_id,
             encoded_collab_v1: row.blob,
-            workspace_id: workspace_id.to_string(),
+            workspace_id,
           }),
         }
       },

@@ -202,8 +198,8 @@ impl SnapshotControl {
   /// Returns list of snapshots for given object_id in descending order of creation time.
   pub async fn get_collab_snapshot_list(
     &self,
-    workspace_id: &str,
-    oid: &str,
+    workspace_id: &Uuid,
+    oid: &Uuid,
   ) -> AppResult<AFSnapshotMetas> {
     let snapshot_prefix = collab_snapshot_prefix(workspace_id, oid);
     let resp = self

@@ -233,8 +229,8 @@ impl SnapshotControl {
   pub async fn get_snapshot(
     &self,
-    workspace_id: &str,
-    object_id: &str,
+    workspace_id: Uuid,
+    object_id: Uuid,
     snapshot_id: &i64,
   ) -> Result<SnapshotData, AppError> {
     self

@@ -244,11 +240,11 @@ impl SnapshotControl {
   pub async fn get_latest_snapshot(
     &self,
-    workspace_id: &str,
-    oid: &str,
+    workspace_id: Uuid,
+    oid: Uuid,
     collab_type: CollabType,
   ) -> Result<Option<SnapshotData>, AppError> {
-    let snapshot_prefix = collab_snapshot_prefix(workspace_id, oid);
+    let snapshot_prefix = collab_snapshot_prefix(&workspace_id, &oid);
     let mut resp = self.s3.list_dir(&snapshot_prefix, 1).await?;
     if let Some(key) = resp.pop() {
       let resp = self.s3.get_blob(&key).await?;

@@ -259,19 +255,19 @@ impl SnapshotControl {
         version: EncoderVersion::V1,
       };
       Ok(Some(SnapshotData {
-        object_id: oid.to_string(),
+        object_id: oid,
         encoded_collab_v1: encoded_collab.encode_to_bytes()?,
-        workspace_id: workspace_id.to_string(),
+        workspace_id,
       }))
     } else {
-      let snapshot = get_latest_snapshot(oid, &collab_type, &self.pg_pool).await?;
+      let snapshot = get_latest_snapshot(&oid, &collab_type, &self.pg_pool).await?;
       Ok(
         snapshot
           .and_then(|row| row.snapshot_meta)
           .map(|meta| SnapshotData {
-            object_id: oid.to_string(),
+            object_id: oid,
             encoded_collab_v1: meta.snapshot,
-            workspace_id: workspace_id.to_string(),
+            workspace_id,
           }),
       )
     }

@@ -279,8 +275,8 @@ impl SnapshotControl {
   async fn latest_snapshot_time(
     &self,
-    workspace_id: &str,
-    oid: &str,
+    workspace_id: &Uuid,
+    oid: &Uuid,
   ) -> Result<Option<DateTime<Utc>>, AppError> {
     let snapshot_prefix = collab_snapshot_prefix(workspace_id, oid);
     let mut resp = self.s3.list_dir(&snapshot_prefix, 1).await?;
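The S3 key layout survives the type change because `Uuid`'s `Display` impl is the hyphenated form the old strings already used. A sketch replicating the key functions above, with a note on the complemented snapshot id, which makes newer snapshots sort first so `list_dir(prefix, 1)` returns the latest:

```rust
use uuid::Uuid;

// Mirrors collab_snapshot_prefix/key above: Uuid's Display impl is its
// hyphenated form, so the S3 layout is unchanged by the type migration.
fn collab_snapshot_prefix(workspace_id: &Uuid, object_id: &Uuid) -> String {
    format!("collabs/{}/{}/snapshot_", workspace_id, object_id)
}

fn collab_snapshot_key(workspace_id: &Uuid, object_id: &Uuid, snapshot_id: i64) -> String {
    // Complementing the id makes newer snapshots sort first
    // lexicographically, so listing one key under the prefix
    // yields the latest snapshot.
    let snapshot_id = u64::MAX - snapshot_id as u64;
    format!(
        "collabs/{}/{}/snapshot_{:16x}.v1.zstd",
        workspace_id, object_id, snapshot_id
    )
}

fn main() {
    let (w, o) = (Uuid::new_v4(), Uuid::new_v4());
    let older = collab_snapshot_key(&w, &o, 1);
    let newer = collab_snapshot_key(&w, &o, 2);
    assert!(newer < older); // the newer snapshot sorts before the older one
    assert!(older.starts_with(&collab_snapshot_prefix(&w, &o)));
}
```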
@@ -51,6 +51,9 @@ pub enum ImportError {
   #[error(transparent)]
   Internal(#[from] anyhow::Error),

+  #[error(transparent)]
+  InvalidUuid(#[from] uuid::Error),
 }

 impl From<WorkerError> for ImportError {

@@ -214,6 +217,16 @@ impl ImportError {
         format!("Task ID: {} - Upload file too large: {} MB", task_id, file_size_in_mb),
       )
     }
+    ImportError::InvalidUuid(err) => {
+      (
+        format!(
+          "Task ID: {} - Identifier is not valid UUID: {}",
+          task_id,
+          err
+        ),
+        format!("Task ID: {} - Identifier is not valid UUID", task_id),
+      )
+    }
   }
 }
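The `InvalidUuid` variant combines thiserror's `#[error(transparent)]` (forward `Display` and `source` to the wrapped error) with `#[from]` (let `?` convert `uuid::Error` automatically). A sketch of the same shape in isolation (the enum name here is hypothetical):

```rust
use thiserror::Error;
use uuid::Uuid;

#[derive(Error, Debug)]
enum ImportTaskError {
    // `transparent` delegates the error message to the inner uuid::Error,
    // and `#[from]` makes `?` perform the conversion — the same shape as
    // the InvalidUuid variant added above.
    #[error(transparent)]
    InvalidUuid(#[from] uuid::Error),
}

fn parse_workspace_id(raw: &str) -> Result<Uuid, ImportTaskError> {
    Ok(Uuid::parse_str(raw)?)
}

fn main() {
    let err = parse_workspace_id("oops").unwrap_err();
    println!("{err}"); // prints the underlying uuid::Error message
}
```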
@@ -872,7 +872,7 @@ async fn process_unzip_file(
   redis_client: &mut ConnectionManager,
   s3_client: &Arc<dyn S3Client>,
 ) -> Result<(), ImportError> {
-  let workspace_id =
+  let _ =
     Uuid::parse_str(&import_task.workspace_id).map_err(|err| ImportError::Internal(err.into()))?;
   let notion_importer = NotionImporter::new(
     import_task.uid,

@@ -898,9 +898,10 @@ async fn process_unzip_file(
   );

   // 1. Open the workspace folder
+  let workspace_id = Uuid::parse_str(&imported.workspace_id)?;
   let folder_collab = get_encode_collab_from_bytes(
-    &imported.workspace_id,
-    &imported.workspace_id,
+    &workspace_id,
+    &workspace_id,
     &CollabType::Folder,
     pg_pool,
     s3_client,

@@ -942,7 +943,7 @@ async fn process_unzip_file(
     .imported_collabs
     .into_iter()
     .map(|imported_collab| CollabParams {
-      object_id: imported_collab.object_id,
+      object_id: imported_collab.object_id.parse().unwrap(),
       collab_type: imported_collab.collab_type,
       encoded_collab_v1: Bytes::from(imported_collab.encoded_collab.encode_to_bytes().unwrap()),
     })

@@ -971,13 +972,12 @@ async fn process_unzip_file(
         "Failed to select workspace database storage id: {:?}",
         err
       ))
-    })
-    .map(|id| id.to_string())?;
+    })?;

   // 4. Edit workspace database collab and then encode workspace database collab
   if !database_view_ids_by_database_id.is_empty() {
     let w_db_collab = get_encode_collab_from_bytes(
-      &import_task.workspace_id,
+      &workspace_id,
       &w_database_id,
       &CollabType::WorkspaceDatabase,
       pg_pool,

@@ -985,7 +985,7 @@ async fn process_unzip_file(
     )
     .await?;
     let mut w_database = WorkspaceDatabase::from_collab_doc_state(
-      &w_database_id,
+      &w_database_id.to_string(),
       CollabOrigin::Server,
       w_db_collab.into(),
     )

@@ -1003,7 +1003,7 @@ async fn process_unzip_file(
       Ok(bytes) => {
         if let Err(err) = redis_client
           .set_ex::<String, Vec<u8>, Value>(
-            encode_collab_key(&w_database_id),
+            encode_collab_key(&w_database_id.to_string()),
             bytes,
             2592000, // WorkspaceDatabase => 1 month
           )

@@ -1026,7 +1026,7 @@ async fn process_unzip_file(
       import_task.workspace_id
     );
     let w_database_collab_params = CollabParams {
-      object_id: w_database_id.clone(),
+      object_id: w_database_id,
       collab_type: CollabType::WorkspaceDatabase,
       encoded_collab_v1: Bytes::from(w_database_collab.encode_to_bytes().unwrap()),
     };

@@ -1066,7 +1066,7 @@ async fn process_unzip_file(
   }

   let folder_collab_params = CollabParams {
-    object_id: import_task.workspace_id.clone(),
+    object_id: workspace_id,
     collab_type: CollabType::Folder,
     encoded_collab_v1: Bytes::from(folder_collab.encode_to_bytes().unwrap()),
   };

@@ -1095,7 +1095,7 @@ async fn process_unzip_file(
   insert_into_af_collab_bulk_for_user(
     &mut transaction,
     &import_task.uid,
-    &import_task.workspace_id,
+    workspace_id,
     &collab_params_list,
   )
   .await

@@ -1349,8 +1349,8 @@ async fn upload_file_to_s3(
 }

 async fn get_encode_collab_from_bytes(
-  workspace_id: &str,
-  object_id: &str,
+  workspace_id: &Uuid,
+  object_id: &Uuid,
   collab_type: &CollabType,
   pg_pool: &PgPool,
   s3: &Arc<dyn S3Client>,

@@ -1605,13 +1605,13 @@ async fn insert_meta_from_path(
   })
 }

-fn collab_key(workspace_id: &str, object_id: &str) -> String {
+fn collab_key(workspace_id: &Uuid, object_id: &Uuid) -> String {
   format!(
     "collabs/{}/{}/encoded_collab.v1.zstd",
     workspace_id, object_id
   )
 }

-fn encode_collab_key(object_id: &str) -> String {
+fn encode_collab_key<T: Display>(object_id: T) -> String {
   format!("encode_collab_v0:{}", object_id)
 }
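The `imported_collab.object_id.parse().unwrap()` call above works because `Uuid` implements `FromStr`, so `str::parse` infers the target type from the field. A sketch of the same conversion with explicit error handling instead of the worker's `unwrap`:

```rust
use uuid::Uuid;

fn main() {
    // Uuid implements FromStr, so `str::parse` works wherever the target
    // type is known — this is what `imported_collab.object_id.parse()`
    // relies on above.
    let raw = String::from("67e55044-10b1-426f-9247-bb680e5fe0c8");
    let object_id: Uuid = match raw.parse() {
        Ok(id) => id,
        Err(err) => {
            eprintln!("invalid object id {raw}: {err}");
            return;
        },
    };
    println!("parsed {object_id}");
}
```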
@@ -1,5 +1,4 @@
 use app_error::AppError;
-use collab_entity::CollabType;
 use database::index::get_collabs_indexed_at;
 use indexer::collab_indexer::{Indexer, IndexerProvider};
 use indexer::entity::EmbeddingRecord;

@@ -133,10 +132,7 @@ async fn process_upcoming_tasks(
     .collect();
   tasks.retain(|task| !task.data.is_empty());

-  let collab_ids: Vec<(String, CollabType)> = tasks
-    .iter()
-    .map(|task| (task.object_id.clone(), task.collab_type))
-    .collect();
+  let collab_ids: Vec<_> = tasks.iter().map(|task| task.object_id).collect();

   let indexed_collabs = get_collabs_indexed_at(&pg_pool, collab_ids)
     .await
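The indexer lookup shrinks to a plain `Vec<Uuid>` because the non-partitioned table is keyed by `oid` alone: one `uuid[]` parameter replaces the parallel `text[]`/`int[]` arrays the old `(oid, partition_key)` query needed. A rough sketch of the query shape under that assumption (simplified query text; it requires sqlx's `postgres` and `uuid` features):

```rust
use sqlx::PgPool;
use uuid::Uuid;

// A sketch of the pattern behind get_collabs_indexed_at after the
// migration: a Vec<Uuid> binds directly to a $1::uuid[] parameter.
async fn fetch_indexed_oids(
    pool: &PgPool,
    collab_ids: Vec<Uuid>,
) -> Result<Vec<(Uuid,)>, sqlx::Error> {
    sqlx::query_as::<_, (Uuid,)>(
        "SELECT oid FROM af_collab WHERE oid = ANY($1::uuid[])",
    )
    .bind(&collab_ids)
    .fetch_all(pool)
    .await
}
```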
@@ -108,7 +108,7 @@ pub fn chat_scope() -> Scope {
   )
 }
 async fn create_chat_handler(
-  path: web::Path<String>,
+  path: web::Path<Uuid>,
   state: Data<AppState>,
   payload: Json<CreateChatParams>,
 ) -> actix_web::Result<JsonAppResponse<()>> {
@@ -105,7 +105,7 @@ async fn create_upload(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;

   let key = BlobPathV1 {

@@ -152,7 +152,7 @@ async fn upload_part_handler(
   let workspace_id = path_params.workspace_id;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;

   let content_length = content_length.into_inner().into_inner();

@@ -203,7 +203,7 @@ async fn complete_upload_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;

   let key = BlobPathV1 {

@@ -234,7 +234,7 @@ async fn put_blob_handler(
   let workspace_id = path.workspace_id;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;

   let content_length = content_length.into_inner().into_inner();

@@ -308,7 +308,7 @@ async fn delete_blob_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;
   state
     .bucket_storage

@@ -340,7 +340,7 @@ async fn delete_blob_v1_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;
   state
     .bucket_storage

@@ -557,7 +557,7 @@ async fn put_blob_handler_v1(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, path.workspace_id, Action::Write)
+    .enforce_action(&uid, &path.workspace_id, Action::Write)
     .await?;

   let content_length = content_length.into_inner().into_inner();
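The handler-level changes lean on actix-web's typed path extraction: with `web::Path<Uuid>` (or a tuple of them), malformed ids are rejected with a client error before the handler body runs, which is why the manual `Uuid::parse_str` validation blocks elsewhere in the diff can be dropped. A sketch, assuming the `uuid` crate's `serde` feature is enabled; the route and handler names are hypothetical:

```rust
use actix_web::{web, App, HttpServer, Responder};
use uuid::Uuid;

// actix deserializes each path segment into the declared type, so a
// non-UUID segment never reaches the handler.
async fn get_object(path: web::Path<(Uuid, Uuid)>) -> impl Responder {
    let (workspace_id, object_id) = path.into_inner();
    format!("workspace={workspace_id} object={object_id}")
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().route(
            "/workspace/{workspace_id}/object/{object_id}",
            web::get().to(get_object),
        )
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```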
@@ -27,7 +27,7 @@ async fn document_search(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;
   let metrics = &*state.metrics.request_metrics;
   let resp = search_document(
@@ -403,7 +403,7 @@ async fn patch_workspace_handler(
   let uid = state.user_cache.get_user_uid(&uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, params.workspace_id, Action::Write)
+    .enforce_action(&uid, &params.workspace_id, Action::Write)
     .await?;
   let params = params.into_inner();
   workspace::ops::patch_workspace(

@@ -425,7 +425,7 @@ async fn delete_workspace_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Delete)
+    .enforce_action(&uid, &workspace_id, Action::Delete)
     .await?;
   workspace::ops::delete_workspace_for_user(
     state.pg_pool.clone(),

@@ -469,7 +469,7 @@ async fn post_workspace_invite_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Owner)
+    .enforce_role(&uid, &workspace_id, AFRole::Owner)
     .await?;

   let invitations = payload.into_inner();

@@ -542,7 +542,7 @@ async fn get_workspace_settings_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;
   let settings = workspace::ops::get_workspace_settings(&state.pg_pool, &workspace_id).await?;
   Ok(AppResponse::Ok().with_data(settings).into())

@@ -561,7 +561,7 @@ async fn post_workspace_settings_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;
   let settings =
     workspace::ops::update_workspace_settings(&state.pg_pool, &workspace_id, data).await?;

@@ -578,7 +578,7 @@ async fn get_workspace_members_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Member)
+    .enforce_role(&uid, &workspace_id, AFRole::Member)
     .await?;
   let members = workspace::ops::get_workspace_members(&state.pg_pool, &workspace_id)
     .await?

@@ -605,7 +605,7 @@ async fn remove_workspace_member_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Owner)
+    .enforce_role(&uid, &workspace_id, AFRole::Owner)
     .await?;

   let member_emails = payload

@@ -636,7 +636,7 @@ async fn get_workspace_member_handler(
   // Guest users can not get workspace members
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Member)
+    .enforce_role(&uid, &workspace_id, AFRole::Member)
     .await?;
   let member_row = workspace::ops::get_workspace_member(&member_uid, &state.pg_pool, &workspace_id)
     .await

@@ -671,7 +671,7 @@ async fn get_workspace_member_v1_handler(
   // Guest users can not get workspace members
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Member)
+    .enforce_role(&uid, &workspace_id, AFRole::Member)
     .await?;
   let member_row =
     workspace::ops::get_workspace_member_by_uuid(member_uuid, &state.pg_pool, workspace_id)

@@ -705,7 +705,7 @@ async fn open_workspace_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;
   let workspace = workspace::ops::open_workspace(&state.pg_pool, &user_uuid, &workspace_id).await?;
   Ok(AppResponse::Ok().with_data(workspace).into())

@@ -739,7 +739,7 @@ async fn update_workspace_member_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Owner)
+    .enforce_role(&uid, &workspace_id, AFRole::Owner)
     .await?;

   let changeset = payload.into_inner();
@@ -824,11 +824,9 @@ async fn create_collab_handler(
     .await?
   {
     if let Ok(text) = Document::open(collab).and_then(|doc| doc.to_plain_text(false, true)) {
-      let workspace_id_uuid =
-        Uuid::parse_str(&workspace_id).map_err(|err| AppError::Internal(err.into()))?;
       let pending = UnindexedCollabTask::new(
-        workspace_id_uuid,
-        params.object_id.clone(),
+        workspace_id,
+        params.object_id,
         params.collab_type,
         UnindexedData::Text(text),
       );

@@ -849,7 +847,7 @@ async fn create_collab_handler(
   let action = format!("Create new collab: {}", params);
   state
     .collab_access_control_storage
-    .upsert_new_collab_with_transaction(&workspace_id, &uid, params, &mut transaction, &action)
+    .upsert_new_collab_with_transaction(workspace_id, &uid, params, &mut transaction, &action)
     .await?;

   transaction

@@ -871,8 +869,7 @@ async fn batch_create_collab_handler(
   req: HttpRequest,
 ) -> Result<Json<AppResponse<()>>> {
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
-  let workspace_id_uuid = workspace_id.into_inner();
-  let workspace_id = workspace_id_uuid.to_string();
+  let workspace_id = workspace_id.into_inner();
   let compress_type = compress_type_from_header_value(req.headers())?;
   event!(tracing::Level::DEBUG, "start decompressing collab list");

@@ -917,7 +914,7 @@ async fn batch_create_collab_handler(
       EncodedCollab::decode_from_bytes(&params.encoded_collab_v1).ok()?;
     let collab = Collab::new_with_source(
       CollabOrigin::Empty,
-      &params.object_id,
+      &params.object_id.to_string(),
       DataSource::DocStateV1(encoded_collab.doc_state.to_vec()),
       vec![],
       false,

@@ -982,8 +979,8 @@ async fn batch_create_collab_handler(
       Some(text) => text
         .map(|text| {
           UnindexedCollabTask::new(
-            workspace_id_uuid,
-            value.1.object_id.clone(),
+            workspace_id,
+            value.1.object_id,
             value.1.collab_type,
             UnindexedData::Text(text),
           )

@@ -1001,7 +998,7 @@ async fn batch_create_collab_handler(
   let start = Instant::now();
   state
     .collab_access_control_storage
-    .batch_insert_new_collab(&workspace_id, &uid, collab_params_list)
+    .batch_insert_new_collab(workspace_id, &uid, collab_params_list)
     .await?;

   tracing::info!(

@@ -1032,7 +1029,7 @@ async fn get_collab_handler(
     .await
     .map_err(AppResponseError::from)?;
   let params = payload.into_inner();
-  let object_id = params.object_id.clone();
+  let object_id = params.object_id;
   let encode_collab = state
     .collab_access_control_storage
     .get_encode_collab(GetCollabOrigin::User { uid }, params, true)

@@ -1049,12 +1046,11 @@ async fn get_collab_handler(
 async fn v1_get_collab_handler(
   user_uuid: UserUuid,
-  path: web::Path<(String, String)>,
+  path: web::Path<(Uuid, Uuid)>,
   query: web::Query<CollabTypeParam>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<CollabResponse>>> {
   let (workspace_id, object_id) = path.into_inner();
-  let collab_type = query.into_inner().collab_type;
   let uid = state
     .user_cache
     .get_user_uid(&user_uuid)

@@ -1064,8 +1060,8 @@ async fn v1_get_collab_handler(
   let param = QueryCollabParams {
     workspace_id,
     inner: QueryCollab {
-      object_id: object_id.clone(),
-      collab_type,
+      object_id,
+      collab_type: query.collab_type,
     },
   };

@@ -1085,7 +1081,7 @@ async fn v1_get_collab_handler(
 async fn get_collab_json_handler(
   user_uuid: UserUuid,
-  path: web::Path<(String, String)>,
+  path: web::Path<(Uuid, Uuid)>,
   query: web::Query<CollabTypeParam>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<CollabJsonResponse>>> {

@@ -1100,7 +1096,7 @@ async fn get_collab_json_handler(
   let param = QueryCollabParams {
     workspace_id,
     inner: QueryCollab {
-      object_id: object_id.clone(),
+      object_id,
       collab_type,
     },
   };
@@ -1137,12 +1133,7 @@ async fn post_web_update_handler(
   let (workspace_id, object_id) = path.into_inner();
   state
     .collab_access_control
-    .enforce_action(
-      &workspace_id.to_string(),
-      &uid,
-      &object_id.to_string(),
-      Action::Write,
-    )
+    .enforce_action(&workspace_id, &uid, &object_id, Action::Write)
     .await?;
   let user = realtime_user_for_web_request(req.headers(), uid)?;
   trace!("create onetime web realtime user: {}", user);

@@ -1461,7 +1452,7 @@ async fn delete_page_from_trash_handler(
   let (workspace_id, view_id) = path.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;
   let user = realtime_user_for_web_request(req.headers(), uid)?;
   delete_trash(

@@ -1491,7 +1482,7 @@ async fn delete_all_pages_from_trash_handler(
   let workspace_id = path.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;
   let user = realtime_user_for_web_request(req.headers(), uid)?;
   delete_all_pages_from_trash(

@@ -1507,7 +1498,7 @@ async fn delete_all_pages_from_trash_handler(

 async fn publish_page_handler(
   user_uuid: UserUuid,
-  path: web::Path<(Uuid, String)>,
+  path: web::Path<(Uuid, Uuid)>,
   payload: Json<PublishPageParams>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<()>>> {

@@ -1519,7 +1510,7 @@ async fn publish_page_handler(
     .map_err(AppResponseError::from)?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Member)
+    .enforce_role(&uid, &workspace_id, AFRole::Member)
     .await?;
   let PublishPageParams {
     publish_name,

@@ -1534,7 +1525,7 @@ async fn publish_page_handler(
     uid,
     *user_uuid,
     workspace_id,
-    &view_id,
+    view_id,
     visible_database_view_ids,
     publish_name,
     comments_enabled.unwrap_or(true),

@@ -1557,7 +1548,7 @@ async fn unpublish_page_handler(
     .map_err(AppResponseError::from)?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_uuid, AFRole::Member)
+    .enforce_role(&uid, &workspace_uuid, AFRole::Member)
     .await?;
   unpublish_page(
     state.published_collab_store.as_ref(),

@@ -1571,7 +1562,7 @@ async fn unpublish_page_handler(

 async fn post_page_database_view_handler(
   user_uuid: UserUuid,
-  path: web::Path<(Uuid, String)>,
+  path: web::Path<(Uuid, Uuid)>,
   payload: Json<CreatePageDatabaseViewParams>,
   state: Data<AppState>,
   server: Data<RealtimeServerAddr>,

@@ -1630,7 +1621,7 @@ async fn update_page_view_handler(

 async fn get_page_view_handler(
   user_uuid: UserUuid,
-  path: web::Path<(Uuid, String)>,
+  path: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<PageCollab>>> {
   let (workspace_uuid, view_id) = path.into_inner();

@@ -1645,7 +1636,7 @@ async fn get_page_view_handler(
     &state.collab_access_control_storage,
     uid,
     workspace_uuid,
-    &view_id,
+    view_id,
   )
   .await?;
   Ok(Json(AppResponse::Ok().with_data(page_collab)))

@@ -1679,13 +1670,13 @@ async fn favorite_page_view_handler(
 #[instrument(level = "trace", skip_all, err)]
 async fn get_collab_snapshot_handler(
   payload: Json<QuerySnapshotParams>,
-  path: web::Path<(String, String)>,
+  path: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<SnapshotData>>> {
   let (workspace_id, object_id) = path.into_inner();
   let data = state
     .collab_access_control_storage
-    .get_collab_snapshot(&workspace_id.to_string(), &object_id, &payload.snapshot_id)
+    .get_collab_snapshot(workspace_id, object_id, &payload.snapshot_id)
     .await
     .map_err(AppResponseError::from)?;

@@ -1696,7 +1687,7 @@ async fn get_collab_snapshot_handler(
 async fn create_collab_snapshot_handler(
   user_uuid: UserUuid,
   state: Data<AppState>,
-  path: web::Path<(String, String)>,
+  path: web::Path<(Uuid, Uuid)>,
   payload: Json<CollabType>,
 ) -> Result<Json<AppResponse<AFSnapshotMeta>>> {
   let (workspace_id, object_id) = path.into_inner();

@@ -1710,7 +1701,7 @@ async fn create_collab_snapshot_handler(
     .collab_access_control_storage
     .get_encode_collab(
       GetCollabOrigin::User { uid },
-      QueryCollabParams::new(&object_id, collab_type, &workspace_id),
+      QueryCollabParams::new(object_id, collab_type, workspace_id),
       true,
     )
     .await?

@@ -1732,7 +1723,7 @@ async fn create_collab_snapshot_handler(
 #[instrument(level = "trace", skip(path, state), err)]
 async fn get_all_collab_snapshot_list_handler(
   _user_uuid: UserUuid,
-  path: web::Path<(String, String)>,
+  path: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<AFSnapshotMetas>>> {
   let (workspace_id, object_id) = path.into_inner();

@@ -1747,7 +1738,7 @@ async fn get_all_collab_snapshot_list_handler(
 #[instrument(level = "debug", skip(payload, state), err)]
 async fn batch_get_collab_handler(
   user_uuid: UserUuid,
-  path: Path<String>,
+  path: Path<Uuid>,
   state: Data<AppState>,
   payload: Json<BatchQueryCollabParams>,
 ) -> Result<Json<AppResponse<BatchQueryCollabResult>>> {

@@ -1760,7 +1751,7 @@ async fn batch_get_collab_handler(
   let result = BatchQueryCollabResult(
     state
       .collab_access_control_storage
-      .batch_get_collab(&uid, &workspace_id, payload.into_inner().0, false)
+      .batch_get_collab(&uid, workspace_id, payload.into_inner().0, false)
       .await,
   );
   Ok(Json(AppResponse::Ok().with_data(result)))

@@ -1775,16 +1766,13 @@ async fn update_collab_handler(
   let (params, workspace_id) = payload.into_inner().split();
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;

-  let create_params = CreateCollabParams::from((workspace_id.to_string(), params));
+  let create_params = CreateCollabParams::from((workspace_id, params));
   let (params, workspace_id) = create_params.split();
   if state
     .indexer_scheduler
     .can_index_workspace(&workspace_id)
     .await?
   {
-    let workspace_id_uuid =
-      Uuid::parse_str(&workspace_id).map_err(|err| AppError::Internal(err.into()))?;
-
     match params.collab_type {
       CollabType::Document => {
         let collab = collab_from_encode_collab(&params.object_id, &params.encoded_collab_v1)

@@ -1807,8 +1795,8 @@ async fn update_collab_handler(
         if let Ok(text) = Document::open(collab).and_then(|doc| doc.to_plain_text(false, true)) {
           let pending = UnindexedCollabTask::new(
-            workspace_id_uuid,
-            params.object_id.clone(),
+            workspace_id,
+            params.object_id,
             params.collab_type,
             UnindexedData::Text(text),
           );

@@ -1825,7 +1813,7 @@ async fn update_collab_handler(
   state
     .collab_access_control_storage
-    .queue_insert_or_update_collab(&workspace_id, &uid, params, false)
+    .queue_insert_or_update_collab(workspace_id, &uid, params, false)
     .await?;
   Ok(AppResponse::Ok().into())
 }
@@ -1864,7 +1852,7 @@ async fn put_workspace_default_published_view_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Owner)
+    .enforce_role(&uid, &workspace_id, AFRole::Owner)
     .await?;
   let new_default_pub_view_id = payload.into_inner().view_id;
   biz::workspace::publish::set_workspace_default_publish_view(

@@ -1885,7 +1873,7 @@ async fn delete_workspace_default_published_view_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Owner)
+    .enforce_role(&uid, &workspace_id, AFRole::Owner)
     .await?;
   biz::workspace::publish::unset_workspace_default_publish_view(&state.pg_pool, &workspace_id)
     .await?;

@@ -1913,7 +1901,7 @@ async fn put_publish_namespace_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Owner)
+    .enforce_role(&uid, &workspace_id, AFRole::Owner)
     .await?;
   let UpdatePublishNamespace {
     old_namespace,

@@ -1985,7 +1973,7 @@ async fn post_published_duplicate_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;
   let params = params.into_inner();
   let root_view_id_for_duplicate =

@@ -1995,7 +1983,7 @@ async fn post_published_duplicate_handler(
       state.collab_access_control_storage.clone(),
       uid,
       params.published_view_id,
-      workspace_id.to_string(),
+      workspace_id,
       params.dest_view_id,
     )
     .await?;

@@ -2014,7 +2002,7 @@ async fn list_published_collab_info_handler(
   let publish_infos = biz::workspace::publish::list_collab_publish_info(
     state.published_collab_store.as_ref(),
     &state.collab_access_control_storage,
-    &workspace_id.into_inner(),
+    workspace_id.into_inner(),
   )
   .await?;

@@ -2287,7 +2275,7 @@ async fn get_workspace_usage_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Owner)
+    .enforce_role(&uid, &workspace_id, AFRole::Owner)
     .await?;
   let res =
     biz::workspace::ops::get_workspace_document_total_bytes(&state.pg_pool, &workspace_id).await?;

@@ -2308,13 +2296,9 @@ async fn get_workspace_folder_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;
-  let root_view_id = if let Some(root_view_id) = query.root_view_id.as_ref() {
-    root_view_id.to_string()
-  } else {
-    workspace_id.to_string()
-  };
+  let root_view_id = query.root_view_id.unwrap_or(workspace_id);
   let folder_view = biz::collab::ops::get_user_workspace_structure(
     &state.metrics.appflowy_web_metrics,
     server,

@@ -2338,7 +2322,7 @@ async fn get_recent_views_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;
   let folder_views = get_user_recent_folder_views(
     &state.collab_access_control_storage,

@@ -2362,7 +2346,7 @@ async fn get_favorite_views_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;
   let folder_views = get_user_favorite_folder_views(
     &state.collab_access_control_storage,

@@ -2386,7 +2370,7 @@ async fn get_trash_views_handler(
   let workspace_id = workspace_id.into_inner();
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;
   let folder_views =
     get_user_trash_folder_views(&state.collab_access_control_storage, uid, workspace_id).await?;

@@ -2411,7 +2395,7 @@ async fn get_workspace_publish_outline_handler(

 async fn list_database_handler(
   user_uuid: UserUuid,
-  workspace_id: web::Path<String>,
+  workspace_id: web::Path<Uuid>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<Vec<AFDatabase>>>> {
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;

@@ -2428,7 +2412,7 @@ async fn list_database_handler(

 async fn list_database_row_id_handler(
   user_uuid: UserUuid,
-  path_param: web::Path<(Uuid, String)>,
+  path_param: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<Vec<AFDatabaseRow>>>> {
   let (workspace_id, db_id) = path_param.into_inner();

@@ -2436,13 +2420,13 @@ async fn list_database_row_id_handler(

   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;

   let db_rows = biz::collab::ops::list_database_row_ids(
     &state.collab_access_control_storage,
-    &workspace_id.to_string(),
-    &db_id,
+    workspace_id,
+    db_id,
   )
   .await?;
   Ok(Json(AppResponse::Ok().with_data(db_rows)))
@@ -2450,7 +2434,7 @@ async fn list_database_row_id_handler(

 async fn post_database_row_handler(
   user_uuid: UserUuid,
-  path_param: web::Path<(Uuid, String)>,
+  path_param: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
   add_database_row: Json<AddDatatabaseRow>,
 ) -> Result<Json<AppResponse<String>>> {

@@ -2458,7 +2442,7 @@ async fn post_database_row_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;

   let AddDatatabaseRow { cells, document } = add_database_row.into_inner();

@@ -2466,8 +2450,8 @@ async fn post_database_row_handler(
   let new_db_row_id = biz::collab::ops::insert_database_row(
     state.collab_access_control_storage.clone(),
     &state.pg_pool,
-    &workspace_id.to_string(),
-    &db_id,
+    workspace_id,
+    db_id,
     uid,
     None,
     cells,

@@ -2479,7 +2463,7 @@ async fn post_database_row_handler(

 async fn put_database_row_handler(
   user_uuid: UserUuid,
-  path_param: web::Path<(Uuid, String)>,
+  path_param: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
   upsert_db_row: Json<UpsertDatatabaseRow>,
 ) -> Result<Json<AppResponse<String>>> {

@@ -2487,7 +2471,7 @@ async fn put_database_row_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;

   let UpsertDatatabaseRow {

@@ -2498,9 +2482,8 @@ async fn put_database_row_handler(

   let row_id = {
     let mut hasher = Sha256::new();
-    // TODO: check if it is safe to use workspace_id directly
-    hasher.update(workspace_id.to_string());
-    hasher.update(&db_id);
+    hasher.update(workspace_id);
+    hasher.update(db_id);
     hasher.update(pre_hash);
     let hash = hasher.finalize();
     Uuid::from_bytes([

@@ -2509,38 +2492,37 @@ async fn put_database_row_handler(
       hash[10], hash[11], hash[12], hash[13], hash[14], hash[15],
     ])
   };
-  let row_id_str = row_id.to_string();

   biz::collab::ops::upsert_database_row(
     state.collab_access_control_storage.clone(),
     &state.pg_pool,
-    &workspace_id.to_string(),
-    &db_id,
+    workspace_id,
+    db_id,
     uid,
-    &row_id_str,
+    row_id,
     cells,
     document,
   )
   .await?;
-  Ok(Json(AppResponse::Ok().with_data(row_id_str)))
+  Ok(Json(AppResponse::Ok().with_data(row_id.to_string())))
 }

 async fn get_database_fields_handler(
   user_uuid: UserUuid,
-  path_param: web::Path<(Uuid, String)>,
+  path_param: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<Vec<AFDatabaseField>>>> {
   let (workspace_id, db_id) = path_param.into_inner();
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;

   let db_fields = biz::collab::ops::get_database_fields(
     &state.collab_access_control_storage,
-    &workspace_id.to_string(),
-    &db_id,
+    workspace_id,
+    db_id,
   )
   .await?;

@@ -2549,7 +2531,7 @@ async fn get_database_fields_handler(

 async fn post_database_fields_handler(
   user_uuid: UserUuid,
-  path_param: web::Path<(Uuid, String)>,
+  path_param: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
   field: Json<AFInsertDatabaseField>,
 ) -> Result<Json<AppResponse<String>>> {

@@ -2557,15 +2539,15 @@ async fn post_database_fields_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Write)
+    .enforce_action(&uid, &workspace_id, Action::Write)
     .await?;

   let field_id = biz::collab::ops::add_database_field(
     uid,
     state.collab_access_control_storage.clone(),
     &state.pg_pool,
-    &workspace_id.to_string(),
-    &db_id,
+    workspace_id,
+    db_id,
     field.into_inner(),
   )
   .await?;

@@ -2575,7 +2557,7 @@ async fn post_database_fields_handler(

 async fn list_database_row_id_updated_handler(
   user_uuid: UserUuid,
-  path_param: web::Path<(Uuid, String)>,
+  path_param: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
   param: web::Query<ListDatabaseRowUpdatedParam>,
 ) -> Result<Json<AppResponse<Vec<DatabaseRowUpdatedItem>>>> {

@@ -2584,7 +2566,7 @@ async fn list_database_row_id_updated_handler(

   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;

   // Default to 1 hour ago

@@ -2595,8 +2577,8 @@ async fn list_database_row_id_updated_handler(
   let db_rows = biz::collab::ops::list_database_row_ids_updated(
     &state.collab_access_control_storage,
     &state.pg_pool,
-    &workspace_id.to_string(),
-    &db_id,
+    workspace_id,
+    db_id,
     &after,
   )
   .await?;

@@ -2605,7 +2587,7 @@ async fn list_database_row_id_updated_handler(

 async fn list_database_row_details_handler(
   user_uuid: UserUuid,
-  path_param: web::Path<(Uuid, String)>,
+  path_param: web::Path<(Uuid, Uuid)>,
   state: Data<AppState>,
   param: web::Query<ListDatabaseRowDetailParam>,
 ) -> Result<Json<AppResponse<Vec<AFDatabaseRowDetail>>>> {

@@ -2613,21 +2595,11 @@ async fn list_database_row_details_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   let list_db_row_query = param.into_inner();
   let with_doc = list_db_row_query.with_doc.unwrap_or_default();
-  let row_ids = list_db_row_query.into_ids();
-
-  if let Err(e) = Uuid::parse_str(&db_id) {
-    return Err(AppError::InvalidRequest(format!("invalid database id `{}`: {}", db_id, e)).into());
-  }
-
-  for id in row_ids.iter() {
-    if let Err(e) = Uuid::parse_str(id) {
-      return Err(AppError::InvalidRequest(format!("invalid row id `{}`: {}", id, e)).into());
-    }
-  }
+  let row_ids = list_db_row_query.into_ids()?;

   state
     .workspace_access_control
-    .enforce_action(&uid, workspace_id, Action::Read)
+    .enforce_action(&uid, &workspace_id, Action::Read)
     .await?;

   static UNSUPPORTED_FIELD_TYPES: &[FieldType] = &[FieldType::Relation];

@@ -2635,7 +2607,7 @@ async fn list_database_row_details_handler(
   let db_rows = biz::collab::ops::list_database_row_details(
     &state.collab_access_control_storage,
     uid,
-    workspace_id.to_string(),
+    workspace_id,
     db_id,
     &row_ids,
     UNSUPPORTED_FIELD_TYPES,

@@ -2690,13 +2662,11 @@ async fn parser_realtime_msg(

 #[instrument(level = "debug", skip_all)]
 async fn get_collab_embed_info_handler(
-  path: web::Path<(String, String)>,
-  query: web::Query<CollabTypeParam>,
+  path: web::Path<(String, Uuid)>,
   state: Data<AppState>,
 ) -> Result<Json<AppResponse<AFCollabEmbedInfo>>> {
   let (_, object_id) = path.into_inner();
-  let collab_type = query.into_inner().collab_type;
-  let info = database::collab::select_collab_embed_info(&state.pg_pool, &object_id, collab_type)
+  let info = database::collab::select_collab_embed_info(&state.pg_pool, &object_id)
     .await
     .map_err(AppResponseError::from)?
     .ok_or_else(|| {

@@ -2800,8 +2770,8 @@ async fn collab_full_sync_handler(
   let (tx, rx) = tokio::sync::oneshot::channel();
   let message = ClientHttpUpdateMessage {
     user,
-    workspace_id: workspace_id.to_string(),
-    object_id: object_id.to_string(),
+    workspace_id,
+    object_id,
     collab_type,
     update: Bytes::from(doc_state),
     state_vector: Some(Bytes::from(sv)),

@@ -2837,7 +2807,7 @@ async fn post_quick_note_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Member)
+    .enforce_role(&uid, &workspace_id, AFRole::Member)
     .await?;
   let data = data.into_inner();
   let quick_note = create_quick_note(&state.pg_pool, uid, workspace_id, data.data.as_ref()).await?;

@@ -2854,7 +2824,7 @@ async fn list_quick_notes_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Member)
+    .enforce_role(&uid, &workspace_id, AFRole::Member)
     .await?;
   let ListQuickNotesQueryParams {
     search_term,

@@ -2883,7 +2853,7 @@ async fn update_quick_note_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Member)
+    .enforce_role(&uid, &workspace_id, AFRole::Member)
     .await?;
   update_quick_note(&state.pg_pool, quick_note_id, &data.data).await?;
   Ok(Json(AppResponse::Ok()))

@@ -2898,7 +2868,7 @@ async fn delete_quick_note_handler(
   let uid = state.user_cache.get_user_uid(&user_uuid).await?;
   state
     .workspace_access_control
-    .enforce_role(&uid, workspace_id, AFRole::Member)
+    .enforce_role(&uid, &workspace_id, AFRole::Member)
     .await?;
   delete_quick_note(&state.pg_pool, quick_note_id).await?;
   Ok(Json(AppResponse::Ok()))
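The `put_database_row_handler` derives a deterministic row id by hashing the workspace id, database id, and a caller-supplied key, then taking the first 16 bytes of the SHA-256 digest as a `Uuid`. A sketch of the same scheme with a slice copy instead of listing all 16 bytes (a name-based v5 UUID would be the off-the-shelf alternative to this hand-rolled derivation):

```rust
use sha2::{Digest, Sha256};
use uuid::Uuid;

// Mirrors the row-id derivation in put_database_row_handler above.
fn derive_row_id(workspace_id: Uuid, db_id: Uuid, pre_hash: &str) -> Uuid {
    let mut hasher = Sha256::new();
    hasher.update(workspace_id.as_bytes());
    hasher.update(db_id.as_bytes());
    hasher.update(pre_hash.as_bytes());
    let hash = hasher.finalize();
    let mut bytes = [0u8; 16];
    bytes.copy_from_slice(&hash[..16]);
    Uuid::from_bytes(bytes)
}

fn main() {
    let (w, d) = (Uuid::new_v4(), Uuid::new_v4());
    // Same inputs always derive the same row id, which is what makes
    // the PUT endpoint idempotent.
    assert_eq!(derive_row_id(w, d, "row-1"), derive_row_id(w, d, "row-1"));
    assert_ne!(derive_row_id(w, d, "row-1"), derive_row_id(w, d, "row-2"));
}
```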
@@ -84,10 +84,7 @@ pub async fn get_access_request(
   let folder = get_latest_collab_folder(
     collab_storage,
     GetCollabOrigin::Server,
-    &access_request_with_view_id
-      .workspace
-      .workspace_id
-      .to_string(),
+    access_request_with_view_id.workspace.workspace_id,
   )
   .await?;
   let view = folder.get_view(&access_request_with_view_id.view_id.to_string());

@@ -125,7 +122,7 @@ pub async fn approve_or_reject_access_request(
 ) -> Result<(), AppError> {
   let access_request = select_access_request_by_request_id(pg_pool, request_id).await?;
   workspace_access_control
-    .enforce_role(&uid, access_request.workspace.workspace_id, AFRole::Owner)
+    .enforce_role(&uid, &access_request.workspace.workspace_id, AFRole::Owner)
     .await?;

   let mut txn = pg_pool.begin().await.context("approving request")?;
@@ -27,7 +27,7 @@ use validator::Validate;
 pub(crate) async fn create_chat(
   pg_pool: &PgPool,
   params: CreateChatParams,
-  workspace_id: &str,
+  workspace_id: &Uuid,
 ) -> Result<(), AppError> {
   params.validate()?;
   trace!("[Chat] create chat {:?}", params);
@@ -176,11 +176,11 @@ pub struct PostgresDatabaseCollabService {
 }

 impl PostgresDatabaseCollabService {
-  pub async fn get_collab(&self, oid: &str, collab_type: CollabType) -> EncodedCollab {
+  pub async fn get_collab(&self, oid: Uuid, collab_type: CollabType) -> EncodedCollab {
     get_latest_collab_encoded(
       &self.collab_storage,
       GetCollabOrigin::Server,
-      &self.workspace_id.to_string(),
+      self.workspace_id,
       oid,
       collab_type,
     )

@@ -197,10 +197,11 @@ impl DatabaseCollabService for PostgresDatabaseCollabService {
     object_type: CollabType,
     encoded_collab: Option<(EncodedCollab, bool)>,
   ) -> Result<Collab, DatabaseError> {
+    let object_id = Uuid::parse_str(object_id)?;
     match encoded_collab {
       None => Collab::new_with_source(
         CollabOrigin::Empty,
-        object_id,
+        &object_id.to_string(),
         self.get_collab(object_id, object_type).await.into(),
         vec![],
         false,

@@ -208,7 +209,7 @@ impl DatabaseCollabService for PostgresDatabaseCollabService {
       .map_err(|err| DatabaseError::Internal(err.into())),
       Some((encoded_collab, _)) => Collab::new_with_source(
         CollabOrigin::Empty,
-        object_id,
+        &object_id.to_string(),
         encoded_collab.into(),
         vec![],
         false,

@@ -222,15 +223,22 @@ impl DatabaseCollabService for PostgresDatabaseCollabService {
     object_ids: Vec<String>,
     collab_type: CollabType,
   ) -> Result<EncodeCollabByOid, DatabaseError> {
+    let mut object_uuids = Vec::with_capacity(object_ids.len());
+    for object_id in object_ids {
+      object_uuids.push(Uuid::parse_str(&object_id)?);
+    }
     let encoded_collabs = batch_get_latest_collab_encoded(
       &self.collab_storage,
       GetCollabOrigin::Server,
-      &self.workspace_id.to_string(),
-      &object_ids,
+      self.workspace_id,
+      &object_uuids,
       collab_type,
     )
     .await
-    .unwrap();
+    .unwrap()
+    .into_iter()
+    .map(|(k, v)| (k.to_string(), v))
+    .collect();
     Ok(encoded_collabs)
   }
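The service above still receives string ids from the DatabaseCollabService trait, so it parses them into Uuid once at the boundary and propagates the first failure, rather than re-parsing on every storage call. A standalone sketch of that conversion (the ParseIdError type here is a stand-in, not the crate's real error type):

    use uuid::Uuid;

    #[derive(Debug)]
    struct ParseIdError(String);

    // Parse a batch of string ids into UUIDs, failing fast on the first bad id.
    fn parse_object_ids(object_ids: Vec<String>) -> Result<Vec<Uuid>, ParseIdError> {
      let mut object_uuids = Vec::with_capacity(object_ids.len());
      for object_id in object_ids {
        let uuid = Uuid::parse_str(&object_id)
          .map_err(|e| ParseIdError(format!("invalid object id `{object_id}`: {e}")))?;
        object_uuids.push(uuid);
      }
      Ok(object_uuids)
    }

    fn main() {
      let ids = vec!["550e8400-e29b-41d4-a716-446655440000".to_string()];
      println!("{:?}", parse_object_ids(ids));
    }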
@@ -12,9 +12,9 @@ use shared_entity::dto::workspace_dto::{
 use uuid::Uuid;

 pub struct PrivateSpaceAndTrashViews {
-  pub my_private_space_ids: HashSet<String>,
-  pub other_private_space_ids: HashSet<String>,
-  pub view_ids_in_trash: HashSet<String>,
+  pub my_private_space_ids: HashSet<Uuid>,
+  pub other_private_space_ids: HashSet<Uuid>,
+  pub view_ids_in_trash: HashSet<Uuid>,
 }

 pub fn private_space_and_trash_view_ids(folder: &Folder) -> PrivateSpaceAndTrashViews {

@@ -24,25 +24,28 @@ pub fn private_space_and_trash_view_ids(folder: &Folder) -> PrivateSpaceAndTrash
   for private_section in folder.get_my_private_sections() {
     match folder.get_view(&private_section.id) {
       Some(private_view) if check_if_view_is_space(&private_view) => {
-        my_private_space_ids.insert(private_section.id.clone());
+        let section_id = Uuid::parse_str(&private_section.id).unwrap();
+        my_private_space_ids.insert(section_id);
       },
       _ => (),
     }
   }

   for private_section in folder.get_all_private_sections() {
+    let private_section_id = Uuid::parse_str(&private_section.id).unwrap();
     match folder.get_view(&private_section.id) {
       Some(private_view)
         if check_if_view_is_space(&private_view)
-          && !my_private_space_ids.contains(&private_section.id) =>
+          && !my_private_space_ids.contains(&private_section_id) =>
       {
-        other_private_space_ids.insert(private_section.id.clone());
+        other_private_space_ids.insert(private_section_id);
       },
       _ => (),
     }
   }
   for trash_view in folder.get_all_trash_sections() {
-    view_ids_in_trash.insert(trash_view.id.clone());
+    let trash_view_id = Uuid::parse_str(&trash_view.id).unwrap();
+    view_ids_in_trash.insert(trash_view_id);
   }
   PrivateSpaceAndTrashViews {
     my_private_space_ids,

@@ -54,16 +57,16 @@ pub fn private_space_and_trash_view_ids(folder: &Folder) -> PrivateSpaceAndTrash
 /// Return all folders belonging to a workspace, excluding private sections which the user does not have access to.
 pub fn collab_folder_to_folder_view(
   workspace_id: Uuid,
-  root_view_id: &str,
+  root_view_id: &Uuid,
   folder: &Folder,
   max_depth: u32,
-  pubished_view_ids: &HashSet<String>,
+  pubished_view_ids: &HashSet<Uuid>,
 ) -> Result<FolderView, AppError> {
   let private_space_and_trash_view_ids = private_space_and_trash_view_ids(folder);

   to_folder_view(
     workspace_id,
-    "",
+    None,
     root_view_id,
     folder,
     &private_space_and_trash_view_ids,

@@ -78,9 +81,10 @@ pub fn collab_folder_to_folder_view(
   )))
 }

-pub fn get_prev_view_id(folder: &Folder, view_id: &str) -> Option<String> {
+pub fn get_prev_view_id(folder: &Folder, view_id: &Uuid) -> Option<Uuid> {
+  let view_id = view_id.to_string();
   folder
-    .get_view(view_id)
+    .get_view(&view_id.to_string())
     .and_then(|view| folder.get_view(&view.parent_view_id))
     .and_then(|parent_view| {
       parent_view

@@ -91,7 +95,7 @@ pub fn get_prev_view_id(folder: &Folder, view_id: &str) -> Option<String> {
         if pos == 0 {
           None
         } else {
-          Some(parent_view.children[pos - 1].id.clone())
+          parent_view.children[pos - 1].id.parse().ok()
         }
       })
     })

@@ -100,11 +104,11 @@ pub fn get_prev_view_id(folder: &Folder, view_id: &str) -> Option<String> {
 #[allow(clippy::too_many_arguments)]
 fn to_folder_view(
   workspace_id: Uuid,
-  parent_view_id: &str,
-  view_id: &str,
+  parent_view_id: Option<&Uuid>,
+  view_id: &Uuid,
   folder: &Folder,
   private_space_and_trash_views: &PrivateSpaceAndTrashViews,
-  published_view_ids: &HashSet<String>,
+  published_view_ids: &HashSet<Uuid>,
   parent_is_private: bool,
   depth: u32,
   max_depth: u32,

@@ -123,7 +127,7 @@ fn to_folder_view(
     return None;
   }

-  let view = match folder.get_view(view_id) {
+  let view = match folder.get_view(&view_id.to_string()) {
     Some(view) => view,
     None => {
       return None;

@@ -131,14 +135,15 @@ fn to_folder_view(
   };

   // There is currently a bug, in which the parent_view_id is not always set correctly
-  if !(parent_view_id.is_empty() || view.parent_view_id == parent_view_id) {
+  let view_parent = Uuid::parse_str(&view.parent_view_id).ok();
+  if parent_view_id.is_some() && view_parent.as_ref() != parent_view_id {
     return None;
   }

   let view_is_space = check_if_view_is_space(&view);
   // There is currently a bug, which a document that is not a space ended up as child
   // of the workspace
-  let parent_is_workspace = workspace_id.to_string() == parent_view_id;
+  let parent_is_workspace = Some(&workspace_id) == parent_view_id;
   if !view_is_space && parent_is_workspace {
     return None;
   }

@@ -154,10 +159,11 @@ fn to_folder_view(
     .children
     .iter()
     .filter_map(|child_view_id| {
+      let child_view_id = Uuid::parse_str(&child_view_id.id).ok()?;
       to_folder_view(
         workspace_id,
-        view_id,
-        &child_view_id.id,
+        Some(view_id),
+        &child_view_id,
         folder,
         private_space_and_trash_views,
         published_view_ids,

@@ -168,8 +174,8 @@ fn to_folder_view(
     })
     .collect();
   Some(FolderView {
-    view_id: view_id.to_string(),
-    parent_view_id: view.parent_view_id.clone(),
+    view_id: *view_id,
+    parent_view_id: view.parent_view_id.parse().ok(),
     prev_view_id: get_prev_view_id(folder, view_id),
     name: view.name.clone(),
     icon: view

@@ -209,10 +215,11 @@ pub fn section_items_to_favorite_folder_view(
         .unwrap_or(false),
       None => false,
     };
+    let view_id = v.id.parse().unwrap();
     let folder_view = FolderView {
-      view_id: v.id.clone(),
-      parent_view_id: v.parent_view_id.clone(),
-      prev_view_id: get_prev_view_id(folder, &v.id),
+      view_id,
+      parent_view_id: v.parent_view_id.parse().ok(),
+      prev_view_id: get_prev_view_id(folder, &view_id),
       name: v.name.clone(),
       icon: v.icon.as_ref().map(|icon| to_dto_view_icon(icon.clone())),
       is_space: false,

@@ -248,10 +255,11 @@ pub fn section_items_to_recent_folder_view(
     .filter_map(|section_item| {
       let view = folder.get_view(&section_item.id);
       view.map(|v| {
+        let view_id = v.id.parse().unwrap();
         let folder_view = FolderView {
-          view_id: v.id.clone(),
-          parent_view_id: v.parent_view_id.clone(),
-          prev_view_id: get_prev_view_id(folder, &v.id),
+          view_id,
+          parent_view_id: v.parent_view_id.parse().ok(),
+          prev_view_id: get_prev_view_id(folder, &view_id),
           name: v.name.clone(),
           icon: v.icon.as_ref().map(|icon| to_dto_view_icon(icon.clone())),
           is_space: false,

@@ -285,10 +293,11 @@ pub fn section_items_to_trash_folder_view(
     .filter_map(|section_item| {
       let view = folder.get_view(&section_item.id);
       view.map(|v| {
+        let view_id = v.id.parse().unwrap();
         let folder_view = FolderView {
-          view_id: v.id.clone(),
-          parent_view_id: v.parent_view_id.clone(),
-          prev_view_id: get_prev_view_id(folder, &v.id),
+          view_id,
+          parent_view_id: v.parent_view_id.parse().ok(),
+          prev_view_id: get_prev_view_id(folder, &view_id),
           name: v.name.clone(),
           icon: v.icon.as_ref().map(|icon| to_dto_view_icon(icon.clone())),
           is_space: false,

@@ -328,9 +337,10 @@ fn get_view_and_children_recursive(
   private_space_and_trash_views: &PrivateSpaceAndTrashViews,
   view_id: &str,
 ) -> Option<ViewTree> {
+  let view_uuid = Uuid::parse_str(view_id).ok()?;
   if private_space_and_trash_views
     .view_ids_in_trash
-    .contains(view_id)
+    .contains(&view_uuid)
   {
     return None;
   }
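One thing worth flagging in the folder_view hunks above: view and section ids coming out of the collab folder are still strings, so they are parsed at the edges, and the two parsing styles fail differently. Uuid::parse_str(...).unwrap() panics on a malformed id, while the .ok()? and .parse().ok() forms silently drop the view instead. A small standalone sketch of the two failure modes (toy id list, not project code):

    use std::collections::HashSet;
    use uuid::Uuid;

    fn main() {
      let raw_ids = ["550e8400-e29b-41d4-a716-446655440000", "not-a-uuid"];

      // Lenient: malformed ids are skipped, mirroring the `.ok()?` call sites.
      let parsed: HashSet<Uuid> = raw_ids
        .iter()
        .filter_map(|id| Uuid::parse_str(id).ok())
        .collect();
      println!("kept {} of {} ids", parsed.len(), raw_ids.len());

      // Strict, mirroring the `.unwrap()` call sites: this would panic on
      // "not-a-uuid", so it is only safe where ids are known to be UUIDs.
      // let all: Vec<Uuid> = raw_ids.iter().map(|id| Uuid::parse_str(id).unwrap()).collect();
    }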
@@ -8,7 +8,6 @@ use chrono::DateTime;
 use chrono::Utc;
 use collab::preclude::Collab;
 use collab_database::database::gen_field_id;
-use collab_database::database::gen_row_id;
 use collab_database::entity::FieldType;
 use collab_database::fields::Field;
 use collab_database::fields::TypeOptions;

@@ -54,16 +53,6 @@ use shared_entity::dto::workspace_dto::TrashFolderView;
 use sqlx::PgPool;
 use yrs::Map;

-use crate::api::metrics::AppFlowyWebMetrics;
-use crate::api::ws::RealtimeServerAddr;
-use crate::biz::collab::folder_view::check_if_view_is_space;
-use crate::biz::collab::utils::get_database_row_doc_changes;
-use crate::biz::workspace::ops::broadcast_update_with_timeout;
-use crate::biz::workspace::page_view::update_workspace_folder_data;
-use shared_entity::dto::workspace_dto::{FolderView, PublishedView};
-use sqlx::types::Uuid;
-use std::collections::HashSet;
-
 use super::folder_view::collab_folder_to_folder_view;
 use super::folder_view::section_items_to_favorite_folder_view;
 use super::folder_view::section_items_to_recent_folder_view;

@@ -85,6 +74,15 @@ use super::utils::CreatedRowDocument;
 use super::utils::DocChanges;
 use super::utils::DEFAULT_SPACE_ICON;
 use super::utils::DEFAULT_SPACE_ICON_COLOR;
+use crate::api::metrics::AppFlowyWebMetrics;
+use crate::api::ws::RealtimeServerAddr;
+use crate::biz::collab::folder_view::check_if_view_is_space;
+use crate::biz::collab::utils::get_database_row_doc_changes;
+use crate::biz::workspace::ops::broadcast_update_with_timeout;
+use crate::biz::workspace::page_view::update_workspace_folder_data;
+use shared_entity::dto::workspace_dto::{FolderView, PublishedView};
+use sqlx::types::Uuid;
+use std::collections::HashSet;

 pub async fn get_user_favorite_folder_views(
   collab_storage: &CollabAccessControlStorage,

@@ -92,12 +90,8 @@ pub async fn get_user_favorite_folder_views(
   uid: i64,
   workspace_id: Uuid,
 ) -> Result<Vec<FavoriteFolderView>, AppError> {
-  let folder = get_latest_collab_folder(
-    collab_storage,
-    GetCollabOrigin::User { uid },
-    &workspace_id.to_string(),
-  )
-  .await?;
+  let folder =
+    get_latest_collab_folder(collab_storage, GetCollabOrigin::User { uid }, workspace_id).await?;
   let publish_view_ids = select_published_view_ids_for_workspace(pg_pool, workspace_id).await?;
   let publish_view_ids: HashSet<String> = publish_view_ids
     .into_iter()

@@ -126,12 +120,8 @@ pub async fn get_user_recent_folder_views(
   uid: i64,
   workspace_id: Uuid,
 ) -> Result<Vec<RecentFolderView>, AppError> {
-  let folder = get_latest_collab_folder(
-    collab_storage,
-    GetCollabOrigin::User { uid },
-    &workspace_id.to_string(),
-  )
-  .await?;
+  let folder =
+    get_latest_collab_folder(collab_storage, GetCollabOrigin::User { uid }, workspace_id).await?;
   let deleted_section_item_ids: Vec<String> = folder
     .get_my_trash_sections()
     .iter()

@@ -159,12 +149,8 @@ pub async fn get_user_trash_folder_views(
   uid: i64,
   workspace_id: Uuid,
 ) -> Result<Vec<TrashFolderView>, AppError> {
-  let folder = get_latest_collab_folder(
-    collab_storage,
-    GetCollabOrigin::User { uid },
-    &workspace_id.to_string(),
-  )
-  .await?;
+  let folder =
+    get_latest_collab_folder(collab_storage, GetCollabOrigin::User { uid }, workspace_id).await?;
   let section_items = folder.get_my_trash_sections();
   Ok(section_items_to_trash_folder_view(&section_items, &folder))
 }

@@ -260,7 +246,7 @@ pub async fn get_user_workspace_structure(
   user: RealtimeUser,
   workspace_id: Uuid,
   depth: u32,
-  root_view_id: &str,
+  root_view_id: &Uuid,
 ) -> Result<FolderView, AppError> {
   let depth_limit = 10;
   if depth > depth_limit {

@@ -272,17 +258,14 @@ pub async fn get_user_workspace_structure(
   let folder = get_latest_collab_folder(
     collab_storage,
     GetCollabOrigin::User { uid: user.uid },
-    &workspace_id.to_string(),
+    workspace_id,
   )
   .await?;
   let patched_folder =
     fix_old_workspace_folder(appflowy_web_metrics, server, user, folder, workspace_id).await?;

   let publish_view_ids = select_published_view_ids_for_workspace(pg_pool, workspace_id).await?;
-  let publish_view_ids: HashSet<String> = publish_view_ids
-    .into_iter()
-    .map(|id| id.to_string())
-    .collect();
+  let publish_view_ids: HashSet<_> = publish_view_ids.into_iter().collect();
   collab_folder_to_folder_view(
     workspace_id,
     root_view_id,

@@ -297,13 +280,13 @@ pub async fn get_latest_workspace_database(
   pg_pool: &PgPool,
   collab_origin: GetCollabOrigin,
   workspace_id: Uuid,
-) -> Result<(String, WorkspaceDatabase), AppError> {
+) -> Result<(Uuid, WorkspaceDatabase), AppError> {
   let workspace_database_oid = select_workspace_database_oid(pg_pool, &workspace_id).await?;
   let workspace_database_collab = get_latest_collab(
     collab_storage,
     collab_origin,
-    &workspace_id.to_string(),
-    &workspace_database_oid,
+    workspace_id,
+    workspace_database_oid,
     CollabType::WorkspaceDatabase,
   )
   .await?;

@@ -319,12 +302,8 @@ pub async fn get_published_view(
   pg_pool: &PgPool,
 ) -> Result<PublishedView, AppError> {
   let workspace_id = select_workspace_id_for_publish_namespace(pg_pool, &publish_namespace).await?;
-  let folder = get_latest_collab_folder(
-    collab_storage,
-    GetCollabOrigin::Server,
-    &workspace_id.to_string(),
-  )
-  .await?;
+  let folder =
+    get_latest_collab_folder(collab_storage, GetCollabOrigin::Server, workspace_id).await?;
   let publish_view_ids_with_publish_info =
     select_published_view_ids_with_publish_info_for_workspace(pg_pool, workspace_id).await?;
   let publish_view_id_to_info_map: HashMap<String, PublishedViewInfo> =

@@ -356,16 +335,15 @@ pub async fn list_database(
   pg_pool: &PgPool,
   collab_storage: &CollabAccessControlStorage,
   uid: i64,
-  workspace_uuid_str: String,
+  workspace_id: Uuid,
 ) -> Result<Vec<AFDatabase>, AppError> {
-  let workspace_uuid: Uuid = workspace_uuid_str.as_str().parse()?;
-  let ws_db_oid = select_workspace_database_oid(pg_pool, &workspace_uuid).await?;
+  let ws_db_oid = select_workspace_database_oid(pg_pool, &workspace_id).await?;

   let mut ws_body_collab = get_latest_collab(
     collab_storage,
     GetCollabOrigin::Server,
-    &workspace_uuid_str,
-    &ws_db_oid,
+    workspace_id,
+    ws_db_oid,
     CollabType::WorkspaceDatabase,
   )
   .await?;

@@ -378,12 +356,8 @@ pub async fn list_database(
   })?;
   let db_metas = ws_body.get_all_meta(&ws_body_collab.transact());

-  let folder = get_latest_collab_folder(
-    collab_storage,
-    GetCollabOrigin::User { uid },
-    &workspace_uuid_str,
-  )
-  .await?;
+  let folder =
+    get_latest_collab_folder(collab_storage, GetCollabOrigin::User { uid }, workspace_id).await?;

   let trash = folder
     .get_all_trash_sections()

@@ -412,8 +386,8 @@ pub async fn list_database(

 pub async fn list_database_row_ids(
   collab_storage: &CollabAccessControlStorage,
-  workspace_uuid_str: &str,
-  database_uuid_str: &str,
+  workspace_uuid_str: Uuid,
+  database_uuid_str: Uuid,
 ) -> Result<Vec<AFDatabaseRow>, AppError> {
   let (db_collab, db_body) =
     get_latest_collab_database_body(collab_storage, workspace_uuid_str, database_uuid_str).await?;

@@ -440,35 +414,36 @@ pub async fn list_database_row_ids(
 pub async fn insert_database_row(
   collab_storage: Arc<CollabAccessControlStorage>,
   pg_pool: &PgPool,
-  workspace_uuid_str: &str,
-  database_uuid_str: &str,
+  workspace_uuid: Uuid,
+  database_uuid: Uuid,
   uid: i64,
-  new_db_row_id: Option<&str>,
+  new_db_row_id: Option<Uuid>,
   cell_value_by_id: HashMap<String, serde_json::Value>,
   row_doc_content: Option<String>,
 ) -> Result<String, AppError> {
-  let new_db_row_id: RowId = new_db_row_id
-    .map(|id| RowId::from(id.to_string()))
-    .unwrap_or_else(gen_row_id);
-
+  let new_db_row_id = new_db_row_id.unwrap_or_else(Uuid::new_v4);
+  let new_db_row_id_str = RowId::from(new_db_row_id.to_string());
   let creation_time = Utc::now();

-  let mut new_db_row_collab =
-    Collab::new_with_origin(CollabOrigin::Empty, new_db_row_id.clone(), vec![], false);
+  let mut new_db_row_collab = Collab::new_with_origin(
+    CollabOrigin::Empty,
+    new_db_row_id.to_string(),
+    vec![],
+    false,
+  );
   let new_db_row_body = DatabaseRowBody::create(
-    new_db_row_id.clone(),
+    new_db_row_id_str.clone(),
     &mut new_db_row_collab,
-    Row::empty(new_db_row_id.clone(), database_uuid_str),
+    Row::empty(new_db_row_id_str, &database_uuid.to_string()),
   );
   new_db_row_body.update(&mut new_db_row_collab.transact_mut(), |row_update| {
     row_update.set_created_at(Utc::now().timestamp());
   });

-  let new_row_doc_creation: Option<(String, CreatedRowDocument)> = match row_doc_content {
+  let new_row_doc_creation: Option<(Uuid, CreatedRowDocument)> = match row_doc_content {
     Some(row_doc_content) if !row_doc_content.is_empty() => {
       // update row to indicate that the document is not empty
-      let is_document_empty_id =
-        meta_id_from_row_id(&new_db_row_id.parse()?, RowMetaKey::IsDocumentEmpty);
+      let is_document_empty_id = meta_id_from_row_id(&new_db_row_id, RowMetaKey::IsDocumentEmpty);
       new_db_row_body.get_meta().insert(
         &mut new_db_row_collab.transact_mut(),
         is_document_empty_id,

@@ -481,10 +456,11 @@ pub async fn insert_database_row(
         .map_err(|err| AppError::Internal(anyhow::anyhow!("Failed to get document id: {:?}", err)))?
         .ok_or_else(|| AppError::Internal(anyhow::anyhow!("Failed to get document id")))?;

+      let new_doc_id = Uuid::parse_str(&new_doc_id)?;
       let created_row_doc = create_row_document(
-        workspace_uuid_str,
+        workspace_uuid,
         uid,
-        &new_doc_id,
+        new_doc_id,
         &collab_storage,
         row_doc_content,
       )

@@ -495,7 +471,7 @@ pub async fn insert_database_row(
   };

   let (mut db_collab, db_body) =
-    get_latest_collab_database_body(&collab_storage, workspace_uuid_str, database_uuid_str).await?;
+    get_latest_collab_database_body(&collab_storage, workspace_uuid, database_uuid).await?;
   write_to_database_row(
     &db_body,
     &mut new_db_row_collab.transact_mut(),

@@ -509,8 +485,8 @@ pub async fn insert_database_row(
   let ts_now = creation_time.timestamp();
   let row_order = db_body
     .create_row(CreateRowParams {
-      id: new_db_row_id.clone(),
-      database_id: database_uuid_str.to_string(),
+      id: new_db_row_id.to_string().into(),
+      database_id: database_uuid.to_string(),
       cells: new_db_row_body
         .cells(&new_db_row_collab.transact())
         .unwrap_or_default(),

@@ -548,7 +524,7 @@ pub async fn insert_database_row(
   // insert document
   collab_storage
     .upsert_new_collab_with_transaction(
-      workspace_uuid_str,
+      workspace_uuid,
       &uid,
       CollabParams {
         object_id: doc_id,

@@ -563,10 +539,10 @@ pub async fn insert_database_row(
   // update folder and broadcast
   collab_storage
     .upsert_new_collab_with_transaction(
-      workspace_uuid_str,
+      workspace_uuid,
       &uid,
       CollabParams {
-        object_id: workspace_uuid_str.to_string(),
+        object_id: workspace_uuid,
         encoded_collab_v1: created_doc.updated_folder.into(),
         collab_type: CollabType::Folder,
       },

@@ -576,7 +552,7 @@ pub async fn insert_database_row(
   .await?;
   broadcast_update_with_timeout(
     collab_storage.clone(),
-    workspace_uuid_str.to_string(),
+    workspace_uuid,
     created_doc.folder_updates,
   )
   .await;

@@ -585,10 +561,10 @@ pub async fn insert_database_row(
   // insert row
   collab_storage
     .upsert_new_collab_with_transaction(
-      workspace_uuid_str,
+      workspace_uuid,
       &uid,
       CollabParams {
-        object_id: new_db_row_id.to_string(),
+        object_id: new_db_row_id,
         encoded_collab_v1: new_db_row_ec_v1.into(),
         collab_type: CollabType::DatabaseRow,
       },

@@ -600,10 +576,10 @@ pub async fn insert_database_row(
   // update database
   collab_storage
     .upsert_new_collab_with_transaction(
-      workspace_uuid_str,
+      workspace_uuid,
       &uid,
       CollabParams {
-        object_id: database_uuid_str.to_string(),
+        object_id: database_uuid,
         encoded_collab_v1: updated_db_collab.into(),
         collab_type: CollabType::Database,
       },

@@ -613,12 +589,7 @@ pub async fn insert_database_row(
   .await?;

   db_txn.commit().await?;
-  broadcast_update_with_timeout(
-    collab_storage,
-    database_uuid_str.to_string(),
-    db_collab_update,
-  )
-  .await;
+  broadcast_update_with_timeout(collab_storage, database_uuid, db_collab_update).await;
   Ok(new_db_row_id.to_string())
 }

@@ -626,23 +597,23 @@ pub async fn insert_database_row(
 pub async fn upsert_database_row(
   collab_storage: Arc<CollabAccessControlStorage>,
   pg_pool: &PgPool,
-  workspace_uuid_str: &str,
-  database_uuid_str: &str,
+  workspace_uuid: Uuid,
+  database_uuid: Uuid,
   uid: i64,
-  row_id: &str,
+  row_id: Uuid,
   cell_value_by_id: HashMap<String, serde_json::Value>,
   row_doc_content: Option<String>,
 ) -> Result<(), AppError> {
   let (mut db_row_collab, db_row_body) =
-    match get_latest_collab_database_row_body(&collab_storage, workspace_uuid_str, row_id).await {
+    match get_latest_collab_database_row_body(&collab_storage, workspace_uuid, row_id).await {
       Ok(res) => res,
       Err(err) => match err {
        AppError::RecordNotFound(_) => {
          return insert_database_row(
            collab_storage,
            pg_pool,
-           workspace_uuid_str,
-           database_uuid_str,
+           workspace_uuid,
+           database_uuid,
            uid,
            Some(row_id),
            cell_value_by_id,

@@ -658,7 +629,7 @@ pub async fn upsert_database_row(
   // At this point, db row exists,
   // so we modify it, put into storage and broadcast change
   let (_db_collab, db_body) =
-    get_latest_collab_database_body(&collab_storage, workspace_uuid_str, database_uuid_str).await?;
+    get_latest_collab_database_body(&collab_storage, workspace_uuid, database_uuid).await?;
   let mut db_row_txn = db_row_collab.transact_mut();
   write_to_database_row(
     &db_body,

@@ -670,13 +641,13 @@ pub async fn upsert_database_row(
   .await?;

   // determine if there are any document changes
-  let doc_changes: Option<(String, DocChanges)> = get_database_row_doc_changes(
+  let doc_changes: Option<(Uuid, DocChanges)> = get_database_row_doc_changes(
     &collab_storage,
-    workspace_uuid_str,
+    workspace_uuid,
     row_doc_content,
     &db_row_body,
     &mut db_row_txn,
-    row_id,
+    &row_id,
     uid,
   )
   .await?;

@@ -690,10 +661,10 @@ pub async fn upsert_database_row(
   let mut db_txn = pg_pool.begin().await?;
   collab_storage
     .upsert_new_collab_with_transaction(
-      workspace_uuid_str,
+      workspace_uuid,
       &uid,
       CollabParams {
-        object_id: row_id.to_string(),
+        object_id: row_id,
         encoded_collab_v1: db_row_ec_v1.into(),
         collab_type: CollabType::DatabaseRow,
       },

@@ -701,12 +672,7 @@ pub async fn upsert_database_row(
       "inserting new database row from server",
     )
     .await?;
-  broadcast_update_with_timeout(
-    collab_storage.clone(),
-    row_id.to_string(),
-    db_row_collab_updates,
-  )
-  .await;
+  broadcast_update_with_timeout(collab_storage.clone(), row_id, db_row_collab_updates).await;

   // handle document changes
   if let Some((doc_id, doc_changes)) = doc_changes {

@@ -714,10 +680,10 @@ pub async fn upsert_database_row(
       DocChanges::Update(updated_doc, doc_update) => {
         collab_storage
           .upsert_new_collab_with_transaction(
-            workspace_uuid_str,
+            workspace_uuid,
             &uid,
             CollabParams {
-              object_id: doc_id.clone(),
+              object_id: doc_id,
               encoded_collab_v1: updated_doc.into(),
               collab_type: CollabType::Document,
             },

@@ -737,7 +703,7 @@ pub async fn upsert_database_row(
         // insert document
         collab_storage
           .upsert_new_collab_with_transaction(
-            workspace_uuid_str,
+            workspace_uuid,
             &uid,
             CollabParams {
               object_id: doc_id,

@@ -752,10 +718,10 @@ pub async fn upsert_database_row(
         // update folder and broadcast
         collab_storage
           .upsert_new_collab_with_transaction(
-            workspace_uuid_str,
+            workspace_uuid,
             &uid,
             CollabParams {
-              object_id: workspace_uuid_str.to_string(),
+              object_id: workspace_uuid,
               encoded_collab_v1: updated_folder.into(),
               collab_type: CollabType::Folder,
             },

@@ -763,12 +729,7 @@ pub async fn upsert_database_row(
           "inserting updated folder from server",
         )
         .await?;
-        broadcast_update_with_timeout(
-          collab_storage,
-          workspace_uuid_str.to_string(),
-          folder_updates,
-        )
-        .await;
+        broadcast_update_with_timeout(collab_storage, workspace_uuid, folder_updates).await;
       },
     }
   }

@@ -779,11 +740,11 @@ pub async fn upsert_database_row(

 pub async fn get_database_fields(
   collab_storage: &CollabAccessControlStorage,
-  workspace_uuid_str: &str,
-  database_uuid_str: &str,
+  workspace_uuid: Uuid,
+  database_uuid: Uuid,
 ) -> Result<Vec<AFDatabaseField>, AppError> {
   let (db_collab, db_body) =
-    get_latest_collab_database_body(collab_storage, workspace_uuid_str, database_uuid_str).await?;
+    get_latest_collab_database_body(collab_storage, workspace_uuid, database_uuid).await?;

   let all_fields = db_body.fields.get_all_fields(&db_collab.transact());
   let mut acc = Vec::with_capacity(all_fields.len());

@@ -806,8 +767,8 @@ pub async fn add_database_field(
   uid: i64,
   collab_storage: Arc<CollabAccessControlStorage>,
   pg_pool: &PgPool,
-  workspace_id: &str,
-  database_id: &str,
+  workspace_id: Uuid,
+  database_id: Uuid,
   insert_field: AFInsertDatabaseField,
 ) -> Result<String, AppError> {
   let (mut db_collab, db_body) =

@@ -855,7 +816,7 @@ pub async fn add_database_field(
       workspace_id,
       &uid,
       CollabParams {
-        object_id: database_id.to_string(),
+        object_id: database_id,
         encoded_collab_v1: updated_db_collab.into(),
         collab_type: CollabType::Database,
       },

@@ -865,7 +826,7 @@ pub async fn add_database_field(
   .await?;

   pg_txn.commit().await?;
-  broadcast_update_with_timeout(collab_storage, database_id.to_string(), db_collab_update).await;
+  broadcast_update_with_timeout(collab_storage, database_id, db_collab_update).await;

   Ok(new_id)
 }

@@ -873,17 +834,16 @@ pub async fn add_database_field(
 pub async fn list_database_row_ids_updated(
   collab_storage: &CollabAccessControlStorage,
   pg_pool: &PgPool,
-  workspace_uuid_str: &str,
-  database_uuid_str: &str,
+  workspace_uuid: Uuid,
+  database_uuid: Uuid,
   after: &DateTime<Utc>,
 ) -> Result<Vec<DatabaseRowUpdatedItem>, AppError> {
-  let row_ids = list_database_row_ids(collab_storage, workspace_uuid_str, database_uuid_str)
+  let row_ids: Vec<_> = list_database_row_ids(collab_storage, workspace_uuid, database_uuid)
     .await?
     .into_iter()
-    .map(|row| row.id)
-    .collect::<Vec<String>>();
+    .flat_map(|row| Uuid::parse_str(&row.id))
+    .collect();

-  let workspace_uuid: Uuid = workspace_uuid_str.parse()?;
   let updated_row_ids =
     select_last_updated_database_row_ids(pg_pool, &workspace_uuid, &row_ids, after).await?;
   Ok(updated_row_ids)

@@ -892,15 +852,14 @@ pub async fn list_database_row_ids_updated(
 pub async fn list_database_row_details(
   collab_storage: &CollabAccessControlStorage,
   uid: i64,
-  workspace_uuid_str: String,
-  database_uuid_str: String,
-  row_ids: &[&str],
+  workspace_uuid: Uuid,
+  database_uuid: Uuid,
+  row_ids: &[Uuid],
   unsupported_field_types: &[FieldType],
   with_doc: bool,
 ) -> Result<Vec<AFDatabaseRowDetail>, AppError> {
   let (database_collab, db_body) =
-    get_latest_collab_database_body(collab_storage, &workspace_uuid_str, &database_uuid_str)
-      .await?;
+    get_latest_collab_database_body(collab_storage, workspace_uuid, database_uuid).await?;

   let all_fields: Vec<Field> = db_body
     .fields

@@ -917,12 +876,12 @@ pub async fn list_database_row_details(
   let query_collabs: Vec<QueryCollab> = row_ids
     .iter()
     .map(|id| QueryCollab {
-      object_id: id.to_string(),
+      object_id: *id,
       collab_type: CollabType::DatabaseRow,
     })
     .collect();
   let mut db_row_details = collab_storage
-    .batch_get_collab(&uid, &workspace_uuid_str, query_collabs, true)
+    .batch_get_collab(&uid, workspace_uuid, query_collabs, true)
     .await
     .into_iter()
     .flat_map(|(id, result)| match result {

@@ -934,14 +893,20 @@ pub async fn list_database_row_details(
           return None;
         },
       };
-      let collab =
-        match Collab::new_with_source(CollabOrigin::Server, &id, ec.into(), vec![], false) {
-          Ok(collab) => collab,
-          Err(err) => {
-            tracing::error!("Failed to create collab: {:?}", err);
-            return None;
-          },
-        };
+      let id = id.to_string();
+      let collab = match Collab::new_with_source(
+        CollabOrigin::Server,
+        &id.to_string(),
+        ec.into(),
+        vec![],
+        false,
+      ) {
+        Ok(collab) => collab,
+        Err(err) => {
+          tracing::error!("Failed to create collab: {:?}", err);
+          return None;
+        },
+      };
       let row_detail = match RowDetail::from_collab(&collab) {
         Some(row_detail) => row_detail,
         None => {

@@ -974,8 +939,10 @@ pub async fn list_database_row_details(
     .flat_map(|row| {
       row.id.parse::<Uuid>().ok().map(|row_uuid| {
         (
-          row.id.clone(),
-          meta_id_from_row_id(&row_uuid, RowMetaKey::DocumentId),
+          row_uuid,
+          meta_id_from_row_id(&row_uuid, RowMetaKey::DocumentId)
+            .parse::<Uuid>()
+            .unwrap(),
         )
       })
     })

@@ -983,12 +950,12 @@ pub async fn list_database_row_details(
   let query_db_docs = doc_id_by_row_id
     .values()
     .map(|doc_id| QueryCollab {
-      object_id: doc_id.to_string(),
+      object_id: *doc_id,
       collab_type: CollabType::Document,
     })
     .collect::<Vec<_>>();
   let mut query_res = collab_storage
-    .batch_get_collab(&uid, &workspace_uuid_str, query_db_docs, true)
+    .batch_get_collab(&uid, workspace_uuid, query_db_docs, true)
     .await;
   for row_detail in &mut db_row_details {
     if let Err(err) = fill_in_db_row_doc(row_detail, &doc_id_by_row_id, &mut query_res) {

@@ -1002,16 +969,18 @@ pub async fn list_database_row_details(

 fn fill_in_db_row_doc(
   row_detail: &mut AFDatabaseRowDetail,
-  doc_id_by_row_id: &HashMap<String, String>,
-  query_res: &mut HashMap<String, QueryCollabResult>,
+  doc_id_by_row_id: &HashMap<Uuid, Uuid>,
+  query_res: &mut HashMap<Uuid, QueryCollabResult>,
 ) -> Result<(), AppError> {
-  let doc_id = doc_id_by_row_id.get(&row_detail.id).ok_or_else(|| {
-    AppError::Internal(anyhow::anyhow!(
-      "Failed to get document id for row id: {}",
-      row_detail.id
-    ))
-  })?;
-  let res = query_res.remove(doc_id.as_str()).ok_or_else(|| {
+  let doc_id = doc_id_by_row_id
+    .get(&row_detail.id.parse()?)
+    .ok_or_else(|| {
+      AppError::Internal(anyhow::anyhow!(
+        "Failed to get document id for row id: {}",
+        row_detail.id
+      ))
+    })?;
+  let res = query_res.remove(doc_id).ok_or_else(|| {
     AppError::Internal(anyhow::anyhow!(
       "Failed to get document collab for row id: {}",
       row_detail.id

@@ -1023,13 +992,19 @@ fn fill_in_db_row_doc(
     QueryCollabResult::Failed { error } => return Err(AppError::Internal(anyhow::anyhow!(error))),
   };
   let ec = EncodedCollab::decode_from_bytes(&ec_bytes)?;
-  let doc_collab = Collab::new_with_source(CollabOrigin::Server, doc_id, ec.into(), vec![], false)
-    .map_err(|err| {
-      AppError::Internal(anyhow::anyhow!(
-        "Failed to create document collab: {:?}",
-        err
-      ))
-    })?;
+  let doc_collab = Collab::new_with_source(
+    CollabOrigin::Server,
+    &doc_id.to_string(),
+    ec.into(),
+    vec![],
+    false,
+  )
+  .map_err(|err| {
+    AppError::Internal(anyhow::anyhow!(
+      "Failed to create document collab: {:?}",
+      err
+    ))
+  })?;
   let doc = Document::open(doc_collab)
     .map_err(|err| AppError::Internal(anyhow::anyhow!("Failed to open document: {:?}", err)))?;
   let plain_text = doc.to_plain_text(true, false).map_err(|err| {
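The insert/upsert hunks above repeat one ordering: persist the encoded collab under its Uuid object id inside a Postgres transaction, commit, and only then broadcast the update to realtime subscribers. A toy sketch of that ordering under stated assumptions (every type and function below is a stand-in for CollabParams, upsert_new_collab_with_transaction, and broadcast_update_with_timeout; none of it is the real API):

    use uuid::Uuid;

    #[allow(dead_code)]
    struct CollabParams {
      object_id: Uuid, // stand-in: the real struct also carries a collab type
      encoded_collab_v1: Vec<u8>,
    }

    // Stand-in for the transactional upsert: buffer writes until "commit".
    fn persist(tx: &mut Vec<CollabParams>, params: CollabParams) {
      tx.push(params);
    }

    // Stand-in for the broadcast: runs strictly after commit, so subscribers
    // never see an update the database could still roll back.
    fn broadcast(object_id: Uuid, update: &[u8]) {
      println!("broadcast {} bytes for {}", update.len(), object_id);
    }

    fn main() {
      let row_id = Uuid::new_v4();
      let update = vec![1u8, 2, 3];
      let mut tx = Vec::new();
      persist(&mut tx, CollabParams { object_id: row_id, encoded_collab_v1: update.clone() });
      // a real implementation would commit the transaction here
      broadcast(row_id, &update);
    }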
@@ -196,23 +196,23 @@ pub fn type_options_serde(

 pub async fn get_latest_collab_database_row_body(
   collab_storage: &CollabAccessControlStorage,
-  workspace_uuid_str: &str,
-  db_row_uuid_str: &str,
+  workspace_id: Uuid,
+  db_row_id: Uuid,
 ) -> Result<(Collab, DatabaseRowBody), AppError> {
   let mut db_row_collab = get_latest_collab(
     collab_storage,
     GetCollabOrigin::Server,
-    workspace_uuid_str,
-    db_row_uuid_str,
+    workspace_id,
+    db_row_id,
     CollabType::DatabaseRow,
   )
   .await?;

-  let row_id: RowId = db_row_uuid_str.to_string().into();
+  let row_id: RowId = db_row_id.to_string().into();
   let db_row_body = DatabaseRowBody::open(row_id, &mut db_row_collab).map_err(|err| {
     AppError::Internal(anyhow::anyhow!(
       "Failed to create database row body from collab, db_row_id: {}, err: {}",
-      db_row_uuid_str,
+      db_row_id,
       err
     ))
   })?;

@@ -222,14 +222,14 @@ pub async fn get_latest_collab_database_row_body(

 pub async fn get_latest_collab_database_body(
   collab_storage: &CollabAccessControlStorage,
-  workspace_uuid_str: &str,
-  database_uuid_str: &str,
+  workspace_id: Uuid,
+  database_id: Uuid,
 ) -> Result<(Collab, DatabaseBody), AppError> {
   let db_collab = get_latest_collab(
     collab_storage,
     GetCollabOrigin::Server,
-    workspace_uuid_str,
-    database_uuid_str,
+    workspace_id,
+    database_id,
     CollabType::Database,
   )
   .await?;

@@ -241,7 +241,7 @@ pub async fn get_latest_collab_database_body(
     .ok_or_else(|| {
       AppError::Internal(anyhow::anyhow!(
         "Failed to create database body from collab, db_collab_id: {}",
-        database_uuid_str,
+        database_id,
       ))
     })?;
   Ok((db_collab, db_body))

@@ -250,17 +250,17 @@ pub async fn get_latest_collab_database_body(
 pub async fn get_latest_collab_encoded(
   collab_storage: &CollabAccessControlStorage,
   collab_origin: GetCollabOrigin,
-  workspace_id: &str,
-  oid: &str,
+  workspace_id: Uuid,
+  object_id: Uuid,
   collab_type: CollabType,
 ) -> Result<EncodedCollab, AppError> {
   collab_storage
     .get_encode_collab(
       collab_origin,
       QueryCollabParams {
-        workspace_id: workspace_id.to_string(),
+        workspace_id,
         inner: QueryCollab {
-          object_id: oid.to_string(),
+          object_id,
           collab_type,
         },
       },

@@ -272,10 +272,10 @@ pub async fn get_latest_collab_encoded(
 pub async fn batch_get_latest_collab_encoded(
   collab_storage: &CollabAccessControlStorage,
   collab_origin: GetCollabOrigin,
-  workspace_id: &str,
-  oid_list: &[String],
+  workspace_id: Uuid,
+  oid_list: &[Uuid],
   collab_type: CollabType,
-) -> Result<HashMap<String, EncodedCollab>, AppError> {
+) -> Result<HashMap<Uuid, EncodedCollab>, AppError> {
   let uid = match collab_origin {
     GetCollabOrigin::User { uid } => uid,
     _ => 0,

@@ -283,7 +283,7 @@ pub async fn batch_get_latest_collab_encoded(
   let queries: Vec<QueryCollab> = oid_list
     .iter()
     .map(|row_id| QueryCollab {
-      object_id: row_id.to_string(),
+      object_id: *row_id,
       collab_type,
     })
     .collect();

@@ -291,7 +291,7 @@ pub async fn batch_get_latest_collab_encoded(
     .batch_get_collab(&uid, workspace_id, queries, true)
     .await;
   let encoded_collabs = tokio::task::spawn_blocking(move || {
-    let collabs: HashMap<String, EncodedCollab> = query_collab_results
+    let collabs: HashMap<_, EncodedCollab> = query_collab_results
       .into_par_iter()
       .filter_map(|(oid, query_collab_result)| match query_collab_result {
         QueryCollabResult::Success { encode_collab_v1 } => {

@@ -319,18 +319,24 @@ pub async fn batch_get_latest_collab_encoded(
 pub async fn get_latest_collab(
   storage: &CollabAccessControlStorage,
   origin: GetCollabOrigin,
-  workspace_id: &str,
-  oid: &str,
+  workspace_id: Uuid,
+  oid: Uuid,
   collab_type: CollabType,
 ) -> Result<Collab, AppError> {
   let ec = get_latest_collab_encoded(storage, origin, workspace_id, oid, collab_type).await?;
-  let collab: Collab = Collab::new_with_source(CollabOrigin::Server, oid, ec.into(), vec![], false)
-    .map_err(|e| {
-      AppError::Internal(anyhow::anyhow!(
-        "Failed to create collab from encoded collab: {:?}",
-        e
-      ))
-    })?;
+  let collab: Collab = Collab::new_with_source(
+    CollabOrigin::Server,
+    &oid.to_string(),
+    ec.into(),
+    vec![],
+    false,
+  )
+  .map_err(|e| {
+    AppError::Internal(anyhow::anyhow!(
+      "Failed to create collab from encoded collab: {:?}",
+      e
+    ))
+  })?;
   Ok(collab)
 }

@@ -338,15 +344,14 @@ pub async fn get_latest_collab_workspace_database_body(
   pg_pool: &PgPool,
   storage: &CollabAccessControlStorage,
   origin: GetCollabOrigin,
-  workspace_id: &str,
+  workspace_id: Uuid,
 ) -> Result<WorkspaceDatabaseBody, AppError> {
-  let workspace_uuid = Uuid::parse_str(workspace_id)?;
-  let ws_db_oid = select_workspace_database_oid(pg_pool, &workspace_uuid).await?;
+  let ws_db_oid = select_workspace_database_oid(pg_pool, &workspace_id).await?;
   let mut collab = get_latest_collab(
     storage,
     origin,
     workspace_id,
-    &ws_db_oid,
+    ws_db_oid,
     CollabType::WorkspaceDatabase,
   )
   .await?;

@@ -362,7 +367,7 @@ pub async fn get_latest_collab_workspace_database_body(
 pub async fn get_latest_collab_folder(
   collab_storage: &CollabAccessControlStorage,
   collab_origin: GetCollabOrigin,
-  workspace_id: &str,
+  workspace_id: Uuid,
 ) -> Result<Folder, AppError> {
   let folder_uid = if let GetCollabOrigin::User { uid } = collab_origin {
     uid

@@ -389,7 +394,7 @@ pub async fn get_latest_collab_folder(
     folder_uid,
     CollabOrigin::Server,
     encoded_collab.into(),
-    workspace_id,
+    &workspace_id.to_string(),
     vec![],
   )
   .map_err(|e| {

@@ -405,8 +410,8 @@ pub async fn get_latest_collab_folder(
 pub async fn get_latest_collab_document(
   collab_storage: &CollabAccessControlStorage,
   collab_origin: GetCollabOrigin,
-  workspace_id: &str,
-  doc_oid: &str,
+  workspace_id: Uuid,
+  doc_oid: Uuid,
 ) -> Result<Document, AppError> {
   let doc_collab = get_latest_collab(
     collab_storage,

@@ -451,10 +456,10 @@ pub async fn collab_to_doc_state(
     .await?
 }

-pub fn collab_from_doc_state(doc_state: Vec<u8>, object_id: &str) -> Result<Collab, AppError> {
+pub fn collab_from_doc_state(doc_state: Vec<u8>, object_id: &Uuid) -> Result<Collab, AppError> {
   let collab = Collab::new_with_source(
     CollabOrigin::Server,
-    object_id,
+    &object_id.to_string(),
     DataSource::DocStateV1(doc_state),
     vec![],
     false,

@@ -516,17 +521,18 @@ pub async fn write_to_database_row(
 }

 pub async fn create_row_document(
-  workspace_id: &str,
+  workspace_id: Uuid,
   uid: i64,
-  new_doc_id: &str,
+  new_doc_id: Uuid,
   collab_storage: &CollabAccessControlStorage,
   row_doc_content: String,
 ) -> Result<CreatedRowDocument, AppError> {
   let md_importer = MDImporter::new(None);
+  let new_doc_id_str = new_doc_id.to_string();
   let doc_data = md_importer
-    .import(new_doc_id, row_doc_content)
+    .import(&new_doc_id_str, row_doc_content)
     .map_err(|e| AppError::Internal(anyhow::anyhow!("Failed to import markdown: {:?}", e)))?;
-  let doc = Document::create(new_doc_id, doc_data)
+  let doc = Document::create(&new_doc_id_str, doc_data)
     .map_err(|e| AppError::Internal(anyhow::anyhow!("Failed to create document: {:?}", e)))?;
   let doc_ec = doc.encode_collab().map_err(|e| {
     AppError::Internal(anyhow::anyhow!("Failed to encode document collab: {:?}", e))

@@ -538,7 +544,11 @@ pub async fn create_row_document(
     let mut folder_txn = folder.collab.transact_mut();
     folder.body.views.insert(
       &mut folder_txn,
-      collab_folder::View::orphan_view(new_doc_id, collab_folder::ViewLayout::Document, Some(uid)),
+      collab_folder::View::orphan_view(
+        &new_doc_id_str,
+        collab_folder::ViewLayout::Document,
+        Some(uid),
+      ),
       None,
     );
     folder_txn.encode_update_v1()

@@ -564,13 +574,13 @@ pub enum DocChanges {

 pub async fn get_database_row_doc_changes(
   collab_storage: &CollabAccessControlStorage,
-  workspace_uuid_str: &str,
+  workspace_id: Uuid,
   row_doc_content: Option<String>,
   db_row_body: &DatabaseRowBody,
   db_row_txn: &mut yrs::TransactionMut<'_>,
-  row_id: &str,
+  row_id: &Uuid,
   uid: i64,
-) -> Result<Option<(String, DocChanges)>, AppError> {
+) -> Result<Option<(Uuid, DocChanges)>, AppError> {
   let row_doc_content = match row_doc_content {
     Some(row_doc_content) if !row_doc_content.is_empty() => row_doc_content,
     _ => return Ok(None),

@@ -582,11 +592,12 @@ pub async fn get_database_row_doc_changes(

   match doc_id {
     Some(doc_id) => {
+      let doc_uuid = Uuid::parse_str(&doc_id)?;
      let cur_doc = get_latest_collab_document(
        collab_storage,
        GetCollabOrigin::Server,
-       workspace_uuid_str,
-       &doc_id,
+       workspace_id,
+       doc_uuid,
      )
      .await?;

@@ -623,11 +634,14 @@ pub async fn get_database_row_doc_changes(
      }

      let updated_doc = collab_to_bin(cur_doc_collab, CollabType::Document).await?;
-     Ok(Some((doc_id, DocChanges::Update(updated_doc, doc_update))))
+     Ok(Some((
+       doc_uuid,
+       DocChanges::Update(updated_doc, doc_update),
+     )))
     },
     None => {
       // update row to indicate that the document is not empty
-      let is_document_empty_id = meta_id_from_row_id(&row_id.parse()?, RowMetaKey::IsDocumentEmpty);
+      let is_document_empty_id = meta_id_from_row_id(row_id, RowMetaKey::IsDocumentEmpty);
      db_row_body
        .get_meta()
        .insert(db_row_txn, is_document_empty_id, false);

@@ -638,10 +652,11 @@ pub async fn get_database_row_doc_changes(
        .map_err(|err| AppError::Internal(anyhow::anyhow!("Failed to get document id: {:?}", err)))?
        .ok_or_else(|| AppError::Internal(anyhow::anyhow!("Failed to get document id")))?;

+      let new_doc_id = Uuid::parse_str(&new_doc_id)?;
      let created_row_doc: CreatedRowDocument = create_row_document(
-       workspace_uuid_str,
+       workspace_id,
        uid,
-       &new_doc_id,
+       new_doc_id,
        collab_storage,
        row_doc_content,
      )
Some files were not shown because too many files have changed in this diff.