Mirror of https://github.com/AppFlowy-IO/AppFlowy-Cloud.git (synced 2025-04-19 03:24:42 -04:00)

feat: implement import notion zip file api endpoint (#840)

* chore: implement api endpoint
* chore: worker
* chore: async zip
* chore: test bulk insert
* chore: insert collab
* chore: custom task
* chore: consume un acked message
* chore: fix compile
* chore: add test
* test: update
* chore: save import record
* chore: save import record
* chore: fix ci
* chore: remove unused deps
* chore: update docker file
* chore: build worker images
* chore: use small int
* chore: use small int
* chore: rm protoc deps in runtime
* chore: move collab cache to database crate
* chore: update test
* chore: rm health api endpoint
* chore: clippy
* chore: update ci
* chore: add test
* chore: upgrade collab
* chore: clippy
* chore: update test
* chore: use custom host
* chore: config nginx
* chore: install cert

parent 2ff466bb11, commit 9c0dffacef
83 changed files with 3816 additions and 264 deletions
8  .github/workflows/integration_test.yml (vendored)
@@ -42,16 +42,19 @@ jobs:
- name: Build Docker Images
run: |
docker compose build appflowy_cloud appflowy_history
docker compose build appflowy_cloud appflowy_history appflowy_worker

- name: Push docker images to docker hub
run: |
docker tag appflowyinc/appflowy_cloud appflowyinc/appflowy_cloud:${GITHUB_SHA}
docker tag appflowyinc/appflowy_history appflowyinc/appflowy_history:${GITHUB_SHA}
docker tag appflowyinc/appflowy_worker appflowyinc/appflowy_worker:${GITHUB_SHA}
echo ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }} | docker login --username appflowyinc --password-stdin
docker push appflowyinc/appflowy_cloud:${GITHUB_SHA}
docker push appflowyinc/appflowy_history:${GITHUB_SHA}
docker push appflowyinc/appflowy_worker:${GITHUB_SHA}
APPFLOWY_HISTORY_VERSION=${GITHUB_SHA}
APPFLOWY_WORKER_VERSION=${GITHUB_SHA}
APPFLOWY_CLOUD_VERSION=0.1.1

test:

@@ -65,6 +68,8 @@ jobs:
test_cmd: "--workspace --exclude appflowy-history --exclude appflowy-ai-client --features ai-test-enabled"
- test_service: "appflowy_history"
test_cmd: "-p appflowy-history"
- test_service: "appflowy_worker"
test_cmd: "-p appflowy-worker"
steps:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable

@@ -103,6 +108,7 @@ jobs:
- name: Run Docker-Compose
run: |
export APPFLOWY_HISTORY_VERSION=${GITHUB_SHA}
export APPFLOWY_WORKER_VERSION=${GITHUB_SHA}
export APPFLOWY_CLOUD_VERSION=${GITHUB_SHA}
docker compose -f docker-compose-ci.yml up -d
docker ps -a
97  .github/workflows/push_latest_docker.yml (vendored)
@@ -334,3 +334,100 @@ jobs:
- name: Logout from Docker Hub
if: always()
run: docker logout

appflowy_worker_image:
runs-on: ubuntu-22.04
env:
IMAGE_NAME: ${{ secrets.DOCKER_HUB_USERNAME }}/appflowy_worker
strategy:
fail-fast: false
matrix:
job:
- { name: "amd64", docker_platform: "linux/amd64" }
- { name: "arm64v8", docker_platform: "linux/arm64" }

steps:
- name: Check out the repository
uses: actions/checkout@v2
with:
fetch-depth: 1

- name: Set up QEMU
uses: docker/setup-qemu-action@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

- name: Get git tag
id: vars
run: |
T=${GITHUB_REF#refs/*/} # Remove "refs/*/" prefix from GITHUB_REF
echo "GIT_TAG=$T" >> $GITHUB_ENV

- name: Extract metadata
id: meta
uses: docker/metadata-action@v4
with:
images: registry.hub.docker.com/${{ env.IMAGE_NAME }}

- name: Build and push ${{ matrix.job.image_name }}:${{ env.GIT_TAG }}
uses: docker/build-push-action@v5
with:
platforms: ${{ matrix.job.docker_platform }}
file: ./services/appflowy-worker/Dockerfile
push: true
tags: |
${{ env.IMAGE_NAME }}:${{ env.LATEST_TAG }}-${{ matrix.job.name }}
${{ env.IMAGE_NAME }}:${{ env.GIT_TAG }}-${{ matrix.job.name }}
labels: ${{ steps.meta.outputs.labels }}
provenance: false

- name: Logout from Docker Hub
if: always()
run: docker logout

appflowy_worker_manifest:
runs-on: ubuntu-22.04
needs: [ appflowy_worker_image ]
strategy:
fail-fast: false
matrix:
job:
- { image_name: "appflowy_worker" }

steps:
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}

- name: Get git tag
id: vars
run: |
T=${GITHUB_REF#refs/*/} # Remove "refs/*/" prefix from GITHUB_REF
echo "GIT_TAG=$T" >> $GITHUB_ENV

- name: Create and push manifest for ${{ matrix.job.image_name }}:version
uses: Noelware/docker-manifest-action@master
with:
inputs: ${{ secrets.DOCKER_HUB_USERNAME }}/${{ matrix.job.image_name }}:${{ env.GIT_TAG }}
images: ${{ secrets.DOCKER_HUB_USERNAME }}/${{ matrix.job.image_name }}:${{ env.GIT_TAG }}-amd64,${{ secrets.DOCKER_HUB_USERNAME }}/${{ matrix.job.image_name }}:${{ env.GIT_TAG }}-arm64v8
push: true

- name: Create and push manifest for ${{ matrix.job.image_name }}:latest
uses: Noelware/docker-manifest-action@master
with:
inputs: ${{ secrets.DOCKER_HUB_USERNAME }}/${{ matrix.job.image_name }}:${{ env.LATEST_TAG }}
images: ${{ secrets.DOCKER_HUB_USERNAME }}/${{ matrix.job.image_name }}:${{ env.LATEST_TAG }}-amd64,${{ secrets.DOCKER_HUB_USERNAME }}/${{ matrix.job.image_name }}:${{ env.LATEST_TAG }}-arm64v8
push: true

- name: Logout from Docker Hub
if: always()
run: docker logout
22  .sqlx/query-081abcd7f80664e8acd205833b0f9ca43bc1ccc03d992e7b1c45c3e401a6007a.json (generated, new file)
|
@ -0,0 +1,22 @@
|
|||
{
|
||||
"db_name": "PostgreSQL",
|
||||
"query": "\n SELECT\n database_storage_id\n FROM public.af_workspace\n WHERE workspace_id = $1\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"ordinal": 0,
|
||||
"name": "database_storage_id",
|
||||
"type_info": "Uuid"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Left": [
|
||||
"Uuid"
|
||||
]
|
||||
},
|
||||
"nullable": [
|
||||
false
|
||||
]
|
||||
},
|
||||
"hash": "081abcd7f80664e8acd205833b0f9ca43bc1ccc03d992e7b1c45c3e401a6007a"
|
||||
}
|
15  .sqlx/query-1c8f022ff5add11376dbbc17efd874dd31fd908c4f17be1bded18dbc689e3b36.json (generated, new file)
|
@ -0,0 +1,15 @@
|
|||
{
|
||||
"db_name": "PostgreSQL",
|
||||
"query": "\n UPDATE public.af_workspace\n SET is_initialized = $2\n WHERE workspace_id = $1\n ",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Left": [
|
||||
"Uuid",
|
||||
"Bool"
|
||||
]
|
||||
},
|
||||
"nullable": []
|
||||
},
|
||||
"hash": "1c8f022ff5add11376dbbc17efd874dd31fd908c4f17be1bded18dbc689e3b36"
|
||||
}
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"db_name": "PostgreSQL",
|
||||
"query": "\n WITH new_workspace AS (\n INSERT INTO public.af_workspace (owner_uid, workspace_name)\n VALUES ((SELECT uid FROM public.af_user WHERE uuid = $1), $2)\n RETURNING *\n )\n SELECT\n workspace_id,\n database_storage_id,\n owner_uid,\n owner_profile.name AS owner_name,\n owner_profile.email AS owner_email,\n new_workspace.created_at,\n workspace_type,\n new_workspace.deleted_at,\n workspace_name,\n icon\n FROM new_workspace\n JOIN public.af_user AS owner_profile ON new_workspace.owner_uid = owner_profile.uid;\n ",
|
||||
"query": "\n WITH new_workspace AS (\n INSERT INTO public.af_workspace (owner_uid, workspace_name, is_initialized)\n VALUES ((SELECT uid FROM public.af_user WHERE uuid = $1), $2, $3)\n RETURNING *\n )\n SELECT\n workspace_id,\n database_storage_id,\n owner_uid,\n owner_profile.name AS owner_name,\n owner_profile.email AS owner_email,\n new_workspace.created_at,\n workspace_type,\n new_workspace.deleted_at,\n workspace_name,\n icon\n FROM new_workspace\n JOIN public.af_user AS owner_profile ON new_workspace.owner_uid = owner_profile.uid;\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
|
@ -57,7 +57,8 @@
|
|||
"parameters": {
|
||||
"Left": [
|
||||
"Uuid",
|
||||
"Text"
|
||||
"Text",
|
||||
"Bool"
|
||||
]
|
||||
},
|
||||
"nullable": [
|
||||
|
@ -73,5 +74,5 @@
|
|||
false
|
||||
]
|
||||
},
|
||||
"hash": "cf7b8baaba35e74671911e13f1efcdfa3a642d2b7276c2a81f877a6217a0d473"
|
||||
"hash": "4d3e6fd7528c1653e823d9236f0886e73d1be4f065e06166031846a2eab16d83"
|
||||
}
|
20  .sqlx/query-53cedeff683b699e30498f5976112d387f67b223a0d134b5887ab6a44e16df7f.json (generated, new file)
@@ -0,0 +1,20 @@
{
  "db_name": "PostgreSQL",
  "query": "\n INSERT INTO af_collab (oid, blob, len, partition_key, encrypt, owner_uid, workspace_id)\n SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::int[], $5::int[], $6::bigint[], $7::uuid[])\n ON CONFLICT (oid, partition_key)\n DO NOTHING;\n ",
  "describe": {
    "columns": [],
    "parameters": {
      "Left": [
        "UuidArray",
        "ByteaArray",
        "Int4Array",
        "Int4Array",
        "Int4Array",
        "Int8Array",
        "UuidArray"
      ]
    },
    "nullable": []
  },
  "hash": "53cedeff683b699e30498f5976112d387f67b223a0d134b5887ab6a44e16df7f"
}
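The statement above does the whole bulk insert in one round trip: each column is passed as a Postgres array and expanded row-wise with UNNEST. A rough sketch of how such a prepared statement can be bound from Rust with sqlx is shown below; the function and variable names are illustrative and not necessarily the code in this commit.

use sqlx::PgPool;
use uuid::Uuid;

// Illustrative helper: bulk-insert collab rows via a single UNNEST statement.
// All column vectors must have the same length.
async fn bulk_insert_af_collab(
  pool: &PgPool,
  oids: Vec<Uuid>,
  blobs: Vec<Vec<u8>>,
  lens: Vec<i32>,
  partition_keys: Vec<i32>,
  encrypts: Vec<i32>,
  owner_uids: Vec<i64>,
  workspace_ids: Vec<Uuid>,
) -> Result<(), sqlx::Error> {
  sqlx::query(
    r#"
    INSERT INTO af_collab (oid, blob, len, partition_key, encrypt, owner_uid, workspace_id)
    SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::int[], $5::int[], $6::bigint[], $7::uuid[])
    ON CONFLICT (oid, partition_key) DO NOTHING;
    "#,
  )
  .bind(oids)
  .bind(blobs)
  .bind(lens)
  .bind(partition_keys)
  .bind(encrypts)
  .bind(owner_uids)
  .bind(workspace_ids)
  .execute(pool)
  .await?;
  Ok(())
}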
20  .sqlx/query-711debcb02d19b1de4ff7dfcf77c2a431c9fe83ffd362d41b7f0f14a2f241d4b.json (generated, new file)
|
@ -0,0 +1,20 @@
|
|||
{
|
||||
"db_name": "PostgreSQL",
|
||||
"query": "\n SELECT rp.permission_id\n FROM af_role_permissions rp\n JOIN af_roles ON rp.role_id = af_roles.id\n WHERE af_roles.name = 'Owner';\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
"ordinal": 0,
|
||||
"name": "permission_id",
|
||||
"type_info": "Int4"
|
||||
}
|
||||
],
|
||||
"parameters": {
|
||||
"Left": []
|
||||
},
|
||||
"nullable": [
|
||||
false
|
||||
]
|
||||
},
|
||||
"hash": "711debcb02d19b1de4ff7dfcf77c2a431c9fe83ffd362d41b7f0f14a2f241d4b"
|
||||
}
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"db_name": "PostgreSQL",
|
||||
"query": "\n SELECT\n w.workspace_id,\n w.database_storage_id,\n w.owner_uid,\n u.name AS owner_name,\n u.email AS owner_email,\n w.created_at,\n w.workspace_type,\n w.deleted_at,\n w.workspace_name,\n w.icon\n FROM af_workspace w\n JOIN af_workspace_member wm ON w.workspace_id = wm.workspace_id\n JOIN public.af_user u ON w.owner_uid = u.uid\n WHERE wm.uid = (\n SELECT uid FROM public.af_user WHERE uuid = $1\n );\n ",
|
||||
"query": "\n SELECT\n w.workspace_id,\n w.database_storage_id,\n w.owner_uid,\n u.name AS owner_name,\n u.email AS owner_email,\n w.created_at,\n w.workspace_type,\n w.deleted_at,\n w.workspace_name,\n w.icon\n FROM af_workspace w\n JOIN af_workspace_member wm ON w.workspace_id = wm.workspace_id\n JOIN public.af_user u ON w.owner_uid = u.uid\n WHERE wm.uid = (\n SELECT uid FROM public.af_user WHERE uuid = $1\n )\n AND COALESCE(w.is_initialized, true) = true;\n ",
|
||||
"describe": {
|
||||
"columns": [
|
||||
{
|
||||
|
@ -72,5 +72,5 @@
|
|||
false
|
||||
]
|
||||
},
|
||||
"hash": "dbebcabe81603dca27ad9fc5a5df0f1e56a62016246c5a522423102a9e9b6dae"
|
||||
"hash": "a603b05473649d17748978ca6e10ce79c7b63ba219b18f7b49f5dd919689bb9b"
|
||||
}
|
16  .sqlx/query-ceaa58cd92cd2a4c554c2a57fee11fbfd0bdb2b0e1b777d8533ae7c37d3b69e2.json (generated, new file)
|
@ -0,0 +1,16 @@
|
|||
{
|
||||
"db_name": "PostgreSQL",
|
||||
"query": "\n INSERT INTO af_collab_member (uid, oid, permission_id)\n SELECT * FROM UNNEST($1::bigint[], $2::uuid[], $3::int[])\n ON CONFLICT (uid, oid)\n DO NOTHING;\n ",
|
||||
"describe": {
|
||||
"columns": [],
|
||||
"parameters": {
|
||||
"Left": [
|
||||
"Int8Array",
|
||||
"UuidArray",
|
||||
"Int4Array"
|
||||
]
|
||||
},
|
||||
"nullable": []
|
||||
},
|
||||
"hash": "ceaa58cd92cd2a4c554c2a57fee11fbfd0bdb2b0e1b777d8533ae7c37d3b69e2"
|
||||
}
|
411  Cargo.lock (generated)
|
@ -625,7 +625,7 @@ dependencies = [
|
|||
"database-entity",
|
||||
"derive_more",
|
||||
"dotenvy",
|
||||
"fancy-regex",
|
||||
"fancy-regex 0.11.0",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"gotrue",
|
||||
|
@ -659,6 +659,7 @@ dependencies = [
|
|||
"serde",
|
||||
"serde_json",
|
||||
"serde_repr",
|
||||
"sha2",
|
||||
"shared-entity",
|
||||
"snowflake",
|
||||
"sqlx",
|
||||
|
@ -674,6 +675,7 @@ dependencies = [
|
|||
"tracing",
|
||||
"tracing-bunyan-formatter",
|
||||
"tracing-subscriber",
|
||||
"unicode-normalization",
|
||||
"unicode-segmentation",
|
||||
"url",
|
||||
"uuid",
|
||||
|
@ -785,6 +787,51 @@ dependencies = [
|
|||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "appflowy-worker"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async_zip",
|
||||
"aws-config",
|
||||
"aws-sdk-s3",
|
||||
"axum 0.7.5",
|
||||
"bytes",
|
||||
"collab",
|
||||
"collab-database",
|
||||
"collab-entity",
|
||||
"collab-folder",
|
||||
"collab-importer",
|
||||
"database",
|
||||
"database-entity",
|
||||
"dotenvy",
|
||||
"futures",
|
||||
"infra",
|
||||
"mime_guess",
|
||||
"redis 0.25.4",
|
||||
"secrecy",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_repr",
|
||||
"sqlx",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "arbitrary"
|
||||
version = "1.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110"
|
||||
dependencies = [
|
||||
"derive_arbitrary",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
version = "1.7.1"
|
||||
|
@ -902,6 +949,24 @@ dependencies = [
|
|||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-compression"
|
||||
version = "0.4.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5"
|
||||
dependencies = [
|
||||
"bzip2",
|
||||
"deflate64",
|
||||
"flate2",
|
||||
"futures-core",
|
||||
"futures-io",
|
||||
"memchr",
|
||||
"pin-project-lite",
|
||||
"xz2",
|
||||
"zstd",
|
||||
"zstd-safe",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-lock"
|
||||
version = "3.4.0"
|
||||
|
@ -913,6 +978,17 @@ dependencies = [
|
|||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-recursion"
|
||||
version = "1.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.72",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async-stream"
|
||||
version = "0.3.5"
|
||||
|
@ -937,15 +1013,31 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "async-trait"
|
||||
version = "0.1.81"
|
||||
version = "0.1.83"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107"
|
||||
checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.72",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "async_zip"
|
||||
version = "0.0.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "00b9f7252833d5ed4b00aa9604b563529dd5e11de9c23615de2dcdf91eb87b52"
|
||||
dependencies = [
|
||||
"async-compression",
|
||||
"chrono",
|
||||
"crc32fast",
|
||||
"futures-lite",
|
||||
"pin-project",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "atoi"
|
||||
version = "2.0.0"
|
||||
|
@ -1771,6 +1863,27 @@ dependencies = [
|
|||
"bytes",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bzip2"
|
||||
version = "0.4.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8"
|
||||
dependencies = [
|
||||
"bzip2-sys",
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bzip2-sys"
|
||||
version = "0.1.11+1.0.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "camino"
|
||||
version = "1.1.7"
|
||||
|
@ -1872,6 +1985,27 @@ dependencies = [
|
|||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "chrono-tz"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cd6dd8046d00723a59a2f8c5f295c515b9bb9a331ee4f8f3d4dd49e428acd3b6"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"chrono-tz-build",
|
||||
"phf 0.11.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "chrono-tz-build"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e94fea34d77a245229e7746bd2beb786cd2a896f306ff491fb8cecb3074b10a7"
|
||||
dependencies = [
|
||||
"parse-zoneinfo",
|
||||
"phf_codegen 0.11.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "chumsky"
|
||||
version = "0.9.3"
|
||||
|
@ -1970,6 +2104,7 @@ dependencies = [
|
|||
"infra",
|
||||
"lazy_static",
|
||||
"mime",
|
||||
"mime_guess",
|
||||
"parking_lot 0.12.3",
|
||||
"percent-encoding",
|
||||
"pin-project",
|
||||
|
@ -2015,6 +2150,7 @@ version = "0.1.0"
|
|||
dependencies = [
|
||||
"anyhow",
|
||||
"assert-json-diff",
|
||||
"async-trait",
|
||||
"bytes",
|
||||
"client-api",
|
||||
"client-websocket",
|
||||
|
@ -2094,7 +2230,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "collab"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=6095da99533c4547364f71dfb7c0db3a1bace562#6095da99533c4547364f71dfb7c0db3a1bace562"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d7dc26a906e3ce5d72a309e933f853f1e75da1cb#d7dc26a906e3ce5d72a309e933f853f1e75da1cb"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap",
|
||||
|
@ -2119,24 +2255,31 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "collab-database"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=6095da99533c4547364f71dfb7c0db3a1bace562#6095da99533c4547364f71dfb7c0db3a1bace562"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d7dc26a906e3ce5d72a309e933f853f1e75da1cb#d7dc26a906e3ce5d72a309e933f853f1e75da1cb"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"chrono-tz",
|
||||
"collab",
|
||||
"collab-entity",
|
||||
"csv",
|
||||
"dashmap 5.5.3",
|
||||
"fancy-regex 0.13.0",
|
||||
"futures",
|
||||
"getrandom 0.2.15",
|
||||
"js-sys",
|
||||
"lazy_static",
|
||||
"nanoid",
|
||||
"percent-encoding",
|
||||
"rayon",
|
||||
"rust_decimal",
|
||||
"rusty-money",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_repr",
|
||||
"sha2",
|
||||
"strum",
|
||||
"strum_macros",
|
||||
"thiserror",
|
||||
|
@ -2151,13 +2294,14 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "collab-document"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=6095da99533c4547364f71dfb7c0db3a1bace562#6095da99533c4547364f71dfb7c0db3a1bace562"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d7dc26a906e3ce5d72a309e933f853f1e75da1cb#d7dc26a906e3ce5d72a309e933f853f1e75da1cb"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap",
|
||||
"collab",
|
||||
"collab-entity",
|
||||
"getrandom 0.2.15",
|
||||
"markdown",
|
||||
"nanoid",
|
||||
"serde",
|
||||
"serde_json",
|
||||
|
@ -2171,7 +2315,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "collab-entity"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=6095da99533c4547364f71dfb7c0db3a1bace562#6095da99533c4547364f71dfb7c0db3a1bace562"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d7dc26a906e3ce5d72a309e933f853f1e75da1cb#d7dc26a906e3ce5d72a309e933f853f1e75da1cb"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"bytes",
|
||||
|
@ -2191,7 +2335,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "collab-folder"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=6095da99533c4547364f71dfb7c0db3a1bace562#6095da99533c4547364f71dfb7c0db3a1bace562"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d7dc26a906e3ce5d72a309e933f853f1e75da1cb#d7dc26a906e3ce5d72a309e933f853f1e75da1cb"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap",
|
||||
|
@ -2207,6 +2351,40 @@ dependencies = [
|
|||
"tokio",
|
||||
"tokio-stream",
|
||||
"tracing",
|
||||
"uuid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "collab-importer"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d7dc26a906e3ce5d72a309e933f853f1e75da1cb#d7dc26a906e3ce5d72a309e933f853f1e75da1cb"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-recursion",
|
||||
"async-trait",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"collab",
|
||||
"collab-database",
|
||||
"collab-document",
|
||||
"collab-entity",
|
||||
"collab-folder",
|
||||
"fancy-regex 0.13.0",
|
||||
"futures",
|
||||
"fxhash",
|
||||
"hex",
|
||||
"markdown",
|
||||
"percent-encoding",
|
||||
"rayon",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"thiserror",
|
||||
"tokio",
|
||||
"tracing",
|
||||
"uuid",
|
||||
"walkdir",
|
||||
"zip",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -2276,7 +2454,7 @@ dependencies = [
|
|||
[[package]]
|
||||
name = "collab-user"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=6095da99533c4547364f71dfb7c0db3a1bace562#6095da99533c4547364f71dfb7c0db3a1bace562"
|
||||
source = "git+https://github.com/AppFlowy-IO/AppFlowy-Collab?rev=d7dc26a906e3ce5d72a309e933f853f1e75da1cb#d7dc26a906e3ce5d72a309e933f853f1e75da1cb"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"collab",
|
||||
|
@ -2354,6 +2532,12 @@ dependencies = [
|
|||
"tiny-keccak",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "constant_time_eq"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6"
|
||||
|
||||
[[package]]
|
||||
name = "convert_case"
|
||||
version = "0.4.0"
|
||||
|
@ -2774,6 +2958,7 @@ dependencies = [
|
|||
"collab-rt-entity",
|
||||
"database-entity",
|
||||
"futures-util",
|
||||
"itertools 0.12.1",
|
||||
"pgvector",
|
||||
"redis 0.25.4",
|
||||
"rust_decimal",
|
||||
|
@ -2819,6 +3004,12 @@ dependencies = [
|
|||
"byteorder",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "deflate64"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "da692b8d1080ea3045efaab14434d40468c3d8657e42abddfffca87b428f4c1b"
|
||||
|
||||
[[package]]
|
||||
name = "der"
|
||||
version = "0.6.1"
|
||||
|
@ -2863,6 +3054,17 @@ dependencies = [
|
|||
"powerfmt",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_arbitrary"
|
||||
version = "1.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.72",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_more"
|
||||
version = "0.99.18"
|
||||
|
@ -3077,6 +3279,17 @@ dependencies = [
|
|||
"regex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fancy-regex"
|
||||
version = "0.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2"
|
||||
dependencies = [
|
||||
"bit-set",
|
||||
"regex-automata 0.4.7",
|
||||
"regex-syntax 0.8.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fastrand"
|
||||
version = "2.1.0"
|
||||
|
@ -3234,6 +3447,19 @@ version = "0.3.30"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
|
||||
|
||||
[[package]]
|
||||
name = "futures-lite"
|
||||
version = "2.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5"
|
||||
dependencies = [
|
||||
"fastrand",
|
||||
"futures-core",
|
||||
"futures-io",
|
||||
"parking",
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "futures-macro"
|
||||
version = "0.3.30"
|
||||
|
@ -4198,6 +4424,12 @@ dependencies = [
|
|||
"scopeguard",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lockfree-object-pool"
|
||||
version = "0.1.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9374ef4228402d4b7e403e5838cb880d9ee663314b0a900d5a6aabf0c213552e"
|
||||
|
||||
[[package]]
|
||||
name = "log"
|
||||
version = "0.4.22"
|
||||
|
@ -4213,12 +4445,42 @@ dependencies = [
|
|||
"hashbrown 0.14.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lzma-rs"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "297e814c836ae64db86b36cf2a557ba54368d03f6afcd7d947c266692f71115e"
|
||||
dependencies = [
|
||||
"byteorder",
|
||||
"crc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lzma-sys"
|
||||
version = "0.1.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"pkg-config",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mac"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4"
|
||||
|
||||
[[package]]
|
||||
name = "markdown"
|
||||
version = "1.0.0-alpha.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a6491e6c702bf7e3b24e769d800746d5f2c06a6c6a2db7992612e0f429029e81"
|
||||
dependencies = [
|
||||
"unicode-id",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "markup5ever"
|
||||
version = "0.11.0"
|
||||
|
@ -4227,7 +4489,7 @@ checksum = "7a2629bb1404f3d34c2e921f21fd34ba00b206124c81f65c50b43b6aaefeb016"
|
|||
dependencies = [
|
||||
"log",
|
||||
"phf 0.10.1",
|
||||
"phf_codegen",
|
||||
"phf_codegen 0.10.0",
|
||||
"string_cache",
|
||||
"string_cache_codegen",
|
||||
"tendril",
|
||||
|
@ -4702,6 +4964,15 @@ version = "1.0.0"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "944553dd59c802559559161f9816429058b869003836120e262e8caec061b7ae"
|
||||
|
||||
[[package]]
|
||||
name = "parse-zoneinfo"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24"
|
||||
dependencies = [
|
||||
"regex",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "password-hash"
|
||||
version = "0.5.0"
|
||||
|
@ -4719,6 +4990,16 @@ version = "1.0.15"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
|
||||
|
||||
[[package]]
|
||||
name = "pbkdf2"
|
||||
version = "0.12.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2"
|
||||
dependencies = [
|
||||
"digest",
|
||||
"hmac",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pem"
|
||||
version = "1.1.1"
|
||||
|
@ -4836,6 +5117,16 @@ dependencies = [
|
|||
"phf_shared 0.10.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf_codegen"
|
||||
version = "0.11.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a"
|
||||
dependencies = [
|
||||
"phf_generator 0.11.2",
|
||||
"phf_shared 0.11.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf_generator"
|
||||
version = "0.10.0"
|
||||
|
@ -5623,6 +5914,7 @@ dependencies = [
|
|||
"js-sys",
|
||||
"log",
|
||||
"mime",
|
||||
"mime_guess",
|
||||
"native-tls",
|
||||
"once_cell",
|
||||
"percent-encoding",
|
||||
|
@ -5831,9 +6123,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "rust_decimal"
|
||||
version = "1.35.0"
|
||||
version = "1.36.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1790d1c4c0ca81211399e0e0af16333276f375209e71a37b67698a373db5b47a"
|
||||
checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555"
|
||||
dependencies = [
|
||||
"arrayvec",
|
||||
"borsh",
|
||||
|
@ -5845,6 +6137,16 @@ dependencies = [
|
|||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rust_decimal_macros"
|
||||
version = "1.36.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "da991f231869f34268415a49724c6578e740ad697ba0999199d6f22b3949332c"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"rust_decimal",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc-demangle"
|
||||
version = "0.1.24"
|
||||
|
@ -5978,6 +6280,16 @@ version = "1.0.17"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6"
|
||||
|
||||
[[package]]
|
||||
name = "rusty-money"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5b28f881005eac7ad8d46b6f075da5f322bd7f4f83a38720fc069694ddadd683"
|
||||
dependencies = [
|
||||
"rust_decimal",
|
||||
"rust_decimal_macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ryu"
|
||||
version = "1.0.18"
|
||||
|
@ -6128,7 +6440,7 @@ dependencies = [
|
|||
"log",
|
||||
"new_debug_unreachable",
|
||||
"phf 0.10.1",
|
||||
"phf_codegen",
|
||||
"phf_codegen 0.10.0",
|
||||
"precomputed-hash",
|
||||
"servo_arc",
|
||||
"smallvec",
|
||||
|
@ -6365,6 +6677,12 @@ dependencies = [
|
|||
"rand_core 0.6.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "simd-adler32"
|
||||
version = "0.3.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe"
|
||||
|
||||
[[package]]
|
||||
name = "simdutf8"
|
||||
version = "0.1.4"
|
||||
|
@ -7166,12 +7484,13 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "tokio-util"
|
||||
version = "0.7.11"
|
||||
version = "0.7.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1"
|
||||
checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"futures-io",
|
||||
"futures-sink",
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
|
@ -7502,6 +7821,12 @@ version = "0.3.15"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-id"
|
||||
version = "0.3.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "10103c57044730945224467c09f71a4db0071c123a0648cc3e818913bde6b561"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-ident"
|
||||
version = "1.0.12"
|
||||
|
@ -7510,9 +7835,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
|
|||
|
||||
[[package]]
|
||||
name = "unicode-normalization"
|
||||
version = "0.1.23"
|
||||
version = "0.1.24"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5"
|
||||
checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
|
||||
dependencies = [
|
||||
"tinyvec",
|
||||
]
|
||||
|
@ -8196,6 +8521,15 @@ dependencies = [
|
|||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "xz2"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2"
|
||||
dependencies = [
|
||||
"lzma-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "yasna"
|
||||
version = "0.5.2"
|
||||
|
@ -8284,6 +8618,49 @@ dependencies = [
|
|||
"syn 2.0.72",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zip"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dc5e4288ea4057ae23afc69a4472434a87a2495cafce6632fd1c4ec9f5cf3494"
|
||||
dependencies = [
|
||||
"aes",
|
||||
"arbitrary",
|
||||
"bzip2",
|
||||
"constant_time_eq",
|
||||
"crc32fast",
|
||||
"crossbeam-utils",
|
||||
"deflate64",
|
||||
"displaydoc",
|
||||
"flate2",
|
||||
"hmac",
|
||||
"indexmap 2.3.0",
|
||||
"lzma-rs",
|
||||
"memchr",
|
||||
"pbkdf2",
|
||||
"rand 0.8.5",
|
||||
"sha1",
|
||||
"thiserror",
|
||||
"time",
|
||||
"zeroize",
|
||||
"zopfli",
|
||||
"zstd",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zopfli"
|
||||
version = "0.8.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e5019f391bac5cf252e93bbcc53d039ffd62c7bfb7c150414d61369afe57e946"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"crc32fast",
|
||||
"lockfree-object-pool",
|
||||
"log",
|
||||
"once_cell",
|
||||
"simd-adler32",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zstd"
|
||||
version = "0.13.2"
|
||||
|
|
165  Cargo.toml
|
@ -9,9 +9,9 @@ edition = "2021"
|
|||
actix.workspace = true
|
||||
actix-web.workspace = true
|
||||
actix-http = { workspace = true, default-features = false, features = [
|
||||
"openssl",
|
||||
"compress-brotli",
|
||||
"compress-gzip",
|
||||
"openssl",
|
||||
"compress-brotli",
|
||||
"compress-gzip",
|
||||
] }
|
||||
actix-rt = "2.9.0"
|
||||
actix-web-actors = { version = "4.3" }
|
||||
|
@ -28,20 +28,20 @@ serde_repr.workspace = true
|
|||
serde.workspace = true
|
||||
|
||||
tokio = { workspace = true, features = [
|
||||
"macros",
|
||||
"rt-multi-thread",
|
||||
"sync",
|
||||
"fs",
|
||||
"time",
|
||||
"full",
|
||||
"macros",
|
||||
"rt-multi-thread",
|
||||
"sync",
|
||||
"fs",
|
||||
"time",
|
||||
"full",
|
||||
] }
|
||||
tokio-stream.workspace = true
|
||||
tokio-util = { version = "0.7.10", features = ["io"] }
|
||||
futures-util = { workspace = true, features = ["std", "io"] }
|
||||
once_cell = "1.19.0"
|
||||
chrono = { version = "0.4.37", features = [
|
||||
"serde",
|
||||
"clock",
|
||||
"serde",
|
||||
"clock",
|
||||
], default-features = false }
|
||||
derive_more = { version = "0.99" }
|
||||
secrecy.workspace = true
|
||||
|
@ -49,10 +49,10 @@ rand = { version = "0.8", features = ["std_rng"] }
|
|||
anyhow = "1.0.79"
|
||||
thiserror = "1.0.56"
|
||||
reqwest = { workspace = true, features = [
|
||||
"json",
|
||||
"rustls-tls",
|
||||
"cookies",
|
||||
"stream",
|
||||
"json",
|
||||
"rustls-tls",
|
||||
"cookies",
|
||||
"stream",
|
||||
] }
|
||||
unicode-segmentation = "1.10"
|
||||
lazy_static.workspace = true
|
||||
|
@ -62,31 +62,31 @@ bytes = "1.5.0"
|
|||
rcgen = { version = "0.10.0", features = ["pem", "x509-parser"] }
|
||||
mime = "0.3.17"
|
||||
aws-sdk-s3 = { version = "1.36.0", features = [
|
||||
"behavior-version-latest",
|
||||
"rt-tokio",
|
||||
"behavior-version-latest",
|
||||
"rt-tokio",
|
||||
] }
|
||||
aws-config = { version = "1.5.1", features = ["behavior-version-latest"] }
|
||||
redis = { workspace = true, features = [
|
||||
"json",
|
||||
"tokio-comp",
|
||||
"connection-manager",
|
||||
"json",
|
||||
"tokio-comp",
|
||||
"connection-manager",
|
||||
] }
|
||||
tracing = { version = "0.1.40", features = ["log"] }
|
||||
tracing-subscriber = { version = "0.3.18", features = [
|
||||
"registry",
|
||||
"env-filter",
|
||||
"ansi",
|
||||
"json",
|
||||
"tracing-log",
|
||||
"registry",
|
||||
"env-filter",
|
||||
"ansi",
|
||||
"json",
|
||||
"tracing-log",
|
||||
] }
|
||||
tracing-bunyan-formatter = "0.3.9"
|
||||
sqlx = { workspace = true, default-features = false, features = [
|
||||
"runtime-tokio-rustls",
|
||||
"macros",
|
||||
"postgres",
|
||||
"uuid",
|
||||
"chrono",
|
||||
"migrate",
|
||||
"runtime-tokio-rustls",
|
||||
"macros",
|
||||
"postgres",
|
||||
"uuid",
|
||||
"chrono",
|
||||
"migrate",
|
||||
] }
|
||||
async-trait.workspace = true
|
||||
prometheus-client.workspace = true
|
||||
|
@ -130,10 +130,10 @@ authentication.workspace = true
|
|||
access-control.workspace = true
|
||||
workspace-access.workspace = true
|
||||
app-error = { workspace = true, features = [
|
||||
"sqlx_error",
|
||||
"actix_web_error",
|
||||
"tokio_error",
|
||||
"appflowy_ai_error",
|
||||
"sqlx_error",
|
||||
"actix_web_error",
|
||||
"tokio_error",
|
||||
"appflowy_ai_error",
|
||||
] }
|
||||
shared-entity = { path = "libs/shared-entity", features = ["cloud"] }
|
||||
workspace-template = { workspace = true }
|
||||
|
@ -147,6 +147,7 @@ lettre = { version = "0.11.7", features = ["tokio1", "tokio1-native-tls"] }
|
|||
handlebars = "5.1.2"
|
||||
pin-project = "1.1.5"
|
||||
byteorder = "1.5.0"
|
||||
sha2 = "0.10.8"
|
||||
rayon.workspace = true
|
||||
|
||||
|
||||
|
@ -157,16 +158,17 @@ assert-json-diff = "2.0.2"
|
|||
scraper = "0.17.1"
|
||||
client-api-test = { path = "libs/client-api-test", features = ["collab-sync"] }
|
||||
client-api = { path = "libs/client-api", features = [
|
||||
"collab-sync",
|
||||
"test_util",
|
||||
"sync_verbose_log",
|
||||
"test_fast_sync",
|
||||
"enable_brotli",
|
||||
"collab-sync",
|
||||
"test_util",
|
||||
"sync_verbose_log",
|
||||
"test_fast_sync",
|
||||
"enable_brotli",
|
||||
] }
|
||||
opener = "0.6.1"
|
||||
image = "0.23.14"
|
||||
collab-rt-entity.workspace = true
|
||||
hex = "0.4.3"
|
||||
unicode-normalization = "0.1.24"
|
||||
|
||||
[[bin]]
|
||||
name = "appflowy_cloud"
|
||||
|
@ -181,37 +183,38 @@ path = "src/lib.rs"
|
|||
|
||||
[workspace]
|
||||
members = [
|
||||
# libs
|
||||
"libs/snowflake",
|
||||
"libs/collab-rt-entity",
|
||||
"libs/database",
|
||||
"libs/database-entity",
|
||||
"libs/client-api",
|
||||
"libs/infra",
|
||||
"libs/shared-entity",
|
||||
"libs/gotrue",
|
||||
"libs/gotrue-entity",
|
||||
"admin_frontend",
|
||||
"libs/app-error",
|
||||
"libs/workspace-access",
|
||||
"libs/workspace-template",
|
||||
"libs/encrypt",
|
||||
"libs/authentication",
|
||||
"libs/access-control",
|
||||
"libs/collab-rt-protocol",
|
||||
"libs/collab-stream",
|
||||
"libs/client-websocket",
|
||||
"libs/client-api-test",
|
||||
"libs/wasm-test",
|
||||
"libs/client-api-wasm",
|
||||
"libs/appflowy-ai-client",
|
||||
"libs/client-api-entity",
|
||||
# services
|
||||
"services/appflowy-history",
|
||||
"services/appflowy-collaborate",
|
||||
# xtask
|
||||
"xtask",
|
||||
"libs/tonic-proto",
|
||||
# libs
|
||||
"libs/snowflake",
|
||||
"libs/collab-rt-entity",
|
||||
"libs/database",
|
||||
"libs/database-entity",
|
||||
"libs/client-api",
|
||||
"libs/infra",
|
||||
"libs/shared-entity",
|
||||
"libs/gotrue",
|
||||
"libs/gotrue-entity",
|
||||
"admin_frontend",
|
||||
"libs/app-error",
|
||||
"libs/workspace-access",
|
||||
"libs/workspace-template",
|
||||
"libs/encrypt",
|
||||
"libs/authentication",
|
||||
"libs/access-control",
|
||||
"libs/collab-rt-protocol",
|
||||
"libs/collab-stream",
|
||||
"libs/client-websocket",
|
||||
"libs/client-api-test",
|
||||
"libs/wasm-test",
|
||||
"libs/client-api-wasm",
|
||||
"libs/appflowy-ai-client",
|
||||
"libs/client-api-entity",
|
||||
# services
|
||||
"services/appflowy-history",
|
||||
"services/appflowy-collaborate",
|
||||
"services/appflowy-worker",
|
||||
# xtask
|
||||
"xtask",
|
||||
"libs/tonic-proto",
|
||||
]
|
||||
|
||||
[workspace.dependencies]
|
||||
|
@ -240,9 +243,9 @@ uuid = { version = "1.6.1", features = ["v4", "v5"] }
|
|||
anyhow = "1.0.79"
|
||||
actix = "0.13.3"
|
||||
actix-web = { version = "4.5.1", default-features = false, features = [
|
||||
"openssl",
|
||||
"compress-brotli",
|
||||
"compress-gzip",
|
||||
"openssl",
|
||||
"compress-brotli",
|
||||
"compress-gzip",
|
||||
] }
|
||||
actix-http = { version = "3.6.0", default-features = false }
|
||||
tokio = { version = "1.36.0", features = ["sync"] }
|
||||
|
@ -276,6 +279,7 @@ collab-folder = { version = "0.2.0" }
|
|||
collab-document = { version = "0.2.0" }
|
||||
collab-database = { version = "0.2.0" }
|
||||
collab-user = { version = "0.2.0" }
|
||||
collab-importer = { version = "0.1.0" }
|
||||
|
||||
[profile.release]
|
||||
lto = true
|
||||
|
@ -289,12 +293,13 @@ debug = true
|
|||
[patch.crates-io]
|
||||
# It's diffcult to resovle different version with the same crate used in AppFlowy Frontend and the Client-API crate.
|
||||
# So using patch to workaround this issue.
|
||||
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "6095da99533c4547364f71dfb7c0db3a1bace562" }
|
||||
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "6095da99533c4547364f71dfb7c0db3a1bace562" }
|
||||
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "6095da99533c4547364f71dfb7c0db3a1bace562" }
|
||||
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "6095da99533c4547364f71dfb7c0db3a1bace562" }
|
||||
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "6095da99533c4547364f71dfb7c0db3a1bace562" }
|
||||
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "6095da99533c4547364f71dfb7c0db3a1bace562" }
|
||||
collab = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d7dc26a906e3ce5d72a309e933f853f1e75da1cb" }
|
||||
collab-entity = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d7dc26a906e3ce5d72a309e933f853f1e75da1cb" }
|
||||
collab-folder = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d7dc26a906e3ce5d72a309e933f853f1e75da1cb" }
|
||||
collab-document = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d7dc26a906e3ce5d72a309e933f853f1e75da1cb" }
|
||||
collab-user = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d7dc26a906e3ce5d72a309e933f853f1e75da1cb" }
|
||||
collab-database = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d7dc26a906e3ce5d72a309e933f853f1e75da1cb" }
|
||||
collab-importer = { git = "https://github.com/AppFlowy-IO/AppFlowy-Collab", rev = "d7dc26a906e3ce5d72a309e933f853f1e75da1cb" }
|
||||
|
||||
[features]
|
||||
history = []
|
||||
|
|
|
@@ -144,5 +144,9 @@ APPFLOWY_INDEXER_REDIS_URL=redis://redis:6379
APPFLOWY_COLLABORATE_MULTI_THREAD=false
APPFLOWY_COLLABORATE_REMOVE_BATCH_SIZE=100

# AppFlowy Worker
APPFLOWY_WORKER_REDIS_URL=redis://redis:6379
APPFLOWY_WORKER_DATABASE_URL=postgres://postgres:password@postgres:5432/postgres

# AppFlowy Web
APPFLOWY_WEB_URL=http://localhost:3000
4  dev.env
@@ -127,5 +127,9 @@ APPFLOWY_INDEXER_REDIS_URL=redis://redis:6379
APPFLOWY_COLLABORATE_MULTI_THREAD=false
APPFLOWY_COLLABORATE_REMOVE_BATCH_SIZE=100

# AppFlowy Worker
APPFLOWY_WORKER_REDIS_URL=redis://redis:6379
APPFLOWY_WORKER_DATABASE_URL=postgres://postgres:password@postgres:5432/postgres

# AppFlowy Web
APPFLOWY_WEB_URL=http://localhost:3000
@@ -157,6 +157,26 @@ services:
- APPFLOWY_HISTORY_ENVIRONMENT=production
- APPFLOWY_HISTORY_DATABASE_URL=${APPFLOWY_HISTORY_DATABASE_URL}

appflowy_worker:
restart: on-failure
image: appflowyinc/appflowy_worker:${APPFLOWY_WORKER_VERSION:-latest}
build:
context: .
dockerfile: ./services/appflowy-worker/Dockerfile
ports:
- "4001:4001"
environment:
- RUST_LOG=${RUST_LOG:-info}
- APPFLOWY_WORKER_REDIS_URL=redis://redis:6379
- APPFLOWY_WORKER_ENVIRONMENT=production
- APPFLOWY_WORKER_DATABASE_URL=${APPFLOWY_WORKER_DATABASE_URL}
- APPFLOWY_S3_USE_MINIO=${APPFLOWY_S3_USE_MINIO}
- APPFLOWY_S3_MINIO_URL=${APPFLOWY_S3_MINIO_URL}
- APPFLOWY_S3_ACCESS_KEY=${APPFLOWY_S3_ACCESS_KEY}
- APPFLOWY_S3_SECRET_KEY=${APPFLOWY_S3_SECRET_KEY}
- APPFLOWY_S3_BUCKET=${APPFLOWY_S3_BUCKET}
- APPFLOWY_S3_REGION=${APPFLOWY_S3_REGION}

volumes:
postgres_data:
minio_data:
@ -152,6 +152,24 @@ services:
|
|||
- APPFLOWY_HISTORY_ENVIRONMENT=production
|
||||
- APPFLOWY_HISTORY_DATABASE_URL=${APPFLOWY_HISTORY_DATABASE_URL}
|
||||
|
||||
appflowy_worker:
|
||||
restart: on-failure
|
||||
image: appflowyinc/appflowy_worker:${APPFLOWY_WORKER_VERSION:-latest}
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./services/appflowy-worker/Dockerfile
|
||||
environment:
|
||||
- RUST_LOG=${RUST_LOG:-info}
|
||||
- APPFLOWY_ENVIRONMENT=production
|
||||
- APPFLOWY_WORKER_REDIS_URL=redis://redis:6379
|
||||
- APPFLOWY_WORKER_ENVIRONMENT=production
|
||||
- APPFLOWY_WORKER_DATABASE_URL=${APPFLOWY_WORKER_DATABASE_URL}
|
||||
- APPFLOWY_S3_USE_MINIO=${APPFLOWY_S3_USE_MINIO}
|
||||
- APPFLOWY_S3_MINIO_URL=${APPFLOWY_S3_MINIO_URL}
|
||||
- APPFLOWY_S3_ACCESS_KEY=${APPFLOWY_S3_ACCESS_KEY}
|
||||
- APPFLOWY_S3_SECRET_KEY=${APPFLOWY_S3_SECRET_KEY}
|
||||
- APPFLOWY_S3_BUCKET=${APPFLOWY_S3_BUCKET}
|
||||
- APPFLOWY_S3_REGION=${APPFLOWY_S3_REGION}
|
||||
volumes:
|
||||
postgres_data:
|
||||
minio_data:
|
||||
|
|
|
@ -38,6 +38,7 @@ futures = "0.3.30"
|
|||
anyhow = "1.0.80"
|
||||
serde = { version = "1.0.199", features = ["derive"] }
|
||||
hex = "0.4.3"
|
||||
async-trait = "0.1.83"
|
||||
|
||||
[target.'cfg(target_arch = "wasm32")'.dependencies]
|
||||
web-sys = { version = "0.3", features = ["console"] }
|
||||
|
|
|
@@ -64,7 +64,7 @@ pub fn localhost_client_with_device_id(device_id: &str) -> Client {
    &LOCALHOST_GOTRUE,
    device_id,
    ClientConfiguration::default(),
    "0.5.0",
    "0.7.0",
  )
}
97  libs/client-api-test/src/database_util.rs (new file)
@@ -0,0 +1,97 @@
use async_trait::async_trait;
use collab::core::origin::CollabOrigin;
use collab::entity::EncodedCollab;
use collab::preclude::Collab;
use collab_database::error::DatabaseError;
use collab_database::workspace_database::{
  DatabaseCollabPersistenceService, DatabaseCollabService, EncodeCollabByOid,
};
use collab_entity::CollabType;
use database_entity::dto::QueryCollabResult::{Failed, Success};
use database_entity::dto::{QueryCollab, QueryCollabParams};
use std::sync::Arc;
use tracing::error;

pub struct TestDatabaseCollabService {
  pub api_client: client_api::Client,
  pub workspace_id: String,
}

#[async_trait]
impl DatabaseCollabService for TestDatabaseCollabService {
  async fn build_collab(
    &self,
    object_id: &str,
    object_type: CollabType,
    encoded_collab: Option<(EncodedCollab, bool)>,
  ) -> Result<Collab, DatabaseError> {
    let encoded_collab = match encoded_collab {
      None => {
        let params = QueryCollabParams {
          workspace_id: self.workspace_id.clone(),
          inner: QueryCollab {
            object_id: object_id.to_string(),
            collab_type: object_type,
          },
        };
        self
          .api_client
          .get_collab(params)
          .await
          .unwrap()
          .encode_collab
      },
      Some((encoded_collab, _)) => encoded_collab,
    };
    Ok(
      Collab::new_with_source(
        CollabOrigin::Empty,
        object_id,
        encoded_collab.into(),
        vec![],
        false,
      )
      .unwrap(),
    )
  }

  async fn get_collabs(
    &self,
    object_ids: Vec<String>,
    collab_type: CollabType,
  ) -> Result<EncodeCollabByOid, DatabaseError> {
    let params = object_ids
      .into_iter()
      .map(|object_id| QueryCollab::new(object_id, collab_type.clone()))
      .collect();
    let results = self
      .api_client
      .batch_get_collab(&self.workspace_id, params)
      .await
      .unwrap();
    Ok(
      results
        .0
        .into_iter()
        .flat_map(|(object_id, result)| match result {
          Success { encode_collab_v1 } => match EncodedCollab::decode_from_bytes(&encode_collab_v1)
          {
            Ok(encode) => Some((object_id, encode)),
            Err(err) => {
              error!("Failed to decode collab: {}", err);
              None
            },
          },
          Failed { error } => {
            error!("Failed to get {} update: {}", object_id, error);
            None
          },
        })
        .collect::<EncodeCollabByOid>(),
    )
  }

  fn persistence(&self) -> Option<Arc<dyn DatabaseCollabPersistenceService>> {
    None
  }
}
@@ -1,4 +1,5 @@
mod client;
mod database_util;
mod log;
mod test_client;
mod user;
|
@ -13,7 +13,9 @@ use collab::core::origin::{CollabClient, CollabOrigin};
|
|||
use collab::entity::EncodedCollab;
|
||||
use collab::lock::{Mutex, RwLock};
|
||||
use collab::preclude::{Collab, Prelim};
|
||||
use collab_database::database::{Database, DatabaseContext};
|
||||
use collab_database::workspace_database::WorkspaceDatabaseBody;
|
||||
use collab_document::document::Document;
|
||||
use collab_entity::CollabType;
|
||||
use collab_folder::Folder;
|
||||
use collab_user::core::UserAwareness;
|
||||
|
@ -43,6 +45,7 @@ use shared_entity::dto::workspace_dto::{
|
|||
};
|
||||
use shared_entity::response::AppResponseError;
|
||||
|
||||
use crate::database_util::TestDatabaseCollabService;
|
||||
use crate::user::{generate_unique_registered_user, User};
|
||||
use crate::{load_env, localhost_client_with_device_id, setup_log};
|
||||
|
||||
|
@ -122,6 +125,76 @@ impl TestClient {
|
|||
Self::new(registered_user, false).await
|
||||
}
|
||||
|
||||
pub async fn get_folder(&self, workspace_id: &str) -> Folder {
|
||||
let uid = self.uid().await;
|
||||
let folder_collab = self
|
||||
.api_client
|
||||
.get_collab(QueryCollabParams::new(
|
||||
workspace_id.to_string(),
|
||||
CollabType::Folder,
|
||||
workspace_id.to_string(),
|
||||
))
|
||||
.await
|
||||
.unwrap()
|
||||
.encode_collab;
|
||||
Folder::from_collab_doc_state(
|
||||
uid,
|
||||
CollabOrigin::Client(CollabClient::new(uid, self.device_id.clone())),
|
||||
folder_collab.into(),
|
||||
workspace_id,
|
||||
vec![],
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn get_database(&self, workspace_id: &str, database_id: &str) -> Database {
|
||||
let service = TestDatabaseCollabService {
|
||||
api_client: self.api_client.clone(),
|
||||
workspace_id: workspace_id.to_string(),
|
||||
};
|
||||
let context = DatabaseContext::new(Arc::new(service));
|
||||
Database::open(database_id, context).await.unwrap()
|
||||
}
|
||||
|
||||
pub async fn get_document(&self, workspace_id: &str, document_id: &str) -> Document {
|
||||
let collab = self
|
||||
.get_collab_to_collab(
|
||||
workspace_id.to_string(),
|
||||
document_id.to_string(),
|
||||
CollabType::Document,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
Document::open(collab).unwrap()
|
||||
}
|
||||
|
||||
pub async fn get_workspace_database(&self, workspace_id: &str) -> WorkspaceDatabaseBody {
|
||||
let workspaces = self.api_client.get_workspaces().await.unwrap();
|
||||
let workspace_database_id = workspaces
|
||||
.iter()
|
||||
.find(|w| w.workspace_id.to_string() == workspace_id)
|
||||
.unwrap()
|
||||
.database_storage_id
|
||||
.to_string();
|
||||
|
||||
let collab = self
|
||||
.api_client
|
||||
.get_collab(QueryCollabParams::new(
|
||||
workspace_database_id.clone(),
|
||||
CollabType::WorkspaceDatabase,
|
||||
workspace_id.to_string(),
|
||||
))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
WorkspaceDatabaseBody::from_collab_doc_state(
|
||||
&workspace_database_id,
|
||||
CollabOrigin::Empty,
|
||||
collab.encode_collab.into(),
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn get_connect_users(&self, object_id: &str) -> Vec<i64> {
|
||||
#[derive(Deserialize)]
|
||||
struct UserId {
|
||||
|
@ -213,11 +286,10 @@ impl TestClient {
|
|||
}
|
||||
|
||||
pub async fn get_db_collab_from_view(&mut self, workspace_id: &str, view_id: &str) -> Collab {
|
||||
let mut ws_db_collab = self.get_workspace_database_collab(workspace_id).await;
|
||||
let ws_db_body = WorkspaceDatabaseBody::open(&mut ws_db_collab);
|
||||
let txn = ws_db_collab.transact();
|
||||
let ws_db_collab = self.get_workspace_database_collab(workspace_id).await;
|
||||
let ws_db_body = WorkspaceDatabaseBody::open(ws_db_collab).unwrap();
|
||||
let db_id = ws_db_body
|
||||
.get_all_database_meta(&txn)
|
||||
.get_all_database_meta()
|
||||
.into_iter()
|
||||
.find(|db_meta| db_meta.linked_views.contains(&view_id.to_string()))
|
||||
.unwrap()
|
||||
|
|
|
@ -8,7 +8,7 @@ edition = "2021"
|
|||
crate-type = ["cdylib", "rlib"]
|
||||
|
||||
[dependencies]
|
||||
reqwest = { workspace = true }
|
||||
reqwest = { workspace = true, features = ["multipart"] }
|
||||
anyhow = "1.0.79"
|
||||
serde_repr = "0.1.18"
|
||||
gotrue = { path = "../gotrue" }
|
||||
|
@ -48,6 +48,7 @@ futures.workspace = true
|
|||
pin-project = "1.1.5"
|
||||
percent-encoding = "2.3.1"
|
||||
lazy_static = { workspace = true }
|
||||
mime_guess = "2.0.5"
|
||||
|
||||
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
|
||||
tokio-retry = "0.3"
|
||||
|
|
|
@ -16,26 +16,29 @@ use client_api_entity::{
|
|||
use collab_rt_entity::HttpRealtimeMessage;
|
||||
use futures::Stream;
|
||||
use futures_util::stream;
|
||||
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};
|
||||
use prost::Message;
|
||||
use reqwest::{Body, Method};
|
||||
use reqwest::{multipart, Body, Method};
|
||||
use serde::Serialize;
|
||||
use shared_entity::dto::workspace_dto::CollabResponse;
|
||||
use shared_entity::response::{AppResponse, AppResponseError};
|
||||
use std::future::Future;
|
||||
|
||||
use percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};
|
||||
use std::path::Path;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use rayon::prelude::IntoParallelIterator;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use tokio::fs::File;
|
||||
use tokio_retry::strategy::{ExponentialBackoff, FixedInterval};
|
||||
use tokio_retry::{Condition, RetryIf};
|
||||
use tokio_util::codec::{BytesCodec, FramedRead};
|
||||
use tracing::{debug, event, info, instrument, trace};
|
||||
|
||||
pub use infra::file_util::ChunkedBytes;
|
||||
use shared_entity::dto::ai_dto::CompleteTextParams;
|
||||
use shared_entity::dto::import_dto::UserImportTask;
|
||||
|
||||
impl Client {
|
||||
pub async fn stream_completion_text(
|
||||
|
@ -294,6 +297,49 @@ impl Client {
|
|||
.await?;
|
||||
AppResponse::<()>::from_response(resp).await?.into_error()
|
||||
}
|
||||
|
||||
pub async fn import_file(&self, file_path: &Path) -> Result<(), AppResponseError> {
|
||||
let file = File::open(&file_path).await?;
|
||||
let file_name = file_path
|
||||
.file_name()
|
||||
.map(|s| s.to_string_lossy().to_string())
|
||||
.unwrap_or_else(|| uuid::Uuid::new_v4().to_string());
|
||||
|
||||
let stream = FramedRead::new(file, BytesCodec::new());
|
||||
let mime = mime_guess::from_path(file_path)
|
||||
.first_or_octet_stream()
|
||||
.to_string();
|
||||
|
||||
let file_part = multipart::Part::stream(reqwest::Body::wrap_stream(stream))
|
||||
.file_name(file_name)
|
||||
.mime_str(&mime)?;
|
||||
|
||||
let form = multipart::Form::new().part("file", file_part);
|
||||
let url = format!("{}/api/import", self.base_url);
|
||||
let mut builder = self
|
||||
.http_client_with_auth(Method::POST, &url)
|
||||
.await?
|
||||
.multipart(form);
|
||||
|
||||
// set the host header
|
||||
builder = builder.header("X-Host", self.base_url.clone());
|
||||
let resp = builder.send().await?;
|
||||
|
||||
AppResponse::<()>::from_response(resp).await?.into_error()
|
||||
}
|
||||
|
||||
pub async fn get_import_list(&self) -> Result<UserImportTask, AppResponseError> {
|
||||
let url = format!("{}/api/import", self.base_url);
|
||||
let resp = self
|
||||
.http_client_with_auth(Method::GET, &url)
|
||||
.await?
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
AppResponse::<UserImportTask>::from_response(resp)
|
||||
.await?
|
||||
.into_data()
|
||||
}
|
||||
}
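For orientation, a minimal sketch of how a caller might drive the two endpoints added above. Only `import_file`, `get_import_list`, and the DTO fields from `import_dto.rs` later in this diff come from this change; the crate path, the zip file location, and the polling style are illustrative assumptions.

```rust
use std::path::Path;

use client_api::Client; // assumed crate path for the client shown above
use shared_entity::response::AppResponseError;

async fn import_notion_export(client: &Client) -> Result<(), AppResponseError> {
  // Upload the Notion export; the server enqueues it as an import task.
  client.import_file(Path::new("./notion_export.zip")).await?;

  // List the caller's import tasks to observe progress.
  let list = client.get_import_list().await?;
  for task in list.tasks {
    println!("task {}: {:?}", task.task_id, task.status);
  }
  Ok(())
}
```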
|
||||
|
||||
#[async_trait]
|
||||
|
|
|
@ -41,8 +41,9 @@ aws-sdk-s3 = { version = "1.36.0", features = [
|
|||
], optional = true }
|
||||
sha2 = "0.10.8"
|
||||
base64 = "0.21.7"
|
||||
rust_decimal = "1.33.1"
|
||||
rust_decimal = "1.36.0"
|
||||
bincode.workspace = true
|
||||
itertools = "0.12.1"
|
||||
|
||||
[features]
|
||||
default = ["s3"]
|
||||
|
|
|
@ -9,13 +9,11 @@ use itertools::{Either, Itertools};
|
|||
use sqlx::{PgPool, Transaction};
|
||||
use tracing::{error, event, Level};
|
||||
|
||||
use app_error::AppError;
|
||||
use database::collab::CollabMetadata;
|
||||
use database_entity::dto::{CollabParams, QueryCollab, QueryCollabResult};
|
||||
|
||||
use crate::collab::disk_cache::CollabDiskCache;
|
||||
use crate::collab::mem_cache::{cache_exp_secs_from_collab_type, CollabMemCache};
|
||||
use crate::state::RedisConnectionManager;
|
||||
use crate::collab::CollabMetadata;
|
||||
use app_error::AppError;
|
||||
use database_entity::dto::{CollabParams, QueryCollab, QueryCollabResult};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CollabCache {
|
||||
|
@ -26,7 +24,7 @@ pub struct CollabCache {
|
|||
}
|
||||
|
||||
impl CollabCache {
|
||||
pub fn new(redis_conn_manager: RedisConnectionManager, pg_pool: PgPool) -> Self {
|
||||
pub fn new(redis_conn_manager: redis::aio::ConnectionManager, pg_pool: PgPool) -> Self {
|
||||
let mem_cache = CollabMemCache::new(redis_conn_manager.clone());
|
||||
let disk_cache = CollabDiskCache::new(pg_pool.clone());
|
||||
Self {
|
|
@ -161,6 +161,156 @@ pub async fn insert_into_af_collab(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Inserts or updates multiple collaboration records for a specific user in bulk. It assumes you are the
/// owner of the workspace.
///
/// This function performs a bulk insert or update operation for collaboration records (`af_collab`)
/// and corresponding member records (`af_collab_member`) for a given user and workspace. It processes a
/// list of collaboration parameters (`CollabParams`) and ensures that the data is inserted efficiently.
///
/// It returns the error "ON CONFLICT DO UPDATE command cannot affect row a second time" when the same
/// constrained values (the same `oid` and `partition_key`) appear more than once in a single INSERT
/// statement: PostgreSQL's `ON CONFLICT DO UPDATE` cannot resolve multiple duplicate rows within one batch.
///
/// # Concurrency and Locking:
///
/// - **Row-level locks**: PostgreSQL acquires row-level locks during inserts or updates, especially with
///   the `ON CONFLICT` clause, which resolves conflicts by updating existing rows. If multiple transactions
///   attempt to modify the same rows (with the same `oid` and `partition_key`), PostgreSQL will serialize
///   access, allowing only one transaction to modify the rows at a time.
/// - **No table-wide locks**: Other inserts or updates on different rows can proceed concurrently without
///   locking the entire table.
/// - **Deadlock risk**: Deadlocks may occur when transactions attempt to modify the same rows concurrently,
///   but PostgreSQL automatically resolves them by aborting one of the transactions. To minimize this risk,
///   ensure transactions access rows in a consistent order.
///
/// # Best Practices for High Concurrency:
///
/// - **Chunked inserts**: To reduce row-level contention, consider breaking large datasets into smaller
///   chunks when performing bulk inserts; the benchmarks below compare a single batch insert against
///   2000-row chunks (see the sketch after this function).
///
/// | Row Size | Total Rows | Batch Insert Time | Chunked Insert Time (2000-row chunks) |
/// |----------|------------|-------------------|---------------------------------------|
/// | 1KB      | 500        | 41.43 ms          | 31.24 ms                              |
/// | 1KB      | 1000       | 79.30 ms          | 48.07 ms                              |
/// | 1KB      | 2000       | 129.50 ms         | 86.75 ms                              |
/// | 1KB      | 3000       | 153.59 ms         | 121.09 ms                             |
/// | 1KB      | 6000       | 427.08 ms         | 500.08 ms                             |
/// | 5KB      | 500        | 79.70 ms          | 66.98 ms                              |
/// | 5KB      | 1000       | 140.58 ms         | 121.60 ms                             |
/// | 5KB      | 2000       | 257.42 ms         | 245.02 ms                             |
/// | 5KB      | 3000       | 418.10 ms         | 380.64 ms                             |
/// | 5KB      | 6000       | 776.63 ms         | 730.69 ms                             |
///
///   For 1KB rows, chunked inserts perform better up to about 3000 rows, while a single batch insert
///   becomes more efficient for larger datasets (6000+ rows). For 5KB rows, chunked inserts consistently
///   outperform or match batch inserts, making them the preferred method across dataset sizes.
///
/// - **Consistent transaction ordering**: Access rows in a consistent order across transactions to reduce
///   the risk of deadlocks.
/// - **Optimistic concurrency control**: For highly concurrent environments, implement optimistic concurrency
///   control to handle conflicts after they occur rather than preventing them upfront.
///
/// # Why Use a Transaction Instead of `PgPool`:
///
/// - Using a transaction ensures that all database operations (insert/update for both `af_collab` and
///   `af_collab_member`) succeed or fail together: if any part of the operation fails, all changes are
///   rolled back, ensuring data consistency.
///
#[inline]
|
||||
#[instrument(level = "trace", skip_all, fields(uid=%uid, workspace_id=%workspace_id), err)]
|
||||
pub async fn insert_into_af_collab_bulk_for_user(
|
||||
tx: &mut Transaction<'_, Postgres>,
|
||||
uid: &i64,
|
||||
workspace_id: &str,
|
||||
collab_params_list: &[CollabParams],
|
||||
) -> Result<(), AppError> {
|
||||
if collab_params_list.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let encrypt = 0;
|
||||
let workspace_uuid = Uuid::from_str(workspace_id)?;
|
||||
|
||||
// Insert values into the `af_collab_member` and `af_collab` tables in bulk
|
||||
let len = collab_params_list.len();
|
||||
let mut object_ids: Vec<Uuid> = Vec::with_capacity(len);
|
||||
let mut blobs: Vec<Vec<u8>> = Vec::with_capacity(len);
|
||||
let mut lengths: Vec<i32> = Vec::with_capacity(len);
|
||||
let mut partition_keys: Vec<i32> = Vec::with_capacity(len);
|
||||
let mut permission_ids: Vec<i32> = Vec::with_capacity(len);
|
||||
let uids: Vec<i64> = vec![*uid; collab_params_list.len()];
|
||||
let workspace_ids: Vec<Uuid> = vec![workspace_uuid; collab_params_list.len()];
|
||||
|
||||
let permission_id: i32 = sqlx::query_scalar!(
|
||||
r#"
|
||||
SELECT rp.permission_id
|
||||
FROM af_role_permissions rp
|
||||
JOIN af_roles ON rp.role_id = af_roles.id
|
||||
WHERE af_roles.name = 'Owner';
|
||||
"#
|
||||
)
|
||||
.fetch_one(tx.deref_mut())
|
||||
.await?;
|
||||
|
||||
for params in collab_params_list {
|
||||
let partition_key = partition_key_from_collab_type(¶ms.collab_type);
|
||||
object_ids.push(Uuid::from_str(¶ms.object_id)?);
|
||||
blobs.push(params.encoded_collab_v1.to_vec());
|
||||
lengths.push(params.encoded_collab_v1.len() as i32);
|
||||
partition_keys.push(partition_key);
|
||||
permission_ids.push(permission_id);
|
||||
}
|
||||
|
||||
// Bulk insert into `af_collab_member` for the user and provided collab params
|
||||
sqlx::query!(
|
||||
r#"
|
||||
INSERT INTO af_collab_member (uid, oid, permission_id)
|
||||
SELECT * FROM UNNEST($1::bigint[], $2::uuid[], $3::int[])
|
||||
ON CONFLICT (uid, oid)
|
||||
DO NOTHING;
|
||||
"#,
|
||||
&uids,
|
||||
&object_ids,
|
||||
&permission_ids
|
||||
)
|
||||
.execute(tx.deref_mut())
|
||||
.await
|
||||
.map_err(|err| {
|
||||
AppError::Internal(anyhow!(
|
||||
"Bulk insert/update into af_collab_member failed for uid: {}, error details: {:?}",
|
||||
uid,
|
||||
err
|
||||
))
|
||||
})?;
|
||||
|
||||
// Bulk insert into `af_collab` for the provided collab params
|
||||
sqlx::query!(
|
||||
r#"
|
||||
INSERT INTO af_collab (oid, blob, len, partition_key, encrypt, owner_uid, workspace_id)
|
||||
SELECT * FROM UNNEST($1::uuid[], $2::bytea[], $3::int[], $4::int[], $5::int[], $6::bigint[], $7::uuid[])
|
||||
ON CONFLICT (oid, partition_key)
|
||||
DO NOTHING;
|
||||
"#,
|
||||
&object_ids,
|
||||
&blobs,
|
||||
&lengths,
|
||||
&partition_keys,
|
||||
&vec![encrypt; collab_params_list.len()],
|
||||
&uids,
|
||||
&workspace_ids
|
||||
)
|
||||
.execute(tx.deref_mut())
|
||||
.await
|
||||
.map_err(|err| {
|
||||
AppError::Internal(anyhow!(
|
||||
"Bulk insert/update into af_collab failed for uid: {}, error details: {:?}",
|
||||
uid,
|
||||
err
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
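As referenced in the doc comment above, here is a minimal sketch (not part of this commit) of how a caller might split a large batch into 2000-row chunks inside a single transaction before calling `insert_into_af_collab_bulk_for_user`. The deduplication step, helper name, and calling context are assumptions; the chunk size comes from the benchmark table.

```rust
use std::collections::HashSet;

// Hypothetical helper: drop duplicate object ids (which would conflict within
// a single INSERT statement) and insert the rest in 2000-row chunks.
async fn bulk_insert_chunked(
  pg_pool: &PgPool,
  uid: i64,
  workspace_id: &str,
  mut params: Vec<CollabParams>,
) -> Result<(), AppError> {
  let mut seen = HashSet::new();
  params.retain(|p| seen.insert(p.object_id.clone()));

  let mut tx = pg_pool.begin().await?;
  for chunk in params.chunks(2000) {
    insert_into_af_collab_bulk_for_user(&mut tx, &uid, workspace_id, chunk).await?;
  }
  tx.commit().await?;
  Ok(())
}
```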
|
||||
|
||||
#[inline]
|
||||
pub async fn select_blob_from_af_collab<'a, E>(
|
||||
conn: E,
|
||||
|
|
|
@ -8,14 +8,14 @@ use tokio::time::sleep;
|
|||
use tracing::{event, instrument, Level};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::collab::decode_util::encode_collab_from_bytes;
|
||||
use app_error::AppError;
|
||||
use database::collab::{
|
||||
use crate::collab::util::encode_collab_from_bytes;
|
||||
use crate::collab::{
|
||||
batch_select_collab_blob, insert_into_af_collab, is_collab_exists, select_blob_from_af_collab,
|
||||
select_collab_meta_from_af_collab, AppResult,
|
||||
};
|
||||
use database::index::upsert_collab_embeddings;
|
||||
use database::pg_row::AFCollabRowMeta;
|
||||
use crate::index::upsert_collab_embeddings;
|
||||
use crate::pg_row::AFCollabRowMeta;
|
||||
use app_error::AppError;
|
||||
use database_entity::dto::{CollabParams, QueryCollab, QueryCollabResult};
|
||||
|
||||
#[derive(Clone)]
|
|
@ -4,20 +4,19 @@ use collab_entity::CollabType;
|
|||
use redis::{pipe, AsyncCommands};
|
||||
use tracing::{error, instrument, trace};
|
||||
|
||||
use crate::collab::decode_util::encode_collab_from_bytes;
|
||||
use crate::state::RedisConnectionManager;
|
||||
use crate::collab::util::encode_collab_from_bytes;
|
||||
use crate::collab::CollabMetadata;
|
||||
use app_error::AppError;
|
||||
use database::collab::CollabMetadata;
|
||||
|
||||
const SEVEN_DAYS: u64 = 604800;
|
||||
const ONE_MONTH: u64 = 2592000;
|
||||
#[derive(Clone)]
|
||||
pub struct CollabMemCache {
|
||||
connection_manager: RedisConnectionManager,
|
||||
connection_manager: redis::aio::ConnectionManager,
|
||||
}
|
||||
|
||||
impl CollabMemCache {
|
||||
pub fn new(connection_manager: RedisConnectionManager) -> Self {
|
||||
pub fn new(connection_manager: redis::aio::ConnectionManager) -> Self {
|
||||
Self { connection_manager }
|
||||
}
|
||||
|
|
@ -1,6 +1,9 @@
|
|||
pub mod cache;
|
||||
mod collab_db_ops;
|
||||
mod collab_storage;
|
||||
// mod recent;
|
||||
mod disk_cache;
|
||||
pub mod mem_cache;
|
||||
mod util;
|
||||
|
||||
pub use collab_db_ops::*;
|
||||
use collab_entity::CollabType;
|
||||
|
|
|
@ -1,49 +0,0 @@
|
|||
use chrono::Utc;
|
||||
use sqlx::{Error, PgPool};
|
||||
use std::ops::DerefMut;
|
||||
|
||||
#[derive(Debug, sqlx::FromRow)]
|
||||
struct RecentCollabRow {
|
||||
recent_oid: String,
|
||||
recent_partition_key: i32,
|
||||
snapshot_created_at: Option<chrono::DateTime<Utc>>,
|
||||
}
|
||||
|
||||
pub async fn select_recent_collab_with_limit(
|
||||
pg_pool: &PgPool,
|
||||
limit: i64,
|
||||
) -> Result<Vec<RecentCollabRow>, Error> {
|
||||
let recents = sqlx::query_as!(
|
||||
RecentCollabRow,
|
||||
"SELECT recent_oid, recent_partition_key, snapshot_created_at \
|
||||
FROM af_collab_recent \
|
||||
ORDER BY access_time DESC LIMIT $1",
|
||||
limit
|
||||
)
|
||||
.fetch_all(pg_pool)
|
||||
.await?;
|
||||
|
||||
Ok(recents)
|
||||
}
|
||||
|
||||
/// Insert a recent collab into the database. Update the record if it already exists.
|
||||
pub async fn insert_or_update_recent_collab(
|
||||
pg_pool: &PgPool,
|
||||
recent_collabs: Vec<RecentCollabRow>,
|
||||
) -> Result<(), Error> {
|
||||
let mut transaction = pg_pool.begin().await?;
|
||||
for collab in recent_collabs.iter() {
|
||||
sqlx::query!(
|
||||
"INSERT INTO af_collab_recent (recent_oid, recent_partition_key, access_time)
|
||||
VALUES ($1, $2, NOW()) ON CONFLICT (recent_oid)
|
||||
DO UPDATE SET recent_partition_key = EXCLUDED.recent_partition_key, access_time = NOW()",
|
||||
collab.recent_oid,
|
||||
collab.recent_partition_key
|
||||
)
|
||||
.execute(transaction.deref_mut())
|
||||
.await?;
|
||||
}
|
||||
|
||||
transaction.commit().await?;
|
||||
Ok(())
|
||||
}
|
|
@ -12,5 +12,6 @@ pub(crate) async fn encode_collab_from_bytes(bytes: Vec<u8>) -> Result<EncodedCo
|
|||
err
|
||||
))),
|
||||
})
|
||||
.await?
|
||||
.await
|
||||
.map_err(|err| AppError::Internal(anyhow!("Failed to spawn blocking task: {:?}", err)))?
|
||||
}
|
|
@ -4,11 +4,13 @@ use crate::resource_usage::{
|
|||
};
|
||||
use app_error::AppError;
|
||||
use async_trait::async_trait;
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
use database_entity::file_dto::{
|
||||
CompleteUploadRequest, CreateUploadRequest, CreateUploadResponse, UploadPartData,
|
||||
UploadPartResponse,
|
||||
};
|
||||
use sqlx::PgPool;
|
||||
|
||||
use tracing::{info, instrument, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
|
@ -26,7 +28,7 @@ pub trait BucketClient {
|
|||
async fn put_blob_as_content_type(
|
||||
&self,
|
||||
object_key: &str,
|
||||
content: &[u8],
|
||||
stream: ByteStream,
|
||||
content_type: &str,
|
||||
) -> Result<(), AppError>;
|
||||
|
||||
|
|
|
@ -108,22 +108,20 @@ impl BucketClient for AwsS3BucketClientImpl {
|
|||
async fn put_blob_as_content_type(
|
||||
&self,
|
||||
object_key: &str,
|
||||
content: &[u8],
|
||||
stream: ByteStream,
|
||||
content_type: &str,
|
||||
) -> Result<(), AppError> {
|
||||
trace!(
|
||||
"Uploading object to S3 bucket:{}, key {}, len: {}",
|
||||
"Uploading object to S3 bucket:{}, key {}",
|
||||
self.bucket,
|
||||
object_key,
|
||||
content.len()
|
||||
);
|
||||
let body = ByteStream::from(content.to_vec());
|
||||
self
|
||||
.client
|
||||
.put_object()
|
||||
.bucket(&self.bucket)
|
||||
.key(object_key)
|
||||
.body(body)
|
||||
.body(stream)
|
||||
.content_type(content_type)
|
||||
.send()
|
||||
.await
|
||||
|
|
|
@ -3,7 +3,7 @@ use serde::de::DeserializeOwned;
|
|||
use sqlx::postgres::PgListener;
|
||||
use sqlx::PgPool;
|
||||
use tokio::sync::broadcast;
|
||||
use tracing::{error, trace};
|
||||
use tracing::error;
|
||||
|
||||
pub struct PostgresDBListener<T: Clone> {
|
||||
pub notify: broadcast::Sender<T>,
|
||||
|
@ -22,7 +22,6 @@ where
|
|||
let notify = tx.clone();
|
||||
tokio::spawn(async move {
|
||||
while let Ok(notification) = listener.recv().await {
|
||||
trace!("Received notification: {}", notification.payload());
|
||||
match serde_json::from_str::<T>(notification.payload()) {
|
||||
Ok(change) => {
|
||||
let _ = tx.send(change);
|
||||
|
|
|
@ -551,6 +551,16 @@ impl From<AFTemplateGroupRow> for TemplateGroup {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, FromRow, Serialize, Deserialize)]
|
||||
pub struct AFImportTask {
|
||||
pub task_id: Uuid,
|
||||
pub file_size: i64,
|
||||
pub workspace_id: String,
|
||||
pub created_by: i64,
|
||||
pub status: i16,
|
||||
pub metadata: serde_json::Value,
|
||||
pub created_at: DateTime<Utc>,
|
||||
}
|
||||
#[derive(sqlx::Type, Serialize, Deserialize, Debug)]
|
||||
#[repr(i32)]
|
||||
pub enum AFAccessRequestStatusColumn {
|
||||
|
|
|
@ -9,8 +9,9 @@ use tracing::{event, instrument};
|
|||
use uuid::Uuid;
|
||||
|
||||
use crate::pg_row::{
|
||||
AFGlobalCommentRow, AFPermissionRow, AFReactionRow, AFUserProfileRow, AFWebUserColumn,
|
||||
AFWorkspaceInvitationMinimal, AFWorkspaceMemberPermRow, AFWorkspaceMemberRow, AFWorkspaceRow,
|
||||
AFGlobalCommentRow, AFImportTask, AFPermissionRow, AFReactionRow, AFUserProfileRow,
|
||||
AFWebUserColumn, AFWorkspaceInvitationMinimal, AFWorkspaceMemberPermRow, AFWorkspaceMemberRow,
|
||||
AFWorkspaceRow,
|
||||
};
|
||||
use crate::user::select_uid_from_email;
|
||||
use app_error::AppError;
|
||||
|
@ -35,13 +36,14 @@ pub async fn insert_user_workspace(
|
|||
tx: &mut Transaction<'_, sqlx::Postgres>,
|
||||
user_uuid: &Uuid,
|
||||
workspace_name: &str,
|
||||
is_initialized: bool,
|
||||
) -> Result<AFWorkspaceRow, AppError> {
|
||||
let workspace = sqlx::query_as!(
|
||||
AFWorkspaceRow,
|
||||
r#"
|
||||
WITH new_workspace AS (
|
||||
INSERT INTO public.af_workspace (owner_uid, workspace_name)
|
||||
VALUES ((SELECT uid FROM public.af_user WHERE uuid = $1), $2)
|
||||
INSERT INTO public.af_workspace (owner_uid, workspace_name, is_initialized)
|
||||
VALUES ((SELECT uid FROM public.af_user WHERE uuid = $1), $2, $3)
|
||||
RETURNING *
|
||||
)
|
||||
SELECT
|
||||
|
@ -60,6 +62,7 @@ pub async fn insert_user_workspace(
|
|||
"#,
|
||||
user_uuid,
|
||||
workspace_name,
|
||||
is_initialized,
|
||||
)
|
||||
.fetch_one(tx.deref_mut())
|
||||
.await?;
|
||||
|
@ -665,6 +668,28 @@ pub async fn select_workspace<'a, E: Executor<'a, Database = Postgres>>(
|
|||
.await?;
|
||||
Ok(workspace)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub async fn select_workspace_database_storage_id<'a, E: Executor<'a, Database = Postgres>>(
|
||||
executor: E,
|
||||
workspace_id: &str,
|
||||
) -> Result<Uuid, AppError> {
|
||||
let workspace_id = Uuid::parse_str(workspace_id)?;
|
||||
let result = sqlx::query!(
|
||||
r#"
|
||||
SELECT
|
||||
database_storage_id
|
||||
FROM public.af_workspace
|
||||
WHERE workspace_id = $1
|
||||
"#,
|
||||
workspace_id
|
||||
)
|
||||
.fetch_one(executor)
|
||||
.await?;
|
||||
|
||||
Ok(result.database_storage_id)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub async fn update_updated_at_of_workspace<'a, E: Executor<'a, Database = Postgres>>(
|
||||
executor: E,
|
||||
|
@ -711,7 +736,8 @@ pub async fn select_all_user_workspaces<'a, E: Executor<'a, Database = Postgres>
|
|||
JOIN public.af_user u ON w.owner_uid = u.uid
|
||||
WHERE wm.uid = (
|
||||
SELECT uid FROM public.af_user WHERE uuid = $1
|
||||
);
|
||||
)
|
||||
AND COALESCE(w.is_initialized, true) = true;
|
||||
"#,
|
||||
user_uuid
|
||||
)
|
||||
|
@ -739,6 +765,33 @@ pub async fn select_user_owned_workspaces_id<'a, E: Executor<'a, Database = Post
|
|||
Ok(workspace_ids)
|
||||
}
|
||||
|
||||
pub async fn update_workspace_status<'a, E: Executor<'a, Database = Postgres>>(
|
||||
executor: E,
|
||||
workspace_id: &str,
|
||||
is_initialized: bool,
|
||||
) -> Result<(), AppError> {
|
||||
let workspace_id = Uuid::parse_str(workspace_id)?;
|
||||
let res = sqlx::query!(
|
||||
r#"
|
||||
UPDATE public.af_workspace
|
||||
SET is_initialized = $2
|
||||
WHERE workspace_id = $1
|
||||
"#,
|
||||
workspace_id,
|
||||
is_initialized
|
||||
)
|
||||
.execute(executor)
|
||||
.await?;
|
||||
|
||||
if res.rows_affected() != 1 {
|
||||
tracing::error!(
|
||||
"Failed to update workspace status, workspace_id: {}",
|
||||
workspace_id
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn select_member_count_for_workspaces<'a, E: Executor<'a, Database = Postgres>>(
|
||||
executor: E,
|
||||
workspace_ids: &[Uuid],
|
||||
|
@ -1380,3 +1433,117 @@ pub async fn select_user_is_invitee_for_workspace_invitation(
|
|||
.await?;
|
||||
res.map_or(Ok(false), Ok)
|
||||
}
|
||||
|
||||
/// Get the import tasks for the user, optionally filtered by status
/// (0 = pending, 1 = completed, 2 = failed).
|
||||
pub async fn select_import_task(
|
||||
user_id: i64,
|
||||
pg_pool: &PgPool,
|
||||
filter_by_status: Option<i32>,
|
||||
) -> Result<Vec<AFImportTask>, AppError> {
|
||||
let mut query = String::from("SELECT * FROM af_import_task WHERE created_by = $1");
|
||||
if filter_by_status.is_some() {
|
||||
query.push_str(" AND status = $2");
|
||||
}
|
||||
query.push_str(" ORDER BY created_at DESC");
|
||||
|
||||
let import_tasks = if let Some(status) = filter_by_status {
|
||||
sqlx::query_as::<_, AFImportTask>(&query)
|
||||
.bind(user_id)
|
||||
.bind(status)
|
||||
.fetch_all(pg_pool)
|
||||
.await?
|
||||
} else {
|
||||
sqlx::query_as::<_, AFImportTask>(&query)
|
||||
.bind(user_id)
|
||||
.fetch_all(pg_pool)
|
||||
.await?
|
||||
};
|
||||
|
||||
Ok(import_tasks)
|
||||
}
|
||||
|
||||
/// Update import task status
|
||||
/// 0 => Pending,
|
||||
/// 1 => Completed,
|
||||
/// 2 => Failed,
|
||||
pub async fn update_import_task_status<'a, E: Executor<'a, Database = Postgres>>(
|
||||
task_id: &Uuid,
|
||||
new_status: i32,
|
||||
executor: E,
|
||||
) -> Result<(), AppError> {
|
||||
let query = "UPDATE af_import_task SET status = $1 WHERE task_id = $2";
|
||||
sqlx::query(query)
|
||||
.bind(new_status)
|
||||
.bind(task_id)
|
||||
.execute(executor)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
AppError::Internal(anyhow::anyhow!(
|
||||
"Failed to update status for task_id {}: {:?}",
|
||||
task_id,
|
||||
err
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn insert_import_task(
|
||||
task_id: Uuid,
|
||||
file_size: i64,
|
||||
workspace_id: String,
|
||||
created_by: i64,
|
||||
metadata: Option<serde_json::Value>,
|
||||
pg_pool: &PgPool,
|
||||
) -> Result<(), AppError> {
|
||||
let query = r#"
|
||||
INSERT INTO af_import_task (task_id, file_size, workspace_id, created_by, status, metadata)
|
||||
VALUES ($1, $2, $3, $4, $5, COALESCE($6, '{}'))
|
||||
"#;
|
||||
|
||||
sqlx::query(query)
|
||||
.bind(task_id)
|
||||
.bind(file_size)
|
||||
.bind(workspace_id)
|
||||
.bind(created_by)
|
||||
.bind(0)
|
||||
.bind(metadata)
|
||||
.execute(pg_pool)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
AppError::Internal(anyhow::anyhow!(
|
||||
"Failed to create a new import task: {:?}",
|
||||
err
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn update_import_task_metadata(
|
||||
task_id: Uuid,
|
||||
new_metadata: serde_json::Value,
|
||||
pg_pool: &PgPool,
|
||||
) -> Result<(), AppError> {
|
||||
let query = r#"
|
||||
UPDATE af_import_task
|
||||
SET metadata = metadata || $1
|
||||
WHERE task_id = $2
|
||||
"#;
|
||||
|
||||
sqlx::query(query)
|
||||
.bind(new_metadata)
|
||||
.bind(task_id)
|
||||
.execute(pg_pool)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
AppError::Internal(anyhow::anyhow!(
|
||||
"Failed to update metadata for task_id {}: {:?}",
|
||||
task_id,
|
||||
err
|
||||
))
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
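A small usage sketch for the helper above. The `"error"` key and calling context are assumptions; the JSONB `||` operator in the query performs a shallow merge, so metadata stored when the task was created is preserved.

```rust
use serde_json::json;

// Hypothetical: attach an error message to a task without clobbering its
// existing metadata keys.
async fn record_import_error(
  task_id: Uuid,
  message: &str,
  pg_pool: &PgPool,
) -> Result<(), AppError> {
  update_import_task_metadata(task_id, json!({ "error": message }), pg_pool).await
}
```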
|
||||
|
|
33
libs/shared-entity/src/dto/import_dto.rs
Normal file
|
@ -0,0 +1,33 @@
|
|||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct UserImportTask {
|
||||
pub tasks: Vec<ImportTaskDetail>,
|
||||
pub has_more: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct ImportTaskDetail {
|
||||
pub task_id: String,
|
||||
pub file_size: u64,
|
||||
pub created_at: i64,
|
||||
pub status: ImportTaskStatus,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq)]
|
||||
pub enum ImportTaskStatus {
|
||||
Pending,
|
||||
Completed,
|
||||
Failed,
|
||||
}
|
||||
|
||||
impl From<i16> for ImportTaskStatus {
|
||||
fn from(status: i16) -> Self {
|
||||
match status {
|
||||
0 => ImportTaskStatus::Pending,
|
||||
1 => ImportTaskStatus::Completed,
|
||||
2 => ImportTaskStatus::Failed,
|
||||
_ => ImportTaskStatus::Pending,
|
||||
}
|
||||
}
|
||||
}
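A minimal sketch (not in this commit) of mapping the `AFImportTask` row defined earlier in this diff onto the `ImportTaskDetail` DTO above, leaning on the `From<i16>` impl for the status column. The import path and function name are assumptions for illustration.

```rust
use database::pg_row::AFImportTask; // assumed path, as used elsewhere in this diff

fn to_task_detail(row: AFImportTask) -> ImportTaskDetail {
  ImportTaskDetail {
    task_id: row.task_id.to_string(),
    file_size: row.file_size as u64,
    created_at: row.created_at.timestamp(),
    status: ImportTaskStatus::from(row.status),
  }
}
```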
|
|
@ -3,6 +3,7 @@ pub mod ai_dto;
|
|||
pub mod auth_dto;
|
||||
pub mod billing_dto;
|
||||
pub mod history_dto;
|
||||
pub mod import_dto;
|
||||
pub mod publish_dto;
|
||||
pub mod search_dto;
|
||||
pub mod server_info_dto;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
use crate::response::{AppResponse, AppResponseError};
|
||||
use app_error::ErrorCode;
|
||||
use app_error::{AppError, ErrorCode};
|
||||
use bytes::{Buf, Bytes, BytesMut};
|
||||
use futures::{ready, Stream, TryStreamExt};
|
||||
|
||||
|
@ -9,12 +9,12 @@ use serde_json::de::SliceRead;
|
|||
use serde_json::StreamDeserializer;
|
||||
|
||||
use crate::dto::ai_dto::StringOrMessage;
|
||||
use anyhow::anyhow;
|
||||
use futures::stream::StreamExt;
|
||||
use std::marker::PhantomData;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use futures::stream::StreamExt;
|
||||
|
||||
impl<T> AppResponse<T>
|
||||
where
|
||||
T: DeserializeOwned + 'static,
|
||||
|
@ -149,7 +149,8 @@ where
|
|||
Poll::Ready(None)
|
||||
} else {
|
||||
// Return any other errors that occur during deserialization
|
||||
Poll::Ready(Some(Err(AppResponseError::from(err))))
|
||||
let err = AppError::Internal(anyhow!("Error deserializing JSON:{}", err));
|
||||
Poll::Ready(Some(Err(err.into())))
|
||||
}
|
||||
},
|
||||
}
|
||||
|
|
21
migrations/20240930135712_import_data.sql
Normal file
|
@ -0,0 +1,21 @@
|
|||
-- Add migration script here
|
||||
CREATE TABLE af_import_task(
|
||||
task_id UUID NOT NULL PRIMARY KEY,
|
||||
file_size BIGINT NOT NULL, -- File size in bytes, BIGINT for large files
|
||||
workspace_id TEXT NOT NULL, -- Workspace id
|
||||
created_by BIGINT NOT NULL, -- User ID
|
||||
status SMALLINT NOT NULL, -- Status of the file import (e.g., 0 for pending, 1 for completed, 2 for failed)
|
||||
metadata JSONB DEFAULT '{}' NOT NULL,
|
||||
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE INDEX idx_af_import_task_status_created_at
|
||||
ON af_import_task (status, created_at);
|
||||
|
||||
-- For existing workspaces this column will be NULL, so NULL and TRUE are treated as
-- initialized, and FALSE as not initialized.
|
||||
ALTER TABLE af_workspace
|
||||
ADD COLUMN is_initialized BOOLEAN DEFAULT NULL;
|
||||
|
||||
CREATE INDEX idx_af_workspace_is_initialized
|
||||
ON af_workspace (is_initialized);
|
|
@ -88,6 +88,30 @@ http {
|
|||
proxy_send_timeout 600s;
|
||||
}
|
||||
|
||||
location /api/import {
|
||||
set $appflowy_cloud appflowy_cloud;
|
||||
proxy_pass http://$appflowy_cloud:8000;
|
||||
|
||||
proxy_set_header X-Request-Id $request_id;
|
||||
proxy_set_header Host $http_host;
|
||||
|
||||
# Handle CORS
|
||||
if ($http_origin ~* (http://127.0.0.1:8000)) {
|
||||
add_header 'Access-Control-Allow-Origin' $http_origin always;
|
||||
}
|
||||
add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, PATCH';
|
||||
add_header 'Access-Control-Allow-Headers' 'Content-Type, Authorization, Accept';
|
||||
add_header 'Access-Control-Max-Age' 3600;
|
||||
|
||||
# Timeouts
|
||||
proxy_read_timeout 600s;
|
||||
proxy_connect_timeout 600s;
|
||||
proxy_send_timeout 600s;
|
||||
|
||||
# Disable buffering for large file uploads
|
||||
proxy_request_buffering off;
|
||||
}
|
||||
|
||||
location /api {
|
||||
set $appflowy_cloud appflowy_cloud;
|
||||
proxy_pass http://$appflowy_cloud:8000;
|
||||
|
|
2
rust-toolchain.toml
Normal file
|
@ -0,0 +1,2 @@
|
|||
[toolchain]
|
||||
channel = "1.77.2"
|
|
@ -3,7 +3,7 @@
|
|||
# Generate the current dependency list
|
||||
cargo tree > current_deps.txt
|
||||
|
||||
BASELINE_COUNT=621
|
||||
BASELINE_COUNT=629
|
||||
CURRENT_COUNT=$(cat current_deps.txt | wc -l)
|
||||
|
||||
echo "Expected dependency count (baseline): $BASELINE_COUNT"
|
||||
|
|
|
@ -15,6 +15,7 @@ use tracing::{info, warn};
|
|||
use crate::actix_ws::server::RealtimeServerActor;
|
||||
use access_control::access::AccessControl;
|
||||
use appflowy_ai_client::client::AppFlowyAIClient;
|
||||
use database::collab::cache::CollabCache;
|
||||
use workspace_access::notification::spawn_listen_on_workspace_member_change;
|
||||
use workspace_access::WorkspaceAccessControlImpl;
|
||||
|
||||
|
@ -22,7 +23,6 @@ use crate::api::{collab_scope, ws_scope};
|
|||
use crate::collab::access_control::{
|
||||
CollabAccessControlImpl, CollabStorageAccessControlImpl, RealtimeCollabAccessControlImpl,
|
||||
};
|
||||
use crate::collab::cache::CollabCache;
|
||||
use crate::collab::notification::spawn_listen_on_collab_member_change;
|
||||
use crate::collab::storage::CollabStorageImpl;
|
||||
use crate::command::{CLCommandReceiver, CLCommandSender};
|
||||
|
|
|
@ -2,13 +2,13 @@ use async_trait::async_trait;
|
|||
use std::sync::Arc;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::collab::cache::CollabCache;
|
||||
use access_control::access::ObjectType;
|
||||
use access_control::access::{enable_access_control, AccessControl};
|
||||
use access_control::act::{Action, ActionVariant};
|
||||
use access_control::collab::{CollabAccessControl, RealtimeAccessControl};
|
||||
use access_control::workspace::WorkspaceAccessControl;
|
||||
use app_error::AppError;
|
||||
use database::collab::cache::CollabCache;
|
||||
use database::collab::CollabStorageAccessControl;
|
||||
use database_entity::dto::AFAccessLevel;
|
||||
|
||||
|
|
|
@ -1,8 +1,4 @@
|
|||
pub mod access_control;
|
||||
pub mod cache;
|
||||
mod decode_util;
|
||||
pub mod disk_cache;
|
||||
pub mod mem_cache;
|
||||
pub mod notification;
|
||||
pub mod queue;
|
||||
mod queue_redis_ops;
|
||||
|
|
|
@ -14,9 +14,9 @@ use tokio::time::{interval, sleep, sleep_until, Instant};
|
|||
use tracing::{error, instrument, trace, warn};
|
||||
|
||||
use app_error::AppError;
|
||||
use database::collab::cache::CollabCache;
|
||||
use database_entity::dto::{AFCollabEmbeddings, CollabParams, QueryCollab, QueryCollabResult};
|
||||
|
||||
use crate::collab::cache::CollabCache;
|
||||
use crate::collab::queue_redis_ops::{
|
||||
get_pending_meta, remove_pending_meta, storage_cache_key, PendingWrite, WritePriority,
|
||||
PENDING_WRITE_META_EXPIRE_SECS,
|
||||
|
|
|
@ -15,10 +15,10 @@ use tracing::{error, instrument, trace};
|
|||
use validator::Validate;
|
||||
|
||||
use crate::collab::access_control::CollabAccessControlImpl;
|
||||
use crate::collab::cache::CollabCache;
|
||||
use crate::command::{CLCommandSender, CollaborationCommand};
|
||||
use crate::shared_state::RealtimeSharedState;
|
||||
use app_error::AppError;
|
||||
use database::collab::cache::CollabCache;
|
||||
use database::collab::{
|
||||
AppResult, CollabMetadata, CollabStorage, CollabStorageAccessControl, GetCollabOrigin,
|
||||
};
|
||||
|
|
48
services/appflowy-worker/Cargo.toml
Normal file
|
@ -0,0 +1,48 @@
|
|||
[package]
|
||||
name = "appflowy-worker"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
[[bin]]
|
||||
path = "src/main.rs"
|
||||
name = "appflowy_worker"
|
||||
|
||||
[lib]
|
||||
path = "src/lib.rs"
|
||||
|
||||
[dependencies]
|
||||
collab.workspace = true
|
||||
collab-entity.workspace = true
|
||||
collab-importer.workspace = true
|
||||
collab-folder.workspace = true
|
||||
collab-database.workspace = true
|
||||
tracing.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
anyhow.workspace = true
|
||||
database.workspace = true
|
||||
database-entity.workspace = true
|
||||
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "net"] }
|
||||
tokio-stream = { version = "0.1", features = ["net"] }
|
||||
redis = { workspace = true, features = ["aio", "tokio-comp", "connection-manager", "streams"] }
|
||||
dotenvy = "0.15.0"
|
||||
axum = "0.7.4"
|
||||
thiserror = "1.0.58"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
|
||||
serde_repr = "0.1.18"
|
||||
futures = "0.3.30"
|
||||
infra.workspace = true
|
||||
sqlx = { workspace = true, default-features = false, features = ["runtime-tokio-rustls", "macros", "postgres", "uuid", "chrono", "migrate"] }
|
||||
secrecy = { version = "0.8", features = ["serde"] }
|
||||
aws-sdk-s3 = { version = "1.36.0", features = [
|
||||
"behavior-version-latest",
|
||||
"rt-tokio",
|
||||
] }
|
||||
aws-config = { version = "1.5.1", features = ["behavior-version-latest"] }
|
||||
tokio-util = { version = "0.7.12", features = ["compat"] }
|
||||
async_zip = { version = "0.0.17", features = ["full"] }
|
||||
mime_guess = "2.0"
|
||||
bytes.workspace = true
|
||||
uuid.workspace = true
|
||||
|
41
services/appflowy-worker/Dockerfile
Normal file
|
@ -0,0 +1,41 @@
|
|||
FROM lukemathwalker/cargo-chef:latest-rust-1.77 as chef
|
||||
|
||||
# Set the initial working directory
|
||||
WORKDIR /app
|
||||
RUN apt update && apt install lld clang -y
|
||||
|
||||
FROM chef as planner
|
||||
COPY . .
|
||||
|
||||
# Compute a lock-like file for our project
|
||||
RUN cargo chef prepare --recipe-path recipe.json
|
||||
|
||||
FROM chef as builder
|
||||
|
||||
# Update package lists and install protobuf-compiler along with other build dependencies
|
||||
RUN apt update && apt install -y protobuf-compiler lld clang
|
||||
|
||||
COPY --from=planner /app/recipe.json recipe.json
|
||||
# Build our project dependencies
|
||||
RUN cargo chef cook --release --recipe-path recipe.json
|
||||
|
||||
COPY . .
|
||||
|
||||
WORKDIR /app/services/appflowy-worker
|
||||
RUN cargo build --release --bin appflowy_worker
|
||||
|
||||
FROM debian:bookworm-slim AS runtime
|
||||
WORKDIR /app/services/appflowy-worker
|
||||
RUN apt-get update -y \
|
||||
&& apt-get install -y --no-install-recommends openssl \
|
||||
&& apt-get install -y ca-certificates \
|
||||
# Clean up
|
||||
&& apt-get autoremove -y \
|
||||
&& apt-get clean -y \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app/
|
||||
COPY --from=builder /app/target/release/appflowy_worker /usr/local/bin/appflowy_worker
|
||||
ENV APP_ENVIRONMENT production
|
||||
ENV RUST_BACKTRACE 1
|
||||
CMD ["appflowy_worker"]
|
5
services/appflowy-worker/README.md
Normal file
|
@ -0,0 +1,5 @@
|
|||
Build docker image for appflowy worker manually
|
||||
|
||||
```shell
|
||||
docker buildx build -f ./services/appflowy-worker/Dockerfile --platform linux/amd64 -t appflowyinc/appflowy_worker --push .
|
||||
```
|
5
services/appflowy-worker/deploy.env
Normal file
|
@ -0,0 +1,5 @@
|
|||
APPFLOWY_WORKER_REDIS_URL=
|
||||
APPFLOWY_WORKER_DATABASE_URL=
|
||||
APPFLOWY_WORKER_DATABASE_NAME=postgres
|
||||
APPFLOWY_WORKER_ENVIRONMENT=production
|
||||
APPFLOWY_WORKER_DATABASE_MAX_CONNECTIONS=10
|
165
services/appflowy-worker/src/application.rs
Normal file
|
@ -0,0 +1,165 @@
|
|||
use crate::config::{Config, DatabaseSetting, Environment, S3Setting};
|
||||
use anyhow::Error;
|
||||
use redis::aio::ConnectionManager;
|
||||
use sqlx::postgres::PgPoolOptions;
|
||||
use sqlx::PgPool;
|
||||
|
||||
use crate::import_worker::worker::run_import_worker;
|
||||
use aws_sdk_s3::config::{Credentials, Region, SharedCredentialsProvider};
|
||||
|
||||
use crate::import_worker::email_notifier::EmailNotifier;
|
||||
use crate::s3_client::S3ClientImpl;
|
||||
|
||||
use axum::Router;
|
||||
use secrecy::ExposeSecret;
|
||||
|
||||
use std::sync::{Arc, Once};
|
||||
use std::time::Duration;
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::task::LocalSet;
|
||||
use tracing::info;
|
||||
use tracing::subscriber::set_global_default;
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
pub async fn run_server(
|
||||
listener: TcpListener,
|
||||
config: Config,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
dotenvy::dotenv().ok();
|
||||
init_subscriber(&config.app_env);
|
||||
info!("config loaded: {:?}", &config);
|
||||
|
||||
// Start the server
|
||||
info!("Starting server at: {:?}", listener.local_addr());
|
||||
create_app(listener, config).await.unwrap();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn init_subscriber(app_env: &Environment) {
|
||||
static START: Once = Once::new();
|
||||
START.call_once(|| {
|
||||
let level = std::env::var("RUST_LOG").unwrap_or("info".to_string());
|
||||
let mut filters = vec![];
|
||||
filters.push(format!("appflowy_worker={}", level));
|
||||
let env_filter = EnvFilter::new(filters.join(","));
|
||||
|
||||
let builder = tracing_subscriber::fmt()
|
||||
.with_target(true)
|
||||
.with_max_level(tracing::Level::TRACE)
|
||||
.with_thread_ids(false)
|
||||
.with_file(false);
|
||||
|
||||
match app_env {
|
||||
Environment::Local => {
|
||||
let subscriber = builder
|
||||
.with_ansi(true)
|
||||
.with_target(false)
|
||||
.with_file(false)
|
||||
.pretty()
|
||||
.finish()
|
||||
.with(env_filter);
|
||||
set_global_default(subscriber).unwrap();
|
||||
},
|
||||
Environment::Production => {
|
||||
let subscriber = builder.json().finish().with(env_filter);
|
||||
set_global_default(subscriber).unwrap();
|
||||
},
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
pub async fn create_app(listener: TcpListener, config: Config) -> Result<(), Error> {
|
||||
// Postgres
|
||||
info!("Preparing to run database migrations...");
|
||||
let pg_pool = get_connection_pool(&config.db_settings).await?;
|
||||
|
||||
// Redis
|
||||
let redis_client = redis::Client::open(config.redis_url)
|
||||
.expect("failed to create redis client")
|
||||
.get_connection_manager()
|
||||
.await
|
||||
.expect("failed to get redis connection manager");
|
||||
|
||||
let s3_client = get_aws_s3_client(&config.s3_setting).await?;
|
||||
|
||||
let state = AppState {
|
||||
redis_client,
|
||||
pg_pool,
|
||||
s3_client,
|
||||
};
|
||||
|
||||
let local_set = LocalSet::new();
|
||||
let email_notifier = EmailNotifier;
|
||||
let import_worker_fut = local_set.run_until(run_import_worker(
|
||||
state.pg_pool.clone(),
|
||||
state.redis_client.clone(),
|
||||
Arc::new(state.s3_client.clone()),
|
||||
Arc::new(email_notifier),
|
||||
"import_task_stream",
|
||||
30,
|
||||
));
|
||||
|
||||
let app = Router::new().with_state(state);
|
||||
|
||||
tokio::select! {
|
||||
_ = import_worker_fut => {
|
||||
info!("Notion importer stopped");
|
||||
},
|
||||
_ = axum::serve(listener, app) => {
|
||||
info!("worker stopped");
|
||||
},
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AppState {
|
||||
pub redis_client: ConnectionManager,
|
||||
pub pg_pool: PgPool,
|
||||
pub s3_client: S3ClientImpl,
|
||||
}
|
||||
|
||||
async fn get_connection_pool(setting: &DatabaseSetting) -> Result<PgPool, Error> {
|
||||
info!(
|
||||
"Connecting to postgres database with setting: {:?}",
|
||||
setting
|
||||
);
|
||||
PgPoolOptions::new()
|
||||
.max_connections(setting.max_connections)
|
||||
.acquire_timeout(Duration::from_secs(10))
|
||||
.max_lifetime(Duration::from_secs(30 * 60))
|
||||
.idle_timeout(Duration::from_secs(30))
|
||||
.connect_with(setting.with_db())
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("Failed to connect to postgres database: {}", e))
|
||||
}
|
||||
|
||||
pub async fn get_aws_s3_client(s3_setting: &S3Setting) -> Result<S3ClientImpl, Error> {
|
||||
let credentials = Credentials::new(
|
||||
s3_setting.access_key.clone(),
|
||||
s3_setting.secret_key.expose_secret().clone(),
|
||||
None,
|
||||
None,
|
||||
"appflowy-worker",
|
||||
);
|
||||
let shared_credentials = SharedCredentialsProvider::new(credentials);
|
||||
|
||||
// Configure the AWS SDK
|
||||
let config_builder = aws_sdk_s3::Config::builder()
|
||||
.credentials_provider(shared_credentials)
|
||||
.force_path_style(true)
|
||||
.region(Region::new(s3_setting.region.clone()));
|
||||
|
||||
let config = if s3_setting.use_minio {
|
||||
config_builder.endpoint_url(&s3_setting.minio_url).build()
|
||||
} else {
|
||||
config_builder.build()
|
||||
};
|
||||
let client = aws_sdk_s3::Client::from_conf(config);
|
||||
Ok(S3ClientImpl {
|
||||
inner: client,
|
||||
bucket: s3_setting.bucket.clone(),
|
||||
})
|
||||
}
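For completeness, a hedged sketch of the binary entry point that would tie `Config::from_env` and `run_server` together; the bind address and port are assumptions, not taken from this commit.

```rust
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
  let config = Config::from_env()?;
  // Port is illustrative; the real binary defines its own listen address.
  let listener = TcpListener::bind("0.0.0.0:4001").await?;
  run_server(listener, config).await
}
```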
|
109
services/appflowy-worker/src/config.rs
Normal file
|
@ -0,0 +1,109 @@
|
|||
use anyhow::{Context, Error};
|
||||
use infra::env_util::get_env_var;
|
||||
use secrecy::Secret;
|
||||
use serde::Deserialize;
|
||||
use sqlx::postgres::{PgConnectOptions, PgSslMode};
|
||||
use std::str::FromStr;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Config {
|
||||
pub app_env: Environment,
|
||||
pub redis_url: String,
|
||||
pub db_settings: DatabaseSetting,
|
||||
pub s3_setting: S3Setting,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
pub fn from_env() -> Result<Self, Error> {
|
||||
Ok(Config {
|
||||
app_env: get_env_var("APPFLOWY_WORKER_ENVIRONMENT", "local")
|
||||
.parse()
|
||||
.context("fail to get APPFLOWY_WORKER_ENVIRONMENT")?,
|
||||
redis_url: get_env_var("APPFLOWY_WORKER_REDIS_URL", "redis://localhost:6379"),
|
||||
db_settings: DatabaseSetting {
|
||||
pg_conn_opts: PgConnectOptions::from_str(&get_env_var(
|
||||
"APPFLOWY_WORKER_DATABASE_URL",
|
||||
"postgres://postgres:password@localhost:5432/postgres",
|
||||
))?,
|
||||
require_ssl: get_env_var("APPFLOWY_WORKER_DATABASE_REQUIRE_SSL", "false")
|
||||
.parse()
|
||||
.context("fail to get APPFLOWY_WORKER_DATABASE_REQUIRE_SSL")?,
|
||||
max_connections: get_env_var("APPFLOWY_WORKER_DATABASE_MAX_CONNECTIONS", "10")
|
||||
.parse()
|
||||
.context("fail to get APPFLOWY_WORKER_DATABASE_MAX_CONNECTIONS")?,
|
||||
database_name: get_env_var("APPFLOWY_WORKER_DATABASE_NAME", "postgres"),
|
||||
},
|
||||
s3_setting: S3Setting {
|
||||
use_minio: get_env_var("APPFLOWY_S3_USE_MINIO", "true")
|
||||
.parse()
|
||||
.context("fail to get APPFLOWY_S3_USE_MINIO")?,
|
||||
minio_url: get_env_var("APPFLOWY_S3_MINIO_URL", "http://localhost:9000"),
|
||||
access_key: get_env_var("APPFLOWY_S3_ACCESS_KEY", "minioadmin"),
|
||||
secret_key: get_env_var("APPFLOWY_S3_SECRET_KEY", "minioadmin").into(),
|
||||
bucket: get_env_var("APPFLOWY_S3_BUCKET", "appflowy"),
|
||||
region: get_env_var("APPFLOWY_S3_REGION", ""),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct DatabaseSetting {
|
||||
pub pg_conn_opts: PgConnectOptions,
|
||||
pub require_ssl: bool,
|
||||
pub max_connections: u32,
|
||||
pub database_name: String,
|
||||
}
|
||||
|
||||
impl DatabaseSetting {
|
||||
pub fn without_db(&self) -> PgConnectOptions {
|
||||
let ssl_mode = if self.require_ssl {
|
||||
PgSslMode::Require
|
||||
} else {
|
||||
PgSslMode::Prefer
|
||||
};
|
||||
let options = self.pg_conn_opts.clone();
|
||||
options.ssl_mode(ssl_mode)
|
||||
}
|
||||
|
||||
pub fn with_db(&self) -> PgConnectOptions {
|
||||
self.without_db().database(&self.database_name)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct StreamSetting {
|
||||
/// The key of the stream that contains control event, [CollabControlEvent].
|
||||
pub control_key: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize)]
|
||||
pub enum Environment {
|
||||
Local,
|
||||
Production,
|
||||
}
|
||||
|
||||
impl FromStr for Environment {
|
||||
type Err = anyhow::Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"local" => Ok(Self::Local),
|
||||
"production" => Ok(Self::Production),
|
||||
other => anyhow::bail!(
|
||||
"{} is not a supported environment. Use either `local` or `production`.",
|
||||
other
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(serde::Deserialize, Clone, Debug)]
|
||||
pub struct S3Setting {
|
||||
pub use_minio: bool,
|
||||
pub minio_url: String,
|
||||
pub access_key: String,
|
||||
pub secret_key: Secret<String>,
|
||||
pub bucket: String,
|
||||
pub region: String,
|
||||
}
|
32
services/appflowy-worker/src/error.rs
Normal file
|
@ -0,0 +1,32 @@
|
|||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum WorkerError {
|
||||
#[error(transparent)]
|
||||
ZipError(#[from] async_zip::error::ZipError),
|
||||
|
||||
#[error("Record not found: {0}")]
|
||||
RecordNotFound(String),
|
||||
|
||||
#[error(transparent)]
|
||||
IOError(#[from] std::io::Error),
|
||||
|
||||
#[error(transparent)]
|
||||
ImportError(#[from] ImportError),
|
||||
|
||||
#[error(transparent)]
|
||||
Internal(#[from] anyhow::Error),
|
||||
}
|
||||
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum ImportError {
|
||||
#[error("Can not open the imported workspace: {0}")]
|
||||
OpenImportWorkspaceError(String),
|
||||
|
||||
#[error(transparent)]
|
||||
ImportCollabError(#[from] collab_importer::error::ImporterError),
|
||||
|
||||
#[error("Can not open the workspace:{0}")]
|
||||
CannotOpenWorkspace(String),
|
||||
|
||||
#[error(transparent)]
|
||||
Internal(#[from] anyhow::Error),
|
||||
}
|
14
services/appflowy-worker/src/import_worker/email_notifier.rs
Normal file
|
@ -0,0 +1,14 @@
|
|||
use crate::import_worker::report::{ImportNotifier, ImportProgress};
|
||||
use axum::async_trait;
|
||||
|
||||
pub struct EmailNotifier;
|
||||
|
||||
#[async_trait]
|
||||
impl ImportNotifier for EmailNotifier {
|
||||
async fn notify_progress(&self, progress: ImportProgress) {
|
||||
match progress {
|
||||
ImportProgress::Started { workspace_id: _ } => {},
|
||||
ImportProgress::Finished(_result) => {},
|
||||
}
|
||||
}
|
||||
}
|
4
services/appflowy-worker/src/import_worker/mod.rs
Normal file
|
@ -0,0 +1,4 @@
|
|||
pub mod email_notifier;
|
||||
pub mod report;
|
||||
pub mod unzip;
|
||||
pub mod worker;
|
33
services/appflowy-worker/src/import_worker/report.rs
Normal file
|
@ -0,0 +1,33 @@
|
|||
use axum::async_trait;
|
||||
|
||||
#[async_trait]
|
||||
pub trait ImportNotifier: Send + Sync + 'static {
|
||||
async fn notify_progress(&self, progress: ImportProgress);
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum ImportProgress {
|
||||
Started { workspace_id: String },
|
||||
Finished(ImportResult),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ImportResult {
|
||||
pub workspace_id: String,
|
||||
}
|
||||
|
||||
pub struct ImportResultBuilder {
|
||||
workspace_id: String,
|
||||
}
|
||||
|
||||
impl ImportResultBuilder {
|
||||
pub fn new(workspace_id: String) -> Self {
|
||||
Self { workspace_id }
|
||||
}
|
||||
|
||||
pub fn build(self) -> ImportResult {
|
||||
ImportResult {
|
||||
workspace_id: self.workspace_id,
|
||||
}
|
||||
}
|
||||
}
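The `ImportNotifier` trait above is the worker's extension point for progress reporting. As a hedged illustration, a logging notifier that could stand in for the `EmailNotifier` stub (any `Arc<dyn ImportNotifier>` can be passed to `run_import_worker`) might look like this:

```rust
use axum::async_trait;

pub struct LogNotifier;

#[async_trait]
impl ImportNotifier for LogNotifier {
  async fn notify_progress(&self, progress: ImportProgress) {
    match progress {
      ImportProgress::Started { workspace_id } => {
        tracing::info!("import started for workspace {}", workspace_id)
      },
      ImportProgress::Finished(result) => {
        tracing::info!("import finished for workspace {}", result.workspace_id)
      },
    }
  }
}
```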
|
52
services/appflowy-worker/src/import_worker/unzip.rs
Normal file
|
@ -0,0 +1,52 @@
|
|||
use anyhow::Result;
|
||||
use async_zip::base::read::stream::{Ready, ZipFileReader};
|
||||
use futures::io::{AsyncBufRead, AsyncReadExt};
|
||||
use std::path::PathBuf;
|
||||
use tokio::fs::{self, File};
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
pub struct UnzipFile {
|
||||
pub file_name: String,
|
||||
pub unzip_dir_path: PathBuf,
|
||||
}
|
||||
|
||||
pub async fn unzip_async<R: AsyncBufRead + Unpin>(
|
||||
mut zip_reader: ZipFileReader<Ready<R>>,
|
||||
out: PathBuf,
|
||||
) -> Result<UnzipFile, anyhow::Error> {
|
||||
let mut real_file_name = None;
|
||||
while let Some(mut next_reader) = zip_reader.next_with_entry().await? {
|
||||
let entry_reader = next_reader.reader_mut();
|
||||
let filename = entry_reader.entry().filename().as_str()?;
|
||||
|
||||
if real_file_name.is_none() && filename.ends_with('/') {
|
||||
real_file_name = Some(filename.split('/').next().unwrap_or(filename).to_string());
|
||||
}
|
||||
|
||||
let output_path = out.join(filename);
|
||||
if filename.ends_with('/') {
|
||||
fs::create_dir_all(&output_path).await?;
|
||||
} else {
|
||||
if let Some(parent) = output_path.parent() {
|
||||
if !parent.exists() {
|
||||
fs::create_dir_all(parent).await?;
|
||||
}
|
||||
}
|
||||
|
||||
let mut outfile = File::create(&output_path).await?;
|
||||
let mut buffer = vec![];
|
||||
entry_reader.read_to_end(&mut buffer).await?;
|
||||
outfile.write_all(&buffer).await?;
|
||||
}
|
||||
|
||||
zip_reader = next_reader.done().await?;
|
||||
}
|
||||
|
||||
match real_file_name {
|
||||
None => Err(anyhow::anyhow!("No files found in zip archive")),
|
||||
Some(file_name) => Ok(UnzipFile {
|
||||
file_name: file_name.clone(),
|
||||
unzip_dir_path: out.join(file_name),
|
||||
}),
|
||||
}
|
||||
}
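A usage sketch, assuming a local zip on disk instead of the S3 stream the worker uses: `TokioAsyncReadCompatExt` (the `compat` feature of `tokio-util`, already in this crate's Cargo.toml) bridges tokio's `AsyncRead` into the futures-io traits that `ZipFileReader` expects. The helper name and local-file setup are illustrative only.

```rust
use async_zip::base::read::stream::ZipFileReader;
use futures::io::BufReader;
use std::path::PathBuf;
use tokio_util::compat::TokioAsyncReadCompatExt;

// Hypothetical helper: stream-unzip a local archive into `out`.
async fn unzip_local(zip_path: &str, out: PathBuf) -> anyhow::Result<UnzipFile> {
  let file = tokio::fs::File::open(zip_path).await?.compat();
  let zip_reader = ZipFileReader::new(BufReader::new(file));
  unzip_async(zip_reader, out).await
}
```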
|
759
services/appflowy-worker/src/import_worker/worker.rs
Normal file
|
@ -0,0 +1,759 @@
|
|||
use crate::error::ImportError;
|
||||
use crate::import_worker::unzip::unzip_async;
|
||||
use crate::s3_client::S3StreamResponse;
|
||||
use anyhow::anyhow;
|
||||
use async_zip::base::read::stream::ZipFileReader;
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
use bytes::Bytes;
|
||||
use collab::core::origin::CollabOrigin;
|
||||
use collab::entity::EncodedCollab;
|
||||
|
||||
use collab_database::workspace_database::WorkspaceDatabaseBody;
|
||||
|
||||
use collab_entity::CollabType;
|
||||
use collab_folder::Folder;
|
||||
use collab_importer::imported_collab::ImportType;
|
||||
use collab_importer::notion::page::CollabResource;
|
||||
use collab_importer::notion::NotionImporter;
|
||||
use collab_importer::util::FileId;
|
||||
use database::collab::{insert_into_af_collab_bulk_for_user, select_blob_from_af_collab};
|
||||
use futures::stream::FuturesUnordered;
|
||||
use futures::{stream, StreamExt};
|
||||
use redis::aio::ConnectionManager;
|
||||
use redis::streams::{
|
||||
StreamClaimOptions, StreamClaimReply, StreamId, StreamPendingReply, StreamReadOptions,
|
||||
StreamReadReply,
|
||||
};
|
||||
use redis::{AsyncCommands, RedisResult, Value};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::from_str;
|
||||
use sqlx::types::chrono;
|
||||
use sqlx::{PgPool, Pool, Postgres};
|
||||
use std::collections::HashMap;
|
||||
use std::env::temp_dir;
|
||||
use std::fs::Permissions;
|
||||
use std::ops::DerefMut;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::fs;
|
||||
|
||||
use crate::import_worker::report::{ImportNotifier, ImportProgress, ImportResultBuilder};
|
||||
use database::workspace::{
|
||||
select_workspace_database_storage_id, update_import_task_status, update_workspace_status,
|
||||
};
|
||||
use database_entity::dto::CollabParams;
|
||||
|
||||
use crate::s3_client::S3Client;
|
||||
use database::collab::mem_cache::{cache_exp_secs_from_collab_type, CollabMemCache};
|
||||
use tokio::task::spawn_local;
|
||||
use tokio::time::interval;
|
||||
use tracing::{error, info, trace, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
const GROUP_NAME: &str = "import_task_group";
|
||||
const CONSUMER_NAME: &str = "appflowy_worker";
|
||||
pub async fn run_import_worker(
|
||||
pg_pool: PgPool,
|
||||
mut redis_client: ConnectionManager,
|
||||
s3_client: Arc<dyn S3Client>,
|
||||
notifier: Arc<dyn ImportNotifier>,
|
||||
stream_name: &str,
|
||||
tick_interval_secs: u64,
|
||||
) -> Result<(), ImportError> {
|
||||
info!("Starting importer worker");
|
||||
if let Err(err) = ensure_consumer_group(stream_name, GROUP_NAME, &mut redis_client)
|
||||
.await
|
||||
.map_err(ImportError::Internal)
|
||||
{
|
||||
error!("Failed to ensure consumer group: {:?}", err);
|
||||
}
|
||||
|
||||
process_un_acked_tasks(
|
||||
&mut redis_client,
|
||||
&s3_client,
|
||||
&pg_pool,
|
||||
stream_name,
|
||||
GROUP_NAME,
|
||||
CONSUMER_NAME,
|
||||
notifier.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
process_upcoming_tasks(
|
||||
&mut redis_client,
|
||||
&s3_client,
|
||||
pg_pool,
|
||||
stream_name,
|
||||
GROUP_NAME,
|
||||
CONSUMER_NAME,
|
||||
notifier.clone(),
|
||||
tick_interval_secs,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn process_un_acked_tasks(
|
||||
redis_client: &mut ConnectionManager,
|
||||
s3_client: &Arc<dyn S3Client>,
|
||||
pg_pool: &PgPool,
|
||||
stream_name: &str,
|
||||
group_name: &str,
|
||||
consumer_name: &str,
|
||||
notifier: Arc<dyn ImportNotifier>,
|
||||
) {
|
||||
// when server restarts, we need to check if there are any unacknowledged tasks
|
||||
match get_un_ack_tasks(stream_name, group_name, consumer_name, redis_client).await {
|
||||
Ok(un_ack_tasks) => {
|
||||
info!("Found {} unacknowledged tasks", un_ack_tasks.len());
|
||||
for un_ack_task in un_ack_tasks {
|
||||
// Ignore the error here since the consume task will handle the error
|
||||
let _ = consume_task(
|
||||
stream_name,
|
||||
group_name,
|
||||
un_ack_task.task,
|
||||
&un_ack_task.stream_id.id,
|
||||
redis_client,
|
||||
s3_client,
|
||||
pg_pool,
|
||||
notifier.clone(),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
},
|
||||
Err(err) => error!("Failed to get unacknowledged tasks: {:?}", err),
|
||||
}
|
||||
}
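For context, a hedged sketch of how a producer (the cloud API side) might enqueue work on the `import_task_stream` this worker reads. The `"task"` field name and the JSON payload layout are assumptions, since `ImportTask::try_from(&StreamId)` is not shown in this hunk.

```rust
use redis::AsyncCommands;

async fn enqueue_import_task(
  redis: &mut redis::aio::ConnectionManager,
  task_json: String,
) -> redis::RedisResult<()> {
  // XADD import_task_stream * task <json>; the consumer group created by
  // ensure_consumer_group delivers the entry to this worker.
  let _: String = redis
    .xadd("import_task_stream", "*", &[("task", task_json)])
    .await?;
  Ok(())
}
```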
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn process_upcoming_tasks(
|
||||
redis_client: &mut ConnectionManager,
|
||||
s3_client: &Arc<dyn S3Client>,
|
||||
pg_pool: PgPool,
|
||||
stream_name: &str,
|
||||
group_name: &str,
|
||||
consumer_name: &str,
|
||||
notifier: Arc<dyn ImportNotifier>,
|
||||
interval_secs: u64,
|
||||
) -> Result<(), ImportError> {
|
||||
let options = StreamReadOptions::default()
|
||||
.group(group_name, consumer_name)
|
||||
.count(3);
|
||||
let mut interval = interval(Duration::from_secs(interval_secs));
|
||||
interval.tick().await;
|
||||
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let tasks: StreamReadReply = match redis_client
|
||||
.xread_options(&[stream_name], &[">"], &options)
|
||||
.await
|
||||
{
|
||||
Ok(tasks) => tasks,
|
||||
Err(err) => {
|
||||
error!("Failed to read tasks from Redis stream: {:?}", err);
|
||||
continue;
|
||||
},
|
||||
};
|
||||
|
||||
let mut task_handlers = FuturesUnordered::new();
|
||||
for stream_key in tasks.keys {
|
||||
// For each stream key, iterate through the stream entries
|
||||
for stream_id in stream_key.ids {
|
||||
match ImportTask::try_from(&stream_id) {
|
||||
Ok(import_task) => {
|
||||
let entry_id = stream_id.id.clone();
|
||||
let mut cloned_redis_client = redis_client.clone();
|
||||
let cloned_s3_client = s3_client.clone();
|
||||
let pg_pool = pg_pool.clone();
|
||||
let notifier = notifier.clone();
|
||||
let stream_name = stream_name.to_string();
|
||||
let group_name = group_name.to_string();
|
||||
task_handlers.push(spawn_local(async move {
|
||||
consume_task(
|
||||
&stream_name,
|
||||
&group_name,
|
||||
import_task,
|
||||
&entry_id,
|
||||
&mut cloned_redis_client,
|
||||
&cloned_s3_client,
|
||||
&pg_pool,
|
||||
notifier,
|
||||
)
|
||||
.await?;
|
||||
Ok::<(), ImportError>(())
|
||||
}));
|
||||
},
|
||||
Err(err) => {
|
||||
error!("Failed to deserialize task: {:?}", err);
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
while let Some(result) = task_handlers.next().await {
|
||||
match result {
|
||||
Ok(Ok(())) => trace!("Task completed successfully"),
|
||||
Ok(Err(e)) => error!("Task failed: {:?}", e),
|
||||
Err(e) => error!("Runtime error: {:?}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
}

#[allow(clippy::too_many_arguments)]
async fn consume_task(
  stream_name: &str,
  group_name: &str,
  import_task: ImportTask,
  entry_id: &String,
  redis_client: &mut ConnectionManager,
  s3_client: &Arc<dyn S3Client>,
  pg_pool: &Pool<Postgres>,
  notifier: Arc<dyn ImportNotifier>,
) -> Result<(), ImportError> {
  process_task(import_task, s3_client, redis_client, pg_pool, notifier).await?;
  let _: () = redis_client
    .xack(stream_name, group_name, &[entry_id])
    .await
    .map_err(|e| {
      error!("Failed to acknowledge task: {:?}", e);
      ImportError::Internal(e.into())
    })?;
  Ok::<_, ImportError>(())
}

async fn process_task(
  import_task: ImportTask,
  s3_client: &Arc<dyn S3Client>,
  redis_client: &mut ConnectionManager,
  pg_pool: &PgPool,
  notifier: Arc<dyn ImportNotifier>,
) -> Result<(), ImportError> {
  trace!("Processing task: {:?}", import_task);
  match import_task {
    ImportTask::Notion(task) => {
      // 1. unzip file to temp dir
      let unzip_dir_path = download_zip_file(&task, s3_client).await?;
      // 2. import zip
      let result =
        process_unzip_file(&task, &unzip_dir_path, pg_pool, redis_client, s3_client).await;
      // 3. delete zip file regardless of success or failure
      match fs::remove_dir_all(unzip_dir_path).await {
        Ok(_) => trace!("[Import]: {} deleted unzip file", task.workspace_id),
        Err(err) => error!("Failed to delete unzip file: {:?}", err),
      }
      // 4. notify import result
      trace!(
        "[Import]: {}:{} import result: {:?}",
        task.workspace_id,
        task.task_id,
        result
      );
      notify_user(&task, result, notifier).await?;
      // 5. remove file from S3
      if let Err(err) = s3_client.delete_blob(task.s3_key.as_str()).await {
        error!("Failed to delete zip file from S3: {:?}", err);
      }
      Ok(())
    },
    ImportTask::Custom(value) => {
      trace!("Custom task: {:?}", value);
      match value.get("workspace_id").and_then(|v| v.as_str()) {
        None => {
          warn!("Missing workspace_id in custom task");
        },
        Some(workspace_id) => {
          let result = ImportResultBuilder::new(workspace_id.to_string()).build();
          notifier
            .notify_progress(ImportProgress::Finished(result))
            .await;
        },
      }
      Ok(())
    },
  }
}

async fn download_zip_file(
  import_task: &NotionImportTask,
  s3_client: &Arc<dyn S3Client>,
) -> Result<PathBuf, ImportError> {
  let S3StreamResponse {
    stream,
    content_type: _,
  } = s3_client
    .get_blob(import_task.s3_key.as_str())
    .await
    .map_err(|err| ImportError::Internal(err.into()))?;

  let zip_reader = ZipFileReader::new(stream);
  let unique_file_name = uuid::Uuid::new_v4().to_string();
  let output_file_path = temp_dir().join(unique_file_name);
  fs::create_dir_all(&output_file_path)
    .await
    .map_err(|err| ImportError::Internal(err.into()))?;

  fs::set_permissions(&output_file_path, Permissions::from_mode(0o777))
    .await
    .map_err(|err| {
      ImportError::Internal(anyhow!("Failed to set permissions for temp dir: {:?}", err))
    })?;

  let unzip_file = unzip_async(zip_reader, output_file_path)
    .await
    .map_err(ImportError::Internal)?;
  Ok(unzip_file.unzip_dir_path)
}
|
||||
|
||||
async fn process_unzip_file(
|
||||
import_task: &NotionImportTask,
|
||||
unzip_dir_path: &PathBuf,
|
||||
pg_pool: &PgPool,
|
||||
redis_client: &mut ConnectionManager,
|
||||
s3_client: &Arc<dyn S3Client>,
|
||||
) -> Result<(), ImportError> {
|
||||
let notion_importer = NotionImporter::new(
|
||||
unzip_dir_path,
|
||||
import_task.workspace_id.clone(),
|
||||
import_task.host.clone(),
|
||||
)
|
||||
.map_err(ImportError::ImportCollabError)?;
|
||||
|
||||
let imported = notion_importer
|
||||
.import()
|
||||
.await
|
||||
.map_err(ImportError::ImportCollabError)?;
|
||||
let nested_views = imported.build_nested_views(import_task.uid).await;
|
||||
trace!(
|
||||
"[Import]: {} imported nested views:{}",
|
||||
import_task.workspace_id,
|
||||
nested_views
|
||||
);
|
||||
|
||||
// 1. Open the workspace folder
|
||||
let folder_collab =
|
||||
get_encode_collab_from_bytes(&imported.workspace_id, &CollabType::Folder, pg_pool).await?;
|
||||
let mut folder = Folder::from_collab_doc_state(
|
||||
import_task.uid,
|
||||
CollabOrigin::Server,
|
||||
folder_collab.into(),
|
||||
&imported.workspace_id,
|
||||
vec![],
|
||||
)
|
||||
.map_err(|err| ImportError::CannotOpenWorkspace(err.to_string()))?;
|
||||
|
||||
// 2. Insert collabs' views into the folder
|
||||
trace!(
|
||||
"[Import]: {} insert views:{} to folder",
|
||||
import_task.workspace_id,
|
||||
nested_views.len()
|
||||
);
|
||||
folder.insert_nested_views(nested_views.into_inner());
|
||||
|
||||
let mut resources = vec![];
|
||||
let mut collab_params_list = vec![];
|
||||
let mut database_view_ids_by_database_id: HashMap<String, Vec<String>> = HashMap::new();
|
||||
let mem_cache = CollabMemCache::new(redis_client.clone());
|
||||
let timestamp = chrono::Utc::now().timestamp();
|
||||
|
||||
// 3. Collect all collabs and resources
|
||||
let mut stream = imported.into_collab_stream().await;
|
||||
while let Some(imported_collab) = stream.next().await {
|
||||
trace!(
|
||||
"[Import]: {} imported collab: {}",
|
||||
import_task.workspace_id,
|
||||
imported_collab
|
||||
);
|
||||
resources.push(imported_collab.resource);
|
||||
collab_params_list.extend(
|
||||
imported_collab
|
||||
.collabs
|
||||
.into_iter()
|
||||
.map(|imported_collab| CollabParams {
|
||||
object_id: imported_collab.object_id,
|
||||
collab_type: imported_collab.collab_type,
|
||||
embeddings: None,
|
||||
encoded_collab_v1: Bytes::from(imported_collab.encoded_collab.encode_to_bytes().unwrap()),
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
);
|
||||
|
||||
match imported_collab.import_type {
|
||||
ImportType::Database {
|
||||
database_id,
|
||||
view_ids,
|
||||
} => {
|
||||
database_view_ids_by_database_id.insert(database_id, view_ids);
|
||||
},
|
||||
ImportType::Document => {
|
||||
// do nothing
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
let w_database_id = select_workspace_database_storage_id(pg_pool, &import_task.workspace_id)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
ImportError::Internal(anyhow!(
|
||||
"Failed to select workspace database storage id: {:?}",
|
||||
err
|
||||
))
|
||||
})
|
||||
.map(|id| id.to_string())?;
|
||||
|
||||
// 4. Edit workspace database collab and then encode workspace database collab
|
||||
if !database_view_ids_by_database_id.is_empty() {
|
||||
let w_db_collab =
|
||||
get_encode_collab_from_bytes(&w_database_id, &CollabType::WorkspaceDatabase, pg_pool).await?;
|
||||
let mut w_database = WorkspaceDatabaseBody::from_collab_doc_state(
|
||||
&w_database_id,
|
||||
CollabOrigin::Server,
|
||||
w_db_collab.into(),
|
||||
)
|
||||
.map_err(|err| ImportError::CannotOpenWorkspace(err.to_string()))?;
|
||||
w_database.batch_add_database(database_view_ids_by_database_id);
|
||||
|
||||
let w_database_collab = w_database.encode_collab_v1().map_err(|err| {
|
||||
ImportError::Internal(anyhow!(
|
||||
"Failed to encode workspace database collab: {:?}",
|
||||
err
|
||||
))
|
||||
})?;
|
||||
// Update the workspace database cache because newly created workspace databases are cached in Redis.
|
||||
mem_cache
|
||||
.insert_encode_collab(
|
||||
&w_database_id,
|
||||
w_database_collab.clone(),
|
||||
timestamp,
|
||||
cache_exp_secs_from_collab_type(&CollabType::WorkspaceDatabase),
|
||||
)
|
||||
.await;
|
||||
|
||||
trace!(
|
||||
"[Import]: {} did encode workspace database collab",
|
||||
import_task.workspace_id
|
||||
);
|
||||
let w_database_collab_params = CollabParams {
|
||||
object_id: w_database_id.clone(),
|
||||
collab_type: CollabType::WorkspaceDatabase,
|
||||
embeddings: None,
|
||||
encoded_collab_v1: Bytes::from(w_database_collab.encode_to_bytes().unwrap()),
|
||||
};
|
||||
collab_params_list.push(w_database_collab_params);
|
||||
}
|
||||
|
||||
// 5. Encode Folder
|
||||
let folder_collab = folder
|
||||
.encode_collab_v1(|collab| CollabType::Folder.validate_require_data(collab))
|
||||
.map_err(|err| ImportError::Internal(err.into()))?;
|
||||
|
||||
// Update the folder cache because newly created folders are cached in Redis.
|
||||
// Other collaboration objects do not use caching yet, so there is no need to insert them into Redis.
|
||||
mem_cache
|
||||
.insert_encode_collab(
|
||||
&import_task.workspace_id,
|
||||
folder_collab.clone(),
|
||||
timestamp,
|
||||
cache_exp_secs_from_collab_type(&CollabType::Folder),
|
||||
)
|
||||
.await;
|
||||
|
||||
let folder_collab_params = CollabParams {
|
||||
object_id: import_task.workspace_id.clone(),
|
||||
collab_type: CollabType::Folder,
|
||||
embeddings: None,
|
||||
encoded_collab_v1: Bytes::from(folder_collab.encode_to_bytes().unwrap()),
|
||||
};
|
||||
trace!(
|
||||
"[Import]: {} did encode folder collab",
|
||||
import_task.workspace_id
|
||||
);
|
||||
collab_params_list.push(folder_collab_params);
|
||||
|
||||
// 6. Start a transaction to insert all collabs
|
||||
let mut transaction = pg_pool.begin().await.map_err(|err| {
|
||||
ImportError::Internal(anyhow!(
|
||||
"Failed to start transaction when importing data: {:?}",
|
||||
err
|
||||
))
|
||||
})?;
|
||||
|
||||
trace!(
|
||||
"[Import]: {} insert collabs into database",
|
||||
import_task.workspace_id
|
||||
);
|
||||
|
||||
// 7. write all collab to disk
|
||||
insert_into_af_collab_bulk_for_user(
|
||||
&mut transaction,
|
||||
&import_task.uid,
|
||||
&import_task.workspace_id,
|
||||
&collab_params_list,
|
||||
)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
ImportError::Internal(anyhow!(
|
||||
"Failed to insert collabs into database when importing data: {:?}",
|
||||
err
|
||||
))
|
||||
})?;
|
||||
|
||||
trace!(
|
||||
"[Import]: {} update task:{} status to completed",
|
||||
import_task.workspace_id,
|
||||
import_task.task_id,
|
||||
);
|
||||
update_import_task_status(&import_task.task_id, 1, transaction.deref_mut())
|
||||
.await
|
||||
.map_err(|err| {
|
||||
ImportError::Internal(anyhow!(
|
||||
"Failed to update import task status when importing data: {:?}",
|
||||
err
|
||||
))
|
||||
})?;
|
||||
|
||||
trace!(
|
||||
"[Import]: {} set is_initialized to true",
|
||||
import_task.workspace_id,
|
||||
);
|
||||
update_workspace_status(transaction.deref_mut(), &import_task.workspace_id, true)
|
||||
.await
|
||||
.map_err(|err| {
|
||||
ImportError::Internal(anyhow!(
|
||||
"Failed to update workspace status when importing data: {:?}",
|
||||
err
|
||||
))
|
||||
})?;
|
||||
|
||||
let result = transaction.commit().await.map_err(|err| {
|
||||
ImportError::Internal(anyhow!(
|
||||
"Failed to commit transaction when importing data: {:?}",
|
||||
err
|
||||
))
|
||||
});
|
||||
|
||||
if result.is_err() {
|
||||
// remove cache in redis
|
||||
let _ = mem_cache.remove_encode_collab(&w_database_id).await;
|
||||
let _ = mem_cache
|
||||
.remove_encode_collab(&import_task.workspace_id)
|
||||
.await;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// 7. after inserting all collabs, upload all files to S3
|
||||
trace!("[Import]: {} upload files to s3", import_task.workspace_id,);
|
||||
batch_upload_files_to_s3(&import_task.workspace_id, s3_client, resources)
|
||||
.await
|
||||
.map_err(|err| ImportError::Internal(anyhow!("Failed to upload files to S3: {:?}", err)))?;
|
||||
|
||||
Ok(())
|
||||
}

async fn notify_user(
  _import_task: &NotionImportTask,
  _result: Result<(), ImportError>,
  _notifier: Arc<dyn ImportNotifier>,
) -> Result<(), ImportError> {
  // send email
  Ok(())
}
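
Note: notify_user is left as a stub in this commit (the email notification is a TODO). Purely as an illustration of the hook, a notifier that just logs progress could look roughly like the sketch below; the struct name is invented, and nothing beyond the ImportNotifier::notify_progress signature already used in this diff is assumed.

// Illustrative only: a notifier that logs progress instead of sending email.
// `LoggingNotifier` is a made-up name; only the trait method used elsewhere in
// this diff is relied upon.
use axum::async_trait;
use tracing::info;

pub struct LoggingNotifier;

#[async_trait]
impl ImportNotifier for LoggingNotifier {
  async fn notify_progress(&self, progress: ImportProgress) {
    // ImportProgress derives Debug (the tests print it), so log it verbatim.
    info!("import progress: {:?}", progress);
  }
}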
|
||||
|
||||
pub async fn batch_upload_files_to_s3(
|
||||
workspace_id: &str,
|
||||
client: &Arc<dyn S3Client>,
|
||||
collab_resources: Vec<CollabResource>,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
// Flatten the collab_resources into an iterator of (workspace_id, object_id, file_path)
|
||||
let file_tasks = collab_resources
|
||||
.into_iter()
|
||||
.flat_map(|resource| {
|
||||
let object_id = resource.object_id;
|
||||
resource
|
||||
.files
|
||||
.into_iter()
|
||||
.map(move |file| (object_id.clone(), file))
|
||||
})
|
||||
.collect::<Vec<(String, String)>>();
|
||||
|
||||
// Create a stream of upload tasks
|
||||
let upload_stream = stream::iter(file_tasks.into_iter().map(
|
||||
|(object_id, file_path)| async move {
|
||||
match upload_file_to_s3(client, workspace_id, &object_id, &file_path).await {
|
||||
Ok(_) => {
|
||||
trace!("Successfully uploaded: {}", file_path);
|
||||
Ok(())
|
||||
},
|
||||
Err(e) => {
|
||||
error!("Failed to upload {}: {:?}", file_path, e);
|
||||
Err(e)
|
||||
},
|
||||
}
|
||||
},
|
||||
))
|
||||
.buffer_unordered(5);
|
||||
let results: Vec<_> = upload_stream.collect().await;
|
||||
let errors: Vec<_> = results.into_iter().filter_map(Result::err).collect();
|
||||
if errors.is_empty() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!("Some uploads failed: {:?}", errors))
|
||||
}
|
||||
}
|
||||
|
||||
async fn upload_file_to_s3(
|
||||
client: &Arc<dyn S3Client>,
|
||||
workspace_id: &str,
|
||||
object_id: &str,
|
||||
file_path: &str,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let path = Path::new(file_path);
|
||||
if !path.exists() {
|
||||
return Err(anyhow!("File does not exist: {:?}", path));
|
||||
}
|
||||
let file_id = FileId::from_path(&path.to_path_buf()).await?;
|
||||
let mime_type = mime_guess::from_path(file_path).first_or_octet_stream();
|
||||
let object_key = format!("{}/{}/{}", workspace_id, object_id, file_id);
|
||||
let byte_stream = ByteStream::from_path(path).await?;
|
||||
client
|
||||
.put_blob(&object_key, byte_stream, Some(mime_type.as_ref()))
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get_encode_collab_from_bytes(
|
||||
object_id: &str,
|
||||
collab_type: &CollabType,
|
||||
pg_pool: &PgPool,
|
||||
) -> Result<EncodedCollab, ImportError> {
|
||||
let bytes = select_blob_from_af_collab(pg_pool, collab_type, object_id)
|
||||
.await
|
||||
.map_err(|err| ImportError::Internal(err.into()))?;
|
||||
tokio::task::spawn_blocking(move || match EncodedCollab::decode_from_bytes(&bytes) {
|
||||
Ok(encoded_collab) => Ok(encoded_collab),
|
||||
Err(err) => Err(ImportError::Internal(anyhow!(
|
||||
"Failed to decode collab from bytes: {:?}",
|
||||
err
|
||||
))),
|
||||
})
|
||||
.await
|
||||
.map_err(|err| ImportError::Internal(err.into()))?
|
||||
}

/// Ensure the consumer group exists, if not, create it.
async fn ensure_consumer_group(
  stream_key: &str,
  group_name: &str,
  redis_client: &mut ConnectionManager,
) -> Result<(), anyhow::Error> {
  let result: RedisResult<()> = redis_client
    .xgroup_create_mkstream(stream_key, group_name, "0")
    .await;

  if let Err(redis_error) = result {
    if let Some(code) = redis_error.code() {
      if code == "BUSYGROUP" {
        return Ok(()); // Group already exists, considered as success.
      }
    }
    error!("Error when creating consumer group: {:?}", redis_error);
    return Err(redis_error.into());
  }

  Ok(())
}
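
Because a BUSYGROUP reply is mapped to Ok(()), the function is safe to call on every worker start. A test-style sketch of that idempotency (illustrative only; the local Redis URL, stream name, and group name are placeholders):

// Illustrative only: both calls should return Ok(()) because the second one
// hits BUSYGROUP internally.
#[tokio::test]
async fn ensure_consumer_group_is_idempotent() {
  let mut redis_client = redis::Client::open("redis://localhost:6379")
    .unwrap()
    .get_connection_manager()
    .await
    .unwrap();

  let stream = format!("import_task_stream_{}", uuid::Uuid::new_v4());
  ensure_consumer_group(&stream, "import_task_group", &mut redis_client)
    .await
    .unwrap();
  ensure_consumer_group(&stream, "import_task_group", &mut redis_client)
    .await
    .unwrap();
}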

struct UnAckTask {
  stream_id: StreamId,
  task: ImportTask,
}

async fn get_un_ack_tasks(
  stream_key: &str,
  group_name: &str,
  consumer_name: &str,
  redis_client: &mut ConnectionManager,
) -> Result<Vec<UnAckTask>, anyhow::Error> {
  let reply: StreamPendingReply = redis_client.xpending(stream_key, group_name).await?;
  match reply {
    StreamPendingReply::Empty => Ok(vec![]),
    StreamPendingReply::Data(pending) => {
      let opts = StreamClaimOptions::default()
        .idle(500)
        .with_force()
        .retry(2);

      // If the start_id and end_id are the same, we only need to claim one message.
      let mut ids = Vec::with_capacity(2);
      ids.push(pending.start_id.clone());
      if pending.start_id != pending.end_id {
        ids.push(pending.end_id);
      }

      let result: StreamClaimReply = redis_client
        .xclaim_options(stream_key, group_name, consumer_name, 500, &ids, opts)
        .await?;

      let tasks = result
        .ids
        .into_iter()
        .filter_map(|stream_id| {
          ImportTask::try_from(&stream_id)
            .map(|task| UnAckTask { stream_id, task })
            .ok()
        })
        .collect::<Vec<_>>();

      trace!("Claimed tasks: {}", tasks.len());
      Ok(tasks)
    },
  }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NotionImportTask {
  pub uid: i64,
  pub task_id: Uuid,
  pub user_uuid: String,
  pub workspace_id: String,
  pub s3_key: String,
  pub host: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub enum ImportTask {
  Notion(NotionImportTask),
  Custom(serde_json::Value),
}

impl TryFrom<&StreamId> for ImportTask {
  type Error = ImportError;

  fn try_from(stream_id: &StreamId) -> Result<Self, Self::Error> {
    let task_str = match stream_id.map.get("task") {
      Some(value) => match value {
        Value::Data(data) => String::from_utf8_lossy(data).to_string(),
        _ => {
          error!("Unexpected value type for task field: {:?}", value);
          return Err(ImportError::Internal(anyhow!(
            "Unexpected value type for task field: {:?}",
            value
          )));
        },
      },
      None => {
        error!("Task field not found in Redis stream entry");
        return Err(ImportError::Internal(anyhow!(
          "Task field not found in Redis stream entry"
        )));
      },
    };

    from_str::<ImportTask>(&task_str).map_err(|err| ImportError::Internal(err.into()))
  }
}
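
Because the enum is externally tagged and renamed to camelCase, the {"notion": {...}} envelope produced by create_upload_task (later in this diff) deserializes into ImportTask::Notion. A test-style round-trip sketch (illustrative; field values are placeholders, and the serde/uuid features already used by this crate are assumed):

// Test-style sketch (illustrative only): exercises the serde derives declared above.
#[test]
fn notion_task_round_trip() {
  let task_id = uuid::Uuid::new_v4();
  let payload = serde_json::json!({
    "notion": {
      "uid": 1,
      "task_id": task_id,
      "user_uuid": uuid::Uuid::new_v4().to_string(),
      "workspace_id": "workspace-1",
      "s3_key": "workspace-1",
      "host": "https://beta.appflowy.cloud"
    }
  });

  match serde_json::from_str::<ImportTask>(&payload.to_string()) {
    Ok(ImportTask::Notion(task)) => assert_eq!(task.task_id, task_id),
    other => panic!("unexpected task: {:?}", other),
  }
}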

services/appflowy-worker/src/lib.rs (new file, 3 lines)
@@ -0,0 +1,3 @@
pub mod error;
pub mod import_worker;
pub mod s3_client;

services/appflowy-worker/src/main.rs (new file, 16 lines)
@@ -0,0 +1,16 @@
mod application;
mod config;
pub mod error;
pub mod import_worker;
pub(crate) mod s3_client;

use crate::application::run_server;
use crate::config::Config;
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
  let listener = TcpListener::bind("0.0.0.0:4001").await.unwrap();
  let config = Config::from_env().expect("failed to load config");
  run_server(listener, config).await
}

services/appflowy-worker/src/s3_client.rs (new file, 121 lines)
@@ -0,0 +1,121 @@
use crate::error::WorkerError;
use anyhow::anyhow;
use aws_sdk_s3::error::SdkError;

use aws_sdk_s3::operation::get_object::GetObjectError;
use aws_sdk_s3::primitives::ByteStream;
use axum::async_trait;
use std::ops::Deref;
use tokio_util::compat::TokioAsyncReadCompatExt;

#[async_trait]
pub trait S3Client: Send + Sync {
  async fn get_blob(&self, object_key: &str) -> Result<S3StreamResponse, WorkerError>;
  async fn put_blob(
    &self,
    object_key: &str,
    content: ByteStream,
    content_type: Option<&str>,
  ) -> Result<(), WorkerError>;
  async fn delete_blob(&self, object_key: &str) -> Result<(), WorkerError>;
}

#[derive(Clone, Debug)]
pub struct S3ClientImpl {
  pub inner: aws_sdk_s3::Client,
  pub bucket: String,
}

impl Deref for S3ClientImpl {
  type Target = aws_sdk_s3::Client;

  fn deref(&self) -> &Self::Target {
    &self.inner
  }
}

#[async_trait]
impl S3Client for S3ClientImpl {
  async fn get_blob(&self, object_key: &str) -> Result<S3StreamResponse, WorkerError> {
    match self
      .inner
      .get_object()
      .bucket(&self.bucket)
      .key(object_key)
      .send()
      .await
    {
      Ok(output) => {
        let stream = output.body.into_async_read().compat();
        let content_type = output.content_type;
        Ok(S3StreamResponse {
          stream: Box::new(stream),
          content_type,
        })
      },
      Err(SdkError::ServiceError(service_err)) => match service_err.err() {
        GetObjectError::NoSuchKey(_) => Err(WorkerError::RecordNotFound(format!(
          "blob not found for key:{object_key}"
        ))),
        _ => Err(WorkerError::from(anyhow!(
          "Failed to get object from S3: {:?}",
          service_err
        ))),
      },
      Err(err) => Err(WorkerError::from(anyhow!(
        "Failed to get object from S3: {}",
        err
      ))),
    }
  }

  async fn put_blob(
    &self,
    object_key: &str,
    content: ByteStream,
    content_type: Option<&str>,
  ) -> Result<(), WorkerError> {
    match self
      .inner
      .put_object()
      .bucket(&self.bucket)
      .key(object_key)
      .body(content)
      .content_type(content_type.unwrap_or("application/octet-stream"))
      .send()
      .await
    {
      Ok(_) => Ok(()),
      Err(err) => Err(WorkerError::from(anyhow!(
        "Failed to put object to S3: {}",
        err
      ))),
    }
  }

  async fn delete_blob(&self, object_key: &str) -> Result<(), WorkerError> {
    match self
      .inner
      .delete_object()
      .bucket(&self.bucket)
      .key(object_key)
      .send()
      .await
    {
      Ok(_) => Ok(()),
      Err(SdkError::ServiceError(service_err)) => Err(WorkerError::from(anyhow!(
        "Failed to delete object from S3: {:?}",
        service_err
      ))),
      Err(err) => Err(WorkerError::from(anyhow!(
        "Failed to delete object from S3: {}",
        err
      ))),
    }
  }
}

pub struct S3StreamResponse {
  pub stream: Box<dyn futures::AsyncBufRead + Unpin + Send>,
  pub content_type: Option<String>,
}
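
A usage sketch for S3ClientImpl, illustrative only: the environment-based config and the aws-config crate are assumptions, the bucket name is a placeholder, and the helper simply drains a get_blob stream into memory via futures::AsyncReadExt.

// Illustrative only: build the client from environment config and read one object.
use futures::AsyncReadExt;

async fn read_blob_to_vec(object_key: &str) -> Result<Vec<u8>, WorkerError> {
  let config = aws_config::load_from_env().await;
  let client = S3ClientImpl {
    inner: aws_sdk_s3::Client::new(&config),
    bucket: "appflowy-import".to_string(), // hypothetical bucket name
  };

  let S3StreamResponse { mut stream, .. } = client.get_blob(object_key).await?;
  let mut buf = Vec::new();
  stream
    .read_to_end(&mut buf)
    .await
    .map_err(|err| WorkerError::from(anyhow::anyhow!("failed to read blob: {}", err)))?;
  Ok(buf)
}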

services/appflowy-worker/tests/import_test.rs (new file, 238 lines)
@@ -0,0 +1,238 @@
|
|||
use anyhow::Result;
|
||||
use appflowy_worker::error::WorkerError;
|
||||
use appflowy_worker::import_worker::report::{ImportNotifier, ImportProgress};
|
||||
use appflowy_worker::import_worker::worker::{run_import_worker, ImportTask};
|
||||
use appflowy_worker::s3_client::{S3Client, S3StreamResponse};
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
use axum::async_trait;
|
||||
|
||||
use redis::aio::ConnectionManager;
|
||||
use redis::AsyncCommands;
|
||||
use redis::RedisResult;
|
||||
use serde_json::json;
|
||||
use sqlx::PgPool;
|
||||
use sqlx::__rt::timeout;
|
||||
use std::sync::{Arc, Once};
|
||||
use std::time::Duration;
|
||||
use tokio::runtime::Builder;
|
||||
use tokio::task::LocalSet;
|
||||
|
||||
use tracing_subscriber::fmt::Subscriber;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
|
||||
#[sqlx::test(migrations = false)]
|
||||
async fn create_custom_task_test(pg_pool: PgPool) {
|
||||
let redis_client = redis_connection_manager().await;
|
||||
let stream_name = uuid::Uuid::new_v4().to_string();
|
||||
let notifier = Arc::new(MockNotifier::new());
|
||||
let mut task_provider = MockTaskProvider::new(redis_client.clone(), stream_name.clone());
|
||||
let _ = run_importer_worker(
|
||||
pg_pool,
|
||||
redis_client.clone(),
|
||||
notifier.clone(),
|
||||
stream_name,
|
||||
3,
|
||||
);
|
||||
|
||||
let mut task_workspace_ids = vec![];
|
||||
// generate 5 tasks
|
||||
for _ in 0..5 {
|
||||
let workspace_id = uuid::Uuid::new_v4().to_string();
|
||||
task_workspace_ids.push(workspace_id.clone());
|
||||
task_provider
|
||||
.create_task(ImportTask::Custom(json!({"workspace_id": workspace_id})))
|
||||
.await;
|
||||
}
|
||||
|
||||
let mut rx = notifier.subscribe();
|
||||
timeout(Duration::from_secs(30), async {
|
||||
while let Ok(task) = rx.recv().await {
|
||||
task_workspace_ids.retain(|id| {
|
||||
if let ImportProgress::Finished(result) = &task {
|
||||
if result.workspace_id == *id {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
});
|
||||
|
||||
if task_workspace_ids.is_empty() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// #[tokio::test]
|
||||
// async fn consume_group_task_test() {
|
||||
// let mut redis_client = redis_client().await;
|
||||
// let stream_name = format!("import_task_stream_{}", uuid::Uuid::new_v4());
|
||||
// let consumer_group = "import_task_group";
|
||||
// let consumer_name = "appflowy_worker";
|
||||
// let workspace_id = uuid::Uuid::new_v4().to_string();
|
||||
// let user_uuid = uuid::Uuid::new_v4().to_string();
|
||||
//
|
||||
// let _: RedisResult<()> = redis_client.xgroup_create_mkstream(&stream_name, consumer_group, "0");
|
||||
// // 1. insert a task
|
||||
// let task = json!({
|
||||
// "notion": {
|
||||
// "uid": 1,
|
||||
// "user_uuid": user_uuid,
|
||||
// "workspace_id": workspace_id,
|
||||
// "s3_key": workspace_id,
|
||||
// "file_type": "zip",
|
||||
// "host": "http::localhost",
|
||||
// }
|
||||
// });
|
||||
//
|
||||
// let _: () = redis_client
|
||||
// .xadd(&stream_name, "*", &[("task", task.to_string())])
|
||||
// .unwrap();
|
||||
// tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;
|
||||
//
|
||||
// // 2. consume a task
|
||||
// let options = StreamReadOptions::default()
|
||||
// .group(consumer_group, consumer_name)
|
||||
// .count(3);
|
||||
//
|
||||
// let tasks: StreamReadReply = redis_client
|
||||
// .xread_options(&[&stream_name], &[">"], &options)
|
||||
// .unwrap();
|
||||
// assert!(!tasks.keys.is_empty());
|
||||
//
|
||||
// for stream_key in tasks.keys {
|
||||
// for stream_id in stream_key.ids {
|
||||
// let task_str = match stream_id.map.get("task") {
|
||||
// Some(value) => match value {
|
||||
// Value::Data(data) => String::from_utf8_lossy(data).to_string(),
|
||||
// _ => panic!("Task field is not a string"),
|
||||
// },
|
||||
// None => continue,
|
||||
// };
|
||||
//
|
||||
// let _ = from_str::<ImportTask>(&task_str).unwrap();
|
||||
// let _: () = redis_client
|
||||
// .xack(&stream_name, consumer_group, &[stream_id.id.clone()])
|
||||
// .unwrap();
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
pub async fn redis_connection_manager() -> redis::aio::ConnectionManager {
|
||||
let redis_uri = "redis://localhost:6379";
|
||||
redis::Client::open(redis_uri)
|
||||
.expect("failed to create redis client")
|
||||
.get_connection_manager()
|
||||
.await
|
||||
.expect("failed to get redis connection manager")
|
||||
}
|
||||
|
||||
fn run_importer_worker(
|
||||
pg_pool: PgPool,
|
||||
redis_client: ConnectionManager,
|
||||
notifier: Arc<dyn ImportNotifier>,
|
||||
stream_name: String,
|
||||
tick_interval_secs: u64,
|
||||
) -> std::thread::JoinHandle<()> {
|
||||
setup_log();
|
||||
|
||||
std::thread::spawn(move || {
|
||||
let runtime = Builder::new_current_thread().enable_all().build().unwrap();
|
||||
let local_set = LocalSet::new();
|
||||
let import_worker_fut = local_set.run_until(run_import_worker(
|
||||
pg_pool,
|
||||
redis_client,
|
||||
Arc::new(MockS3Client),
|
||||
notifier,
|
||||
&stream_name,
|
||||
tick_interval_secs,
|
||||
));
|
||||
runtime.block_on(import_worker_fut).unwrap();
|
||||
})
|
||||
}
|
||||
|
||||
struct MockTaskProvider {
|
||||
redis_client: ConnectionManager,
|
||||
stream_name: String,
|
||||
}
|
||||
|
||||
impl MockTaskProvider {
|
||||
fn new(redis_client: ConnectionManager, stream_name: String) -> Self {
|
||||
Self {
|
||||
redis_client,
|
||||
stream_name,
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_task(&mut self, task: ImportTask) {
|
||||
let task = serde_json::to_string(&task).unwrap();
|
||||
let result: RedisResult<()> = self
|
||||
.redis_client
|
||||
.xadd(&self.stream_name, "*", &[("task", task.to_string())])
|
||||
.await;
|
||||
result.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
struct MockNotifier {
|
||||
tx: tokio::sync::broadcast::Sender<ImportProgress>,
|
||||
}
|
||||
|
||||
impl MockNotifier {
|
||||
fn new() -> Self {
|
||||
let (tx, _) = tokio::sync::broadcast::channel(100);
|
||||
Self { tx }
|
||||
}
|
||||
fn subscribe(&self) -> tokio::sync::broadcast::Receiver<ImportProgress> {
|
||||
self.tx.subscribe()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ImportNotifier for MockNotifier {
|
||||
async fn notify_progress(&self, progress: ImportProgress) {
|
||||
println!("notify_progress: {:?}", progress);
|
||||
self.tx.send(progress).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
struct MockS3Client;
|
||||
|
||||
#[async_trait]
|
||||
impl S3Client for MockS3Client {
|
||||
async fn get_blob(&self, _object_key: &str) -> Result<S3StreamResponse, WorkerError> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn put_blob(
|
||||
&self,
|
||||
_object_key: &str,
|
||||
_content: ByteStream,
|
||||
_content_type: Option<&str>,
|
||||
) -> std::result::Result<(), WorkerError> {
|
||||
todo!()
|
||||
}
|
||||
|
||||
async fn delete_blob(&self, _object_key: &str) -> Result<(), WorkerError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn setup_log() {
|
||||
static START: Once = Once::new();
|
||||
START.call_once(|| {
|
||||
let level = std::env::var("RUST_LOG").unwrap_or("trace".to_string());
|
||||
let mut filters = vec![];
|
||||
filters.push(format!("appflowy_worker={}", level));
|
||||
std::env::set_var("RUST_LOG", filters.join(","));
|
||||
|
||||
let subscriber = Subscriber::builder()
|
||||
.with_ansi(true)
|
||||
.with_env_filter(EnvFilter::from_default_env())
|
||||
.finish();
|
||||
subscriber.try_init().unwrap();
|
||||
});
|
||||
}

services/appflowy-worker/tests/main.rs (new file, 1 line)
@@ -0,0 +1 @@
mod import_test;

@@ -301,6 +301,7 @@ async fn answer_stream_v2_handler(
  {
    Ok(answer_stream) => {
      let new_answer_stream = answer_stream.map_err(AppError::from);

      Ok(
        HttpResponse::Ok()
          .content_type("text/event-stream")

src/api/data_import.rs (new file, 141 lines)
@@ -0,0 +1,141 @@
use crate::state::AppState;
use actix_multipart::Multipart;
use actix_web::web::Data;
use actix_web::{web, HttpRequest, Scope};
use anyhow::anyhow;
use app_error::AppError;
use authentication::jwt::UserUuid;
use aws_sdk_s3::primitives::ByteStream;
use database::file::BucketClient;

use crate::biz::workspace::ops::{create_empty_workspace, create_upload_task};
use database::workspace::select_import_task;
use futures_util::StreamExt;
use shared_entity::dto::import_dto::{ImportTaskDetail, ImportTaskStatus, UserImportTask};
use shared_entity::response::{AppResponse, JsonAppResponse};
use std::env::temp_dir;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tracing::{error, trace};
use uuid::Uuid;

pub fn data_import_scope() -> Scope {
  web::scope("/api/import").service(
    web::resource("")
      .route(web::post().to(import_data_handler))
      .route(web::get().to(get_import_detail_handler)),
  )
}

async fn get_import_detail_handler(
  user_uuid: UserUuid,
  state: Data<AppState>,
) -> actix_web::Result<JsonAppResponse<UserImportTask>> {
  let uid = state.user_cache.get_user_uid(&user_uuid).await?;
  let tasks = select_import_task(uid, &state.pg_pool, None)
    .await
    .map(|tasks| {
      tasks
        .into_iter()
        .map(|task| ImportTaskDetail {
          task_id: task.task_id.to_string(),
          file_size: task.file_size as u64,
          created_at: task.created_at.timestamp(),
          status: ImportTaskStatus::from(task.status),
        })
        .collect::<Vec<_>>()
    })?;

  Ok(
    AppResponse::Ok()
      .with_data(UserImportTask {
        tasks,
        has_more: false,
      })
      .into(),
  )
}

async fn import_data_handler(
  user_uuid: UserUuid,
  state: Data<AppState>,
  mut payload: Multipart,
  req: HttpRequest,
) -> actix_web::Result<JsonAppResponse<()>> {
  let uid = state.user_cache.get_user_uid(&user_uuid).await?;
  let host = get_host_from_request(&req);

  let time = chrono::Local::now().format("%d/%m/%Y %H:%M").to_string();
  let workspace_name = format!("import-{}", time);

  // file_name must be unique
  let file_name = format!("{}.zip", Uuid::new_v4());
  let file_path = temp_dir().join(&file_name);

  let mut file_size = 0;
  let mut file = File::create(&file_path).await?;
  while let Some(item) = payload.next().await {
    let mut field = item?;
    while let Some(chunk) = field.next().await {
      let data = chunk?;
      file_size += data.len();
      file.write_all(&data).await?;
    }
  }
  file.shutdown().await?;
  drop(file);

  let workspace = create_empty_workspace(
    &state.pg_pool,
    &state.workspace_access_control,
    &state.collab_access_control_storage,
    &user_uuid,
    uid,
    &workspace_name,
  )
  .await?;

  let workspace_id = workspace.workspace_id.to_string();
  trace!(
    "User:{} import data:{} to new workspace:{}",
    uid,
    file_size,
    workspace_id
  );
  let stream = ByteStream::from_path(&file_path).await.map_err(|e| {
    AppError::Internal(anyhow!("Failed to create ByteStream from file path: {}", e))
  })?;
  state
    .bucket_client
    .put_blob_as_content_type(&workspace_id, stream, "zip")
    .await?;

  // delete the file after uploading
  tokio::spawn(async move {
    if let Err(err) = tokio::fs::remove_file(file_path).await {
      error!("Failed to delete file after uploading: {}", err);
    }
  });

  create_upload_task(
    uid,
    &user_uuid,
    &workspace_id,
    file_size,
    &host,
    &state.redis_connection_manager,
    &state.pg_pool,
  )
  .await?;

  Ok(AppResponse::Ok().into())
}

fn get_host_from_request(req: &HttpRequest) -> String {
  req
    .headers()
    .get("X-Host")
    .and_then(|h| h.to_str().ok())
    .unwrap_or("https://beta.appflowy.cloud")
    .to_string()
}
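
For reference, a hypothetical client-side call to this endpoint (not part of the commit): it assumes reqwest with the multipart feature, a placeholder base URL and token, and a multipart field named "file" (the handler reads every field, so the name is not significant). The X-Host header is the one get_host_from_request reads.

// Hypothetical client-side sketch; not part of this commit.
async fn upload_notion_zip(base_url: &str, access_token: &str, zip: Vec<u8>) -> anyhow::Result<()> {
  let part = reqwest::multipart::Part::bytes(zip)
    .file_name("notion-export.zip")
    .mime_str("application/zip")?;
  let form = reqwest::multipart::Form::new().part("file", part);

  let resp = reqwest::Client::new()
    .post(format!("{}/api/import", base_url))
    .bearer_auth(access_token)
    // get_host_from_request falls back to beta.appflowy.cloud when this header is absent.
    .header("X-Host", base_url)
    .multipart(form)
    .send()
    .await?;
  anyhow::ensure!(resp.status().is_success(), "import failed: {}", resp.status());
  Ok(())
}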
|
|
@ -1,8 +1,8 @@
|
|||
pub mod access_request;
|
||||
pub mod ai;
|
||||
pub mod chat;
|
||||
pub mod data_import;
|
||||
pub mod file_storage;
|
||||
|
||||
pub mod access_request;
|
||||
pub mod history;
|
||||
pub mod metrics;
|
||||
pub mod search;
|
||||
|
|
|
@ -29,13 +29,13 @@ use appflowy_collaborate::actix_ws::server::RealtimeServerActor;
|
|||
use appflowy_collaborate::collab::access_control::{
|
||||
CollabAccessControlImpl, CollabStorageAccessControlImpl, RealtimeCollabAccessControlImpl,
|
||||
};
|
||||
use appflowy_collaborate::collab::cache::CollabCache;
|
||||
use appflowy_collaborate::collab::storage::CollabStorageImpl;
|
||||
use appflowy_collaborate::command::{CLCommandReceiver, CLCommandSender};
|
||||
use appflowy_collaborate::indexer::IndexerProvider;
|
||||
use appflowy_collaborate::shared_state::RealtimeSharedState;
|
||||
use appflowy_collaborate::snapshot::SnapshotControl;
|
||||
use appflowy_collaborate::CollaborationServer;
|
||||
use database::collab::cache::CollabCache;
|
||||
use database::file::s3_client_impl::{AwsS3BucketClientImpl, S3BucketStorage};
|
||||
use gotrue::grant::{Grant, PasswordGrant};
|
||||
use snowflake::Snowflake;
|
||||
|
@ -45,6 +45,7 @@ use workspace_access::WorkspaceAccessControlImpl;
|
|||
use crate::api::access_request::access_request_scope;
|
||||
use crate::api::ai::ai_completion_scope;
|
||||
use crate::api::chat::chat_scope;
|
||||
use crate::api::data_import::data_import_scope;
|
||||
use crate::api::file_storage::file_storage_scope;
|
||||
use crate::api::history::history_scope;
|
||||
use crate::api::metrics::metrics_scope;
|
||||
|
@ -172,6 +173,7 @@ pub async fn run_actix_server(
|
|||
.service(metrics_scope())
|
||||
.service(search_scope())
|
||||
.service(template_scope())
|
||||
.service(data_import_scope())
|
||||
.service(access_request_scope())
|
||||
.app_data(Data::new(state.metrics.registry.clone()))
|
||||
.app_data(Data::new(state.metrics.request_metrics.clone()))
|
||||
|
|
|
@ -9,7 +9,7 @@ use tracing::{instrument, trace};
|
|||
use access_control::act::Action;
|
||||
use access_control::collab::CollabAccessControl;
|
||||
use app_error::AppError;
|
||||
use appflowy_collaborate::collab::cache::CollabCache;
|
||||
use database::collab::cache::CollabCache;
|
||||
use database_entity::dto::AFAccessLevel;
|
||||
|
||||
use crate::api::workspace::{COLLAB_PATTERN, V1_COLLAB_PATTERN};
|
||||
|
|
|
@ -3,6 +3,7 @@ use std::{ops::DerefMut, path::Path};
|
|||
use actix_multipart::form::bytes::Bytes as MPBytes;
|
||||
use anyhow::Context;
|
||||
use app_error::ErrorCode;
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
use database::{
|
||||
file::{s3_client_impl::AwsS3BucketClientImpl, BucketClient, ResponseBlob},
|
||||
template::*,
|
||||
|
@ -339,7 +340,11 @@ pub async fn upload_avatar(
|
|||
|
||||
let object_key = avatar_object_key(&file_id);
|
||||
client
|
||||
.put_blob_as_content_type(&object_key, avatar.data.as_ref(), &content_type)
|
||||
.put_blob_as_content_type(
|
||||
&object_key,
|
||||
ByteStream::from(avatar.data.to_vec()),
|
||||
&content_type,
|
||||
)
|
||||
.await?;
|
||||
Ok(file_id.to_string())
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ use collab::core::origin::CollabOrigin;
|
|||
use collab::preclude::Collab;
|
||||
use collab_database::workspace_database::WorkspaceDatabaseBody;
|
||||
use collab_entity::CollabType;
|
||||
use collab_folder::{Folder, FolderData, Workspace};
|
||||
use collab_user::core::UserAwareness;
|
||||
use database::collab::CollabStorage;
|
||||
use database::pg_row::AFWorkspaceRow;
|
||||
|
@ -111,7 +112,7 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
async fn create_user_awareness(
|
||||
pub(crate) async fn create_user_awareness(
|
||||
uid: &i64,
|
||||
user_uuid: &Uuid,
|
||||
workspace_id: &str,
|
||||
|
@ -147,7 +148,43 @@ async fn create_user_awareness(
|
|||
Ok(object_id)
|
||||
}
|
||||
|
||||
async fn create_workspace_database_collab(
|
||||
pub(crate) async fn create_workspace_collab(
|
||||
uid: i64,
|
||||
workspace_id: &str,
|
||||
name: &str,
|
||||
storage: &Arc<CollabAccessControlStorage>,
|
||||
txn: &mut Transaction<'_, sqlx::Postgres>,
|
||||
) -> Result<(), AppError> {
|
||||
let workspace = Workspace::new(workspace_id.to_string(), name.to_string(), uid);
|
||||
let folder_data = FolderData::new(workspace);
|
||||
|
||||
let collab = Collab::new_with_origin(CollabOrigin::Empty, workspace_id, vec![], false);
|
||||
let folder = Folder::create(uid, collab, None, folder_data);
|
||||
let encode_collab = folder
|
||||
.encode_collab()
|
||||
.map_err(|err| AppError::Internal(err.into()))?;
|
||||
|
||||
let encoded_collab_v1 = encode_collab
|
||||
.encode_to_bytes()
|
||||
.map_err(|err| AppError::Internal(anyhow::Error::from(err)))?;
|
||||
|
||||
storage
|
||||
.insert_new_collab_with_transaction(
|
||||
workspace_id,
|
||||
&uid,
|
||||
CollabParams {
|
||||
object_id: workspace_id.to_string(),
|
||||
encoded_collab_v1: encoded_collab_v1.into(),
|
||||
collab_type: CollabType::Folder,
|
||||
embeddings: None,
|
||||
},
|
||||
txn,
|
||||
)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn create_workspace_database_collab(
|
||||
workspace_id: &str,
|
||||
uid: &i64,
|
||||
object_id: &str,
|
||||
|
@ -156,17 +193,13 @@ async fn create_workspace_database_collab(
|
|||
initial_database_records: Vec<(String, String)>,
|
||||
) -> Result<(), AppError> {
|
||||
let collab_type = CollabType::WorkspaceDatabase;
|
||||
let mut collab = Collab::new_with_origin(CollabOrigin::Empty, object_id, vec![], false);
|
||||
{
|
||||
let workspace_database_body = WorkspaceDatabaseBody::create(&mut collab);
|
||||
let mut txn = collab.context.transact_mut();
|
||||
for (object_id, database_id) in initial_database_records {
|
||||
workspace_database_body.add_database(&mut txn, &database_id, vec![object_id]);
|
||||
}
|
||||
};
|
||||
|
||||
let encode_collab = collab
|
||||
.encode_collab_v1(|collab| collab_type.validate_require_data(collab))
|
||||
let collab = Collab::new_with_origin(CollabOrigin::Empty, object_id, vec![], false);
|
||||
let mut workspace_database_body = WorkspaceDatabaseBody::create(collab);
|
||||
for (object_id, database_id) in initial_database_records {
|
||||
workspace_database_body.add_database(&database_id, vec![object_id]);
|
||||
}
|
||||
let encode_collab = workspace_database_body
|
||||
.encode_collab_v1()
|
||||
.map_err(|err| AppError::Internal(err.into()))?;
|
||||
|
||||
let encoded_collab_v1 = encode_collab
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
use authentication::jwt::OptionalUserUuid;
|
||||
use authentication::jwt::{OptionalUserUuid, UserUuid};
|
||||
use database_entity::dto::AFWorkspaceSettingsChange;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use std::ops::DerefMut;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::{anyhow, Context};
|
||||
use redis::AsyncCommands;
|
||||
use serde_json::json;
|
||||
use sqlx::{types::uuid, PgPool};
|
||||
use tracing::instrument;
|
||||
use uuid::Uuid;
|
||||
|
@ -30,9 +32,12 @@ use shared_entity::dto::workspace_dto::{
|
|||
use shared_entity::response::AppResponseError;
|
||||
use workspace_template::document::getting_started::GettingStartedTemplate;
|
||||
|
||||
use crate::biz::user::user_init::initialize_workspace_for_user;
|
||||
use crate::biz::user::user_init::{
|
||||
create_user_awareness, create_workspace_collab, create_workspace_database_collab,
|
||||
initialize_workspace_for_user,
|
||||
};
|
||||
use crate::mailer::{Mailer, WorkspaceInviteMailerParam};
|
||||
use crate::state::GoTrueAdmin;
|
||||
use crate::state::{GoTrueAdmin, RedisConnectionManager};
|
||||
|
||||
const MAX_COMMENT_LENGTH: usize = 5000;
|
||||
|
||||
|
@ -55,6 +60,61 @@ pub async fn delete_workspace_for_user(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Create an empty workspace with default folder, workspace database and user awareness collab
|
||||
/// object.
|
||||
pub async fn create_empty_workspace(
|
||||
pg_pool: &PgPool,
|
||||
workspace_access_control: &impl WorkspaceAccessControl,
|
||||
collab_storage: &Arc<CollabAccessControlStorage>,
|
||||
user_uuid: &Uuid,
|
||||
user_uid: i64,
|
||||
workspace_name: &str,
|
||||
) -> Result<AFWorkspace, AppResponseError> {
|
||||
let mut txn = pg_pool.begin().await?;
|
||||
let new_workspace_row = insert_user_workspace(&mut txn, user_uuid, workspace_name, false).await?;
|
||||
workspace_access_control
|
||||
.insert_role(&user_uid, &new_workspace_row.workspace_id, AFRole::Owner)
|
||||
.await?;
|
||||
let workspace_id = new_workspace_row.workspace_id.to_string();
|
||||
|
||||
// create CollabType::Folder
|
||||
create_workspace_collab(
|
||||
user_uid,
|
||||
&workspace_id,
|
||||
workspace_name,
|
||||
collab_storage,
|
||||
&mut txn,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// create CollabType::WorkspaceDatabase
|
||||
if let Some(database_storage_id) = new_workspace_row.database_storage_id.as_ref() {
|
||||
let workspace_database_object_id = database_storage_id.to_string();
|
||||
create_workspace_database_collab(
|
||||
&workspace_id,
|
||||
&user_uid,
|
||||
&workspace_database_object_id,
|
||||
collab_storage,
|
||||
&mut txn,
|
||||
vec![],
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// create CollabType::UserAwareness
|
||||
create_user_awareness(
|
||||
&user_uid,
|
||||
user_uuid,
|
||||
&workspace_id,
|
||||
collab_storage,
|
||||
&mut txn,
|
||||
)
|
||||
.await?;
|
||||
let new_workspace = AFWorkspace::try_from(new_workspace_row)?;
|
||||
txn.commit().await?;
|
||||
Ok(new_workspace)
|
||||
}
|
||||
|
||||
pub async fn create_workspace_for_user(
|
||||
pg_pool: &PgPool,
|
||||
workspace_access_control: &impl WorkspaceAccessControl,
|
||||
|
@ -64,7 +124,7 @@ pub async fn create_workspace_for_user(
|
|||
workspace_name: &str,
|
||||
) -> Result<AFWorkspace, AppResponseError> {
|
||||
let mut txn = pg_pool.begin().await?;
|
||||
let new_workspace_row = insert_user_workspace(&mut txn, user_uuid, workspace_name).await?;
|
||||
let new_workspace_row = insert_user_workspace(&mut txn, user_uuid, workspace_name, true).await?;
|
||||
|
||||
workspace_access_control
|
||||
.insert_role(&user_uid, &new_workspace_row.workspace_id, AFRole::Owner)
|
||||
|
@ -633,3 +693,45 @@ async fn check_if_user_is_allowed_to_delete_comment(
|
|||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn create_upload_task(
|
||||
uid: i64,
|
||||
user_uuid: &UserUuid,
|
||||
workspace_id: &str,
|
||||
file_size: usize,
|
||||
host: &str,
|
||||
redis_client: &RedisConnectionManager,
|
||||
pg_pool: &PgPool,
|
||||
) -> Result<(), AppError> {
|
||||
let task_id = Uuid::new_v4();
|
||||
|
||||
// Insert the task into the database
|
||||
insert_import_task(
|
||||
task_id,
|
||||
file_size as i64,
|
||||
workspace_id.to_string(),
|
||||
uid,
|
||||
Some(json!({"host": host})),
|
||||
pg_pool,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// This task will be deserialized into ImportTask
|
||||
let task = json!({
|
||||
"notion": {
|
||||
"uid": uid,
|
||||
"user_uuid": user_uuid,
|
||||
"task_id": task_id,
|
||||
"workspace_id": workspace_id,
|
||||
"s3_key": workspace_id,
|
||||
"host": host,
|
||||
}
|
||||
});
|
||||
let _: () = redis_client
|
||||
.clone()
|
||||
.xadd("import_task_stream", "*", &[("task", task.to_string())])
|
||||
.await
|
||||
.map_err(|err| AppError::Internal(anyhow!("Failed to push task to Redis stream: {}", err)))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
@ -1,7 +1,5 @@
|
|||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
use app_error::ErrorCode;
|
||||
use anyhow::anyhow;
|
||||
use app_error::{AppError, ErrorCode};
|
||||
use appflowy_collaborate::collab::storage::CollabAccessControlStorage;
|
||||
use chrono::DateTime;
|
||||
use collab::core::collab::Collab;
|
||||
|
@ -18,6 +16,8 @@ use rayon::iter::{IntoParallelIterator, ParallelIterator};
|
|||
use shared_entity::dto::workspace_dto::{FolderView, PageCollab, PageCollabData};
|
||||
use shared_entity::response::AppResponseError;
|
||||
use sqlx::PgPool;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::biz::collab::folder_view::{
|
||||
|
@ -131,12 +131,13 @@ async fn get_page_collab_data_for_database(
|
|||
CollabType::WorkspaceDatabase,
|
||||
)
|
||||
.await?;
|
||||
let mut ws_db_collab = collab_from_doc_state(ws_db.doc_state.to_vec(), &ws_db_oid)?;
|
||||
let ws_db_body = WorkspaceDatabaseBody::open(&mut ws_db_collab);
|
||||
let ws_db_collab = collab_from_doc_state(ws_db.doc_state.to_vec(), &ws_db_oid)?;
|
||||
let ws_db_body = WorkspaceDatabaseBody::open(ws_db_collab).map_err(|err| {
|
||||
AppError::Internal(anyhow!("Failed to open workspace database body: {}", err))
|
||||
})?;
|
||||
let db_oid = {
|
||||
let txn = ws_db_collab.transact();
|
||||
ws_db_body
|
||||
.get_database_meta_with_view_id(&txn, view_id)
|
||||
.get_database_meta_with_view_id(view_id)
|
||||
.ok_or(AppResponseError::new(
|
||||
ErrorCode::NoRequiredData,
|
||||
format!("Database view {} not found", view_id),
|
||||
|
|
|
@ -3,6 +3,8 @@ use appflowy_collaborate::collab::storage::CollabAccessControlStorage;
|
|||
use collab::core::collab::DataSource;
|
||||
use collab::preclude::Collab;
|
||||
|
||||
use anyhow::anyhow;
|
||||
use bytes::Bytes;
|
||||
use collab_database::database::gen_row_id;
|
||||
use collab_database::database::DatabaseBody;
|
||||
use collab_database::entity::FieldType;
|
||||
|
@ -188,7 +190,7 @@ impl PublishCollabDuplicator {
|
|||
// update database if any
|
||||
if !workspace_databases.is_empty() {
|
||||
let ws_db_oid = select_workspace_database_oid(&pg_pool, &dest_workspace_id.parse()?).await?;
|
||||
let mut ws_db_collab = {
|
||||
let ws_db_collab = {
|
||||
let ws_database_ec = get_latest_collab_encoded(
|
||||
collab_storage.clone(),
|
||||
GetCollabOrigin::User {
|
||||
|
@ -202,27 +204,41 @@ impl PublishCollabDuplicator {
|
|||
collab_from_doc_state(ws_database_ec.doc_state.to_vec(), &ws_db_oid)?
|
||||
};
|
||||
|
||||
let ws_db_body = WorkspaceDatabaseBody::open(&mut ws_db_collab);
|
||||
let mut ws_db_body = WorkspaceDatabaseBody::open(ws_db_collab).map_err(|err| {
|
||||
AppError::Unhandled(format!("failed to open workspace database: {}", err))
|
||||
})?;
|
||||
let (ws_db_updates, updated_ws_w_db_collab) = tokio::task::spawn_blocking(move || {
|
||||
let ws_db_updates = {
|
||||
let mut txn_wrapper = ws_db_collab.transact_mut();
|
||||
for (db_collab_id, linked_views) in &workspace_databases {
|
||||
ws_db_body.add_database(&mut txn_wrapper, db_collab_id, linked_views.clone());
|
||||
}
|
||||
txn_wrapper.encode_update_v1()
|
||||
let view_ids_by_database_id = workspace_databases
|
||||
.into_iter()
|
||||
.map(|(database_id, view_ids)| (database_id, view_ids.into_iter().collect()))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
ws_db_body
|
||||
.batch_add_database(view_ids_by_database_id)
|
||||
.encode_update_v1()
|
||||
};
|
||||
let updated_ws_w_db_collab = collab_to_bin(ws_db_collab, CollabType::WorkspaceDatabase);
|
||||
|
||||
let updated_ws_w_db_collab = ws_db_body
|
||||
.encode_collab_v1()
|
||||
.map(|encoded_collab| encoded_collab.encode_to_bytes().unwrap())
|
||||
.map_err(|err| {
|
||||
AppError::Internal(anyhow!("failed to encode workspace database: {}", err))
|
||||
});
|
||||
|
||||
(ws_db_updates, updated_ws_w_db_collab)
|
||||
})
|
||||
.await?;
|
||||
|
||||
let updated_ws_w_db_collab = updated_ws_w_db_collab?;
|
||||
|
||||
collab_storage
|
||||
.insert_new_collab_with_transaction(
|
||||
&dest_workspace_id,
|
||||
&duplicator_uid,
|
||||
CollabParams {
|
||||
object_id: ws_db_oid.clone(),
|
||||
encoded_collab_v1: updated_ws_w_db_collab.await?.into(),
|
||||
encoded_collab_v1: Bytes::from(updated_ws_w_db_collab),
|
||||
collab_type: CollabType::WorkspaceDatabase,
|
||||
embeddings: None,
|
||||
},
|
||||
|
|
|
@ -13,12 +13,12 @@ use access_control::metrics::AccessControlMetrics;
|
|||
use app_error::AppError;
|
||||
use appflowy_ai_client::client::AppFlowyAIClient;
|
||||
use appflowy_collaborate::collab::access_control::CollabAccessControlImpl;
|
||||
use appflowy_collaborate::collab::cache::CollabCache;
|
||||
use appflowy_collaborate::collab::storage::CollabAccessControlStorage;
|
||||
use appflowy_collaborate::indexer::IndexerProvider;
|
||||
use appflowy_collaborate::metrics::CollabMetrics;
|
||||
use appflowy_collaborate::shared_state::RealtimeSharedState;
|
||||
use appflowy_collaborate::CollabRealtimeMetrics;
|
||||
use database::collab::cache::CollabCache;
|
||||
use database::file::s3_client_impl::{AwsS3BucketClientImpl, S3BucketStorage};
|
||||
use database::user::{select_all_uid_uuid, select_uid_from_uuid};
|
||||
use gotrue::grant::{Grant, PasswordGrant};
|
||||
|
|
|
@ -12,11 +12,11 @@ use sqlx::PgPool;
|
|||
use tokio::time::sleep;
|
||||
|
||||
use app_error::ErrorCode;
|
||||
use appflowy_collaborate::collab::cache::CollabCache;
|
||||
use appflowy_collaborate::collab::mem_cache::CollabMemCache;
|
||||
use appflowy_collaborate::collab::queue::StorageQueue;
|
||||
use appflowy_collaborate::collab::WritePriority;
|
||||
use client_api_test::*;
|
||||
use database::collab::cache::CollabCache;
|
||||
use database::collab::mem_cache::CollabMemCache;
|
||||
use database::collab::CollabMetadata;
|
||||
use database_entity::dto::{
|
||||
CollabParams, CreateCollabParams, DeleteCollabParams, QueryCollab, QueryCollabParams,
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
use crate::sql_test::util::{generate_random_bytes, setup_db, test_create_user};
|
||||
|
||||
use collab_entity::CollabType;
|
||||
use database::collab::{insert_into_af_collab, select_collab_meta_from_af_collab};
|
||||
use database::collab::{
|
||||
insert_into_af_collab, insert_into_af_collab_bulk_for_user, select_blob_from_af_collab,
|
||||
select_collab_meta_from_af_collab,
|
||||
};
|
||||
use database_entity::dto::CollabParams;
|
||||
use sqlx::PgPool;
|
||||
|
||||
|
@ -18,12 +21,18 @@ async fn insert_collab_sql_test(pool: PgPool) {
|
|||
|
||||
let mut object_ids = vec![];
|
||||
|
||||
let data_sizes = vec![1024, 10240, 102400, 1024000]; // Example sizes: 1KB, 10KB, 100KB, 1MB
|
||||
let data_sizes = vec![
|
||||
5120, // 5 KB
|
||||
10240, // 10 KB
|
||||
102400, // 100 KB
|
||||
512000, // 500 KB
|
||||
5120000, // 5 MB
|
||||
];
|
||||
let start_time = std::time::Instant::now();
|
||||
for &data_size in &data_sizes {
|
||||
let encoded_collab_v1 = generate_random_bytes(data_size);
|
||||
let object_id = uuid::Uuid::new_v4().to_string();
|
||||
object_ids.push(object_id.clone());
|
||||
let start_time = std::time::Instant::now(); // Start timing
|
||||
let mut txn = pool.begin().await.unwrap();
|
||||
let params = CollabParams {
|
||||
object_id,
|
||||
|
@ -35,12 +44,9 @@ async fn insert_collab_sql_test(pool: PgPool) {
|
|||
.await
|
||||
.unwrap();
|
||||
txn.commit().await.unwrap();
|
||||
let duration = start_time.elapsed(); // End timing
|
||||
println!(
|
||||
"Data size: {} bytes, Insert time: {:?}",
|
||||
data_size, duration
|
||||
);
|
||||
}
|
||||
let duration = start_time.elapsed();
|
||||
println!("Insert time: {:?}", duration);
|
||||
|
||||
for object_id in object_ids {
|
||||
let meta = select_collab_meta_from_af_collab(&pool, &object_id, &CollabType::Unknown)
|
||||
|
@ -54,3 +60,220 @@ async fn insert_collab_sql_test(pool: PgPool) {
|
|||
assert!(meta.deleted_at.is_none());
|
||||
}
|
||||
}
|
||||
#[sqlx::test(migrations = false)]
|
||||
async fn insert_bulk_collab_sql_test(pool: PgPool) {
|
||||
setup_db(&pool).await.unwrap();
|
||||
|
||||
let user_uuid = uuid::Uuid::new_v4();
|
||||
let name = user_uuid.to_string();
|
||||
let email = format!("{}@appflowy.io", name);
|
||||
let user = test_create_user(&pool, user_uuid, &email, &name)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut object_ids = vec![];
|
||||
let data_sizes = vec![
|
||||
5120, // 5 KB
|
||||
10240, // 10 KB
|
||||
102400, // 100 KB
|
||||
512000, // 500 KB
|
||||
5120000, // 5 MB
|
||||
];
|
||||
let mut collab_params_list = vec![];
|
||||
let mut original_data_list = vec![]; // Store original data for validation
|
||||
|
||||
// Prepare bulk insert data
|
||||
for &data_size in &data_sizes {
|
||||
let encoded_collab_v1 = generate_random_bytes(data_size);
|
||||
let object_id = uuid::Uuid::new_v4().to_string();
|
||||
object_ids.push(object_id.clone());
|
||||
|
||||
let params = CollabParams {
|
||||
object_id,
|
||||
collab_type: CollabType::Unknown,
|
||||
encoded_collab_v1: encoded_collab_v1.clone().into(), // Store the original data for validation
|
||||
embeddings: None,
|
||||
};
|
||||
|
||||
collab_params_list.push(params);
|
||||
original_data_list.push(encoded_collab_v1); // Keep track of original data
|
||||
}
|
||||
|
||||
// Perform bulk insert
|
||||
let start_time = std::time::Instant::now(); // Start timing
|
||||
let mut txn = pool.begin().await.unwrap();
|
||||
insert_into_af_collab_bulk_for_user(&mut txn, &user.uid, &user.workspace_id, &collab_params_list)
|
||||
.await
|
||||
.unwrap();
|
||||
txn.commit().await.unwrap();
|
||||
let duration = start_time.elapsed();
|
||||
println!("Bulk insert time: {:?}", duration);
|
||||
|
||||
// Validate inserted data
|
||||
for (i, object_id) in object_ids.iter().enumerate() {
|
||||
let inserted_data = select_blob_from_af_collab(&pool, &CollabType::Unknown, object_id)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Ensure the inserted data matches the original data
|
||||
let original_data = &original_data_list[i];
|
||||
assert_eq!(
|
||||
inserted_data, *original_data,
|
||||
"Data mismatch for object_id: {}",
|
||||
object_id
|
||||
);
|
||||
println!(
|
||||
"Validated data size: {} bytes for object_id: {}",
|
||||
original_data.len(),
|
||||
object_id
|
||||
);
|
||||
}
|
||||
}

#[sqlx::test(migrations = false)]
async fn test_bulk_insert_empty_collab_list(pool: PgPool) {
  setup_db(&pool).await.unwrap();

  let user_uuid = uuid::Uuid::new_v4();
  let user = test_create_user(&pool, user_uuid, "test@appflowy.io", "test_user")
    .await
    .unwrap();

  let collab_params_list: Vec<CollabParams> = vec![]; // Empty list
  let mut txn = pool.begin().await.unwrap();
  let result = insert_into_af_collab_bulk_for_user(
    &mut txn,
    &user.uid,
    &user.workspace_id,
    &collab_params_list,
  )
  .await;
  assert!(result.is_ok());
  txn.commit().await.unwrap();
}

#[sqlx::test(migrations = false)]
async fn test_bulk_insert_duplicate_oid_partition_key(pool: PgPool) {
  setup_db(&pool).await.unwrap();

  let user_uuid = uuid::Uuid::new_v4();
  let user = test_create_user(&pool, user_uuid, "test@appflowy.io", "test_user")
    .await
    .unwrap();

  let object_id = uuid::Uuid::new_v4().to_string();
  let encoded_collab_v1 = generate_random_bytes(1024); // 1KB of random data

  // Two items with the same oid and partition_key
  let collab_params_list = vec![
    CollabParams {
      object_id: object_id.clone(),
      collab_type: CollabType::Unknown,
      encoded_collab_v1: encoded_collab_v1.clone().into(),
      embeddings: None,
    },
    CollabParams {
      object_id: object_id.clone(), // Duplicate oid
      collab_type: CollabType::Unknown,
      encoded_collab_v1: generate_random_bytes(2048).into(), // Different payload for the duplicate entry
      embeddings: None,
    },
  ];

  let mut txn = pool.begin().await.unwrap();
  insert_into_af_collab_bulk_for_user(&mut txn, &user.uid, &user.workspace_id, &collab_params_list)
    .await
    .unwrap();
  txn.commit().await.unwrap();

  // Validate that the duplicate entry was ignored rather than inserted twice
  let data = select_blob_from_af_collab(&pool, &CollabType::Unknown, &object_id)
    .await
    .unwrap();
  assert_eq!(data, encoded_collab_v1); // Should equal the data from the first insert; the duplicate is skipped
}
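
// NOTE (illustrative, not part of this commit): the assertion above shows that when two
// entries share the same oid and partition_key in a single bulk call, the row from the
// first entry is kept and the duplicate is skipped rather than updated. That behavior is
// consistent with a PostgreSQL insert ending in ON CONFLICT ... DO NOTHING. A minimal sqlx
// sketch of that pattern follows; the table and column names are simplified, and this is
// not necessarily the statement used by `insert_into_af_collab_bulk_for_user`.
//
// async fn insert_skip_duplicates(
//   txn: &mut sqlx::Transaction<'_, sqlx::Postgres>,
//   oid: &str,
//   partition_key: i32,
//   blob: &[u8],
// ) -> Result<(), sqlx::Error> {
//   sqlx::query(
//     "INSERT INTO af_collab (oid, partition_key, blob) VALUES ($1, $2, $3) \
//      ON CONFLICT (oid, partition_key) DO NOTHING",
//   )
//   .bind(oid)
//   .bind(partition_key)
//   .bind(blob)
//   .execute(&mut **txn) // a &mut PgConnection implements Executor
//   .await?;
//   Ok(())
// }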

#[sqlx::test(migrations = false)]
async fn test_batch_insert_comparison(pool: PgPool) {
  setup_db(&pool).await.unwrap();

  let user_uuid = uuid::Uuid::new_v4();
  let user = test_create_user(&pool, user_uuid, "test@appflowy.io", "test_user")
    .await
    .unwrap();

  // Define the different test cases
  let row_sizes = vec![1024, 5 * 1024]; // 1KB and 5KB row sizes
  let total_rows = vec![500, 1000, 2000, 3000, 6000]; // Number of rows
  let chunk_sizes = vec![2000]; // Chunk size for batch inserts

  // Iterate over the different row sizes
  for row_size in row_sizes {
    // Iterate over the different total row counts
    for &total_row_count in &total_rows {
      // Generate data for the total row count
      let collab_params_list: Vec<CollabParams> = (0..total_row_count)
        .map(|_| CollabParams {
          object_id: uuid::Uuid::new_v4().to_string(),
          collab_type: CollabType::Unknown,
          encoded_collab_v1: generate_random_bytes(row_size).into(), // Generate random bytes for the given row size
          embeddings: None,
        })
        .collect();

      // Group the results for readability
      println!("\n==============================");
      println!(
        "Row Size: {}KB, Total Rows: {}",
        row_size / 1024,
        total_row_count
      );

      // === Test Case 1: Insert all rows in one batch ===
      let start_time = std::time::Instant::now();
      let mut txn = pool.begin().await.unwrap();
      let result = insert_into_af_collab_bulk_for_user(
        &mut txn,
        &user.uid,
        &user.workspace_id,
        &collab_params_list,
      )
      .await;

      assert!(result.is_ok()); // Ensure the insert doesn't fail
      txn.commit().await.unwrap();
      let total_time_single_batch = start_time.elapsed();
      println!(
        "Batch Insert - Time for inserting {} rows of size {}KB in one batch: {:?}",
        total_row_count,
        row_size / 1024,
        total_time_single_batch
      );

      // === Test Case 2: Insert rows in chunks ===
      for &chunk_size in &chunk_sizes {
        let mut total_time_multiple_batches = std::time::Duration::new(0, 0);
        for chunk in collab_params_list.chunks(chunk_size) {
          let start_time = std::time::Instant::now();
          let mut txn = pool.begin().await.unwrap();
          let result =
            insert_into_af_collab_bulk_for_user(&mut txn, &user.uid, &user.workspace_id, chunk)
              .await;

          assert!(result.is_ok()); // Ensure the insert doesn't fail
          txn.commit().await.unwrap();
          total_time_multiple_batches += start_time.elapsed();
        }
        println!(
          "Chunked Insert - Time for inserting {} rows of size {}KB in {}-row chunks: {:?}",
          total_row_count,
          row_size / 1024,
          chunk_size,
          total_time_multiple_batches
        );
      }

      println!("==============================\n");
    }
  }
}

BIN  tests/workspace/asset/blog_post.zip     (new file, binary file not shown)
BIN  tests/workspace/asset/project&task.zip  (new file, binary file not shown)
167  tests/workspace/import_test.rs          (new file)
@@ -0,0 +1,167 @@
use client_api_test::TestClient;
use collab_document::importer::define::{BlockType, URL_FIELD};
use collab_folder::ViewLayout;
use shared_entity::dto::import_dto::ImportTaskStatus;
use std::path::PathBuf;
use std::time::Duration;

#[tokio::test]
async fn import_blog_post_test() {
  let (client, imported_workspace_id) = import_zip("blog_post.zip").await;
  let folder = client.get_folder(&imported_workspace_id).await;
  let mut workspace_sub_views = folder.get_views_belong_to(&imported_workspace_id);
  assert_eq!(
    workspace_sub_views.len(),
    1,
    "Expected 1 view, found {:?}",
    workspace_sub_views
  );

  let imported_view = workspace_sub_views.pop().unwrap();
  assert_eq!(imported_view.name, "Blog Post");
  let document = client
    .get_document(&imported_workspace_id, &imported_view.id)
    .await;

  let host = client.api_client.base_url.clone();
  let object_id = imported_view.id.clone();
  let mut expected_urls = vec![
    "PGTRCFsf2duc7iP3KjE62Xs8LE7B96a0aQtLtGtfIcw=.jpg",
    "fFWPgqwdqbaxPe7Q_vUO143Sa2FypnRcWVibuZYdkRI=.jpg",
    "EIj9Z3yj8Gw8UW60U8CLXx7ulckEs5Eu84LCFddCXII=.jpg",
  ]
  .into_iter()
  .map(|s| format!("{host}/{imported_workspace_id}/v1/blob/{object_id}/{s}"))
  .collect::<Vec<String>>();

  let page_block_id = document.get_page_id().unwrap();
  let block_ids = document.get_block_children_ids(&page_block_id);
  for block_id in block_ids.iter() {
    if let Some((block_type, block_data)) = document.get_block_data(block_id) {
      if matches!(block_type, BlockType::Image) {
        let url = block_data.get(URL_FIELD).unwrap().as_str().unwrap();
        expected_urls.retain(|allowed_url| !url.contains(allowed_url));
      }
    }
  }
  println!("{:?}", expected_urls);
  assert!(expected_urls.is_empty());
}

#[tokio::test]
async fn import_project_and_task_zip_test() {
  let (client, imported_workspace_id) = import_zip("project&task.zip").await;
  let folder = client.get_folder(&imported_workspace_id).await;
  let workspace_database = client.get_workspace_database(&imported_workspace_id).await;
  let mut workspace_sub_views = folder.get_views_belong_to(&imported_workspace_id);
  assert_eq!(
    workspace_sub_views.len(),
    1,
    "Expected 1 view, found {:?}",
    workspace_sub_views
  );

  let imported_view = workspace_sub_views.pop().unwrap();
  assert_eq!(imported_view.name, "Projects & Tasks");
  assert_eq!(
    imported_view.children.len(),
    2,
    "Expected 2 views, found {:?}",
    imported_view.children
  );
  assert_eq!(imported_view.layout, ViewLayout::Document);

  let sub_views = folder.get_views_belong_to(&imported_view.id);
  for (index, view) in sub_views.iter().enumerate() {
    if index == 0 {
      assert_eq!(view.name, "Projects");
      assert_eq!(view.layout, ViewLayout::Grid);

      let database_id = workspace_database
        .get_database_meta_with_view_id(&view.id)
        .unwrap()
        .database_id
        .clone();
      let database = client
        .get_database(&imported_workspace_id, &database_id)
        .await;
      let inline_views = database.get_inline_view_id();
      let fields = database.get_fields_in_view(&inline_views, None);
      let rows = database.collect_all_rows().await;
      assert_eq!(rows.len(), 4);
      assert_eq!(fields.len(), 13);

      continue;
    }

    if index == 1 {
      assert_eq!(view.name, "Tasks");
      assert_eq!(view.layout, ViewLayout::Grid);

      let database_id = workspace_database
        .get_database_meta_with_view_id(&view.id)
        .unwrap()
        .database_id
        .clone();
      let database = client
        .get_database(&imported_workspace_id, &database_id)
        .await;
      let inline_views = database.get_inline_view_id();
      let fields = database.get_fields_in_view(&inline_views, None);
      let rows = database.collect_all_rows().await;
      assert_eq!(rows.len(), 17);
      assert_eq!(fields.len(), 13);
      continue;
    }

    panic!("Unexpected view found: {:?}", view);
  }
}

async fn import_zip(name: &str) -> (TestClient, String) {
  let client = TestClient::new_user().await;

  let file_path = PathBuf::from(format!("tests/workspace/asset/{name}"));
  client.api_client.import_file(&file_path).await.unwrap();
  let default_workspace_id = client.workspace_id().await;

  // When a file is imported, a new workspace is created for it, but that workspace is
  // not visible until the import task has completed.
  let workspaces = client.api_client.get_workspaces().await.unwrap();
  assert_eq!(workspaces.len(), 1);

  let tasks = client.api_client.get_import_list().await.unwrap().tasks;
  assert_eq!(tasks.len(), 1);
  assert_eq!(tasks[0].status, ImportTaskStatus::Pending);

  let mut task_completed = false;
  let max_retries = 12;
  let mut retries = 0;
  while !task_completed && retries < max_retries {
    tokio::time::sleep(Duration::from_secs(10)).await;
    let tasks = client.api_client.get_import_list().await.unwrap().tasks;
    assert_eq!(tasks.len(), 1);

    if tasks[0].status == ImportTaskStatus::Completed {
      task_completed = true;
    }
    retries += 1;
  }

  assert!(
    task_completed,
    "The import task was not completed within the expected time."
  );

  // After the import task completes, the new workspace becomes visible.
  let mut workspaces = client.api_client.get_workspaces().await.unwrap();
  assert_eq!(workspaces.len(), 2);

  let imported_workspace = workspaces
    .into_iter()
    .find(|workspace| workspace.workspace_id.to_string() != default_workspace_id)
    .expect("Failed to find imported workspace");

  let imported_workspace_id = imported_workspace.workspace_id.to_string();
  (client, imported_workspace_id)
}
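
// NOTE (illustrative, not part of this commit): the loop above polls the import task at most
// 12 times with a 10-second sleep, i.e. roughly a two-minute budget. The same bound can also
// be expressed with tokio::time::timeout; a minimal sketch reusing the imports of this test
// file, where `wait_until_import_completed` is a hypothetical helper name:
//
// async fn wait_until_import_completed(client: &TestClient) -> bool {
//   tokio::time::timeout(Duration::from_secs(120), async {
//     loop {
//       let tasks = client.api_client.get_import_list().await.unwrap().tasks;
//       if matches!(tasks.first(), Some(task) if task.status == ImportTaskStatus::Completed) {
//         return;
//       }
//       tokio::time::sleep(Duration::from_secs(10)).await;
//     }
//   })
//   .await
//   .is_ok()
// }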

@@ -1,6 +1,7 @@
mod access_request;
mod default_user_workspace;
mod edit_workspace;
mod import_test;
mod invitation_crud;
mod member_crud;
mod page_view;

@@ -890,13 +890,12 @@ async fn duplicate_to_workspace_doc_inline_database() {

  {
    // check that database_id is different
-   let mut ws_db_collab = client_2
+   let ws_db_collab = client_2
      .get_workspace_database_collab(&workspace_id_2)
      .await;
-   let ws_db_body = WorkspaceDatabaseBody::open(&mut ws_db_collab);
-   let txn = ws_db_collab.transact();
+   let ws_db_body = WorkspaceDatabaseBody::open(ws_db_collab).unwrap();
    let dup_grid1_db_id = ws_db_body
-     .get_all_database_meta(&txn)
+     .get_all_database_meta()
      .into_iter()
      .find(|db_meta| db_meta.linked_views.contains(&view_of_grid1_fv.view_id))
      .unwrap()
@@ -908,6 +907,7 @@ async fn duplicate_to_workspace_doc_inline_database() {
    let dup_db_id = DatabaseBody::database_id_from_collab(&db_collab).unwrap();
    assert_ne!(dup_db_id, pub_db_id);

+   let txn = db_collab.transact();
    let view_map = {
      let map_ref = db_collab
        .data
@@ -12,10 +12,12 @@ use tokio::select;
#[tokio::main]
async fn main() -> Result<()> {
  let appflowy_cloud_bin_name = "appflowy_cloud";
- let appflowy_history_bin_name = "appflowy_history";
+ let history = "appflowy_history";
+ let worker = "appflowy_worker";

  kill_existing_process(appflowy_cloud_bin_name).await?;
- kill_existing_process(appflowy_history_bin_name).await?;
+ kill_existing_process(history).await?;
+ kill_existing_process(worker).await?;

  let mut appflowy_cloud_cmd = Command::new("cargo")
    .args(["run", "--features", "history"])
@@ -33,12 +35,24 @@ async fn main() -> Result<()> {
    .spawn()
    .context("Failed to start AppFlowy-History process")?;

+ let mut appflowy_worker_cmd = Command::new("cargo")
+   .args([
+     "run",
+     "--manifest-path",
+     "./services/appflowy-worker/Cargo.toml",
+   ])
+   .spawn()
+   .context("Failed to start AppFlowy-Worker process")?;
+
  select! {
    status = appflowy_cloud_cmd.wait() => {
      handle_process_exit(status?, appflowy_cloud_bin_name)?;
    },
    status = appflowy_history_cmd.wait() => {
-     handle_process_exit(status?, appflowy_history_bin_name)?;
+     handle_process_exit(status?, history)?;
    }
+   status = appflowy_worker_cmd.wait() => {
+     handle_process_exit(status?, worker)?;
+   }
  }