chore: create tables (#16)

* chore: create tables

* ci: remove postgres service

* chore: separate ci

* chore: run cargo clippy with sqlx offline mode

* refactor: create docker action

* chore: rename

* chore: update table

* chore: update supabase_admin

* chore: update supabase_admin

* chore: remove create auth.jwt function
Nathan.fooo 2023-09-07 10:43:10 +08:00 committed by GitHub
parent f9825d4221
commit e2e6d79cb0
22 changed files with 530 additions and 144 deletions


@@ -4,6 +4,6 @@ spec.yaml
target/
deploy/
tests/
Dockerfile
docker/Dockerfile
scripts/
migrations/


@@ -1,4 +1,4 @@
name: Server
name: AppFlowyCloud
on:
push:
@@ -8,30 +8,13 @@ on:
branches: [ main ]
env:
CARGO_TERM_COLOR: always
SQLX_VERSION: 0.7.1
SQLX_FEATURES: "rustls,postgres"
APP__GOTRUE__BASE_URL: http://localhost:3000
APP__GOTRUE__JWT_SECRET: hello123
GOTRUE_SMTP_PASS: ${{ secrets.GOTRUE_SMTP_PASS }}
jobs:
test:
name: Test
name: Unit Tests
runs-on: ubuntu-latest
services:
postgres:
image: postgres:12
env:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: password
POSTGRES_DB: postgres
ports:
- 5432:5432
redis:
image: redis:7
ports:
- 6379:6379
steps:
- uses: actions/checkout@v3
@@ -40,80 +23,16 @@ jobs:
with:
workspaces: |
AppFlowy-Cloud
- name: Install sqlx-cli
run:
cargo install sqlx-cli
--version=${{ env.SQLX_VERSION }}
--features ${{ env.SQLX_FEATURES }}
--no-default-features
--locked
- name: Migrate database
- name: Install Development Dependencies
run: |
cargo install sqlx-cli --version=${{ env.SQLX_VERSION }} --features ${{ env.SQLX_FEATURES }} --no-default-features --locked
sudo apt-get install libpq-dev -y
SKIP_DOCKER=true ./build/init_database.sh
- name: Check sqlx-data.json is up-to-date
./build/init_database.sh
./build/init_redis.sh
- name: Check sqlx-data.json
run: |
cargo sqlx prepare --check -- --bin appflowy_cloud
- name: Run Docker-Compose
run: |
docker-compose up -d
- name: Add registered user
run: |
export GOTRUE_MAILER_AUTOCONFIRM=true
docker-compose up -d
curl localhost:9998/signup \
--data-raw '{"email":"xigahi8979@tipent.com","password":"Hello123!"}' \
--header 'Content-Type: application/json'
export GOTRUE_MAILER_AUTOCONFIRM=false
docker-compose up -d
- name: Run tests
run: |
cargo test
fmt:
name: Rustfmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt
- name: Enforce formatting
run: cargo fmt --check
clippy:
name: Clippy
runs-on: ubuntu-latest
services:
postgres:
image: postgres:14
env:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: password
POSTGRES_DB: postgres
ports:
- 5432:5432
steps:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable
with:
components: clippy
- uses: Swatinem/rust-cache@v2
with:
key: sqlx-${{ env.SQLX_VERSION }}
- name: Install sqlx-cli
run:
cargo install sqlx-cli
--version=${{ env.SQLX_VERSION }}
--features ${{ env.SQLX_FEATURES }}
--no-default-features
--locked
- name: Migrate database
run: |
sudo apt-get install libpq-dev -y
SKIP_DOCKER=true ./build/init_database.sh
- name: Linting
run: cargo clippy -- -D warnings

.github/workflows/docker.yml (new file, +44 lines)

@@ -0,0 +1,44 @@
name: AppFlowyCloud
on:
push:
branches: [ main ]
pull_request:
types: [ opened, synchronize, reopened ]
branches: [ main ]
env:
APP__GOTRUE__BASE_URL: http://localhost:3000
APP__GOTRUE__JWT_SECRET: hello123
GOTRUE_SMTP_PASS: ${{ secrets.GOTRUE_SMTP_PASS }}
jobs:
test:
name: Docker
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
with:
workspaces: |
AppFlowy-Cloud
- name: Run Docker-Compose
run: |
docker-compose up -d
- name: Add registered user
run: |
export GOTRUE_MAILER_AUTOCONFIRM=true
docker-compose up -d
curl localhost:9998/signup \
--data-raw '{"email":"xigahi8979@tipent.com","password":"Hello123!"}' \
--header 'Content-Type: application/json'
export GOTRUE_MAILER_AUTOCONFIRM=false
docker-compose up -d
- name: Run tests
run: |
cargo test

.github/workflows/rustlint.yml (new file, +29 lines)

@@ -0,0 +1,29 @@
name: Rust Lint
on:
push:
branches: [ main ]
pull_request:
types: [ opened, synchronize, reopened ]
branches: [ main ]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt,clippy
- uses: Swatinem/rust-cache@v2
with:
workspaces: |
AppFlowy-Cloud
- name: Rustfmt
run: cargo fmt --check
- name: Clippy
run: SQLX_OFFLINE=true cargo clippy -- -D warnings


@@ -1,18 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n INSERT INTO users (uid, email, username, create_time, password)\n VALUES ($1, $2, $3, $4, $5)\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int8",
"Text",
"Text",
"Timestamptz",
"Text"
]
},
"nullable": []
},
"hash": "1775f308cda63b61c0aaf4d8d1c7cf00f6bff5041e821575f65867c32d42f1b0"
}


@@ -1,6 +1,6 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT email\n FROM users\n WHERE uid = $1\n ",
"query": "\n SELECT email\n FROM af_user\n WHERE uid = $1\n ",
"describe": {
"columns": [
{
@@ -18,5 +18,5 @@
false
]
},
"hash": "fdcaf51ec4b03ad8af6bddfe1f3a62ba08a1dd94d0a3bf26db506c7a948e2b5e"
"hash": "aa70aeecc5c9c9aae53c8b064810031b4701a7f8a07f026364de53a8cb6750b7"
}


@@ -0,0 +1,17 @@
{
"db_name": "PostgreSQL",
"query": "\n INSERT INTO af_user (uid, email, name, password)\n VALUES ($1, $2, $3, $4)\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int8",
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "b3c6d628c4fa9b458f2ecadb9eda577f783f20c084850dd1a95cc9798dc73e95"
}


@@ -1,6 +1,6 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT uid, password\n FROM users\n WHERE email = $1\n ",
"query": "\n SELECT uid, password\n FROM af_user \n WHERE email = $1\n ",
"describe": {
"columns": [
{
@@ -24,5 +24,5 @@
false
]
},
"hash": "b188bb8915fa0fc8c5dfefa27f1b086b839633c3c4dc6e4991b43a608d46a170"
"hash": "dd713d68bddf1e4d13230dee18a9ef6f6490ad4e70e5f2e0f54a4dad8db7fbbf"
}


@@ -33,6 +33,8 @@ then
exit 1
fi
docker build -t postgres_with_pgjwt -f ./docker/Dockerfile_postgres .
docker run \
-e POSTGRES_USER=${DB_USER} \
-e POSTGRES_PASSWORD=${DB_PASSWORD} \
@@ -40,7 +42,7 @@ then
-p "${DB_PORT}":5432 \
-d \
--name "appflowy_postgres_$(date '+%s')" \
postgres:14 -N 1000
postgres_with_pgjwt -N 1000
fi


@@ -2,6 +2,9 @@ version: '3'
services:
postgres:
image: postgres
build:
context: .
dockerfile: docker/Dockerfile_postgres
environment:
- POSTGRES_USER=${POSTGRES_USER:-postgres}
- POSTGRES_DB=${POSTGRES_DB:-postgres}
@@ -47,7 +50,7 @@ services:
- APP_ENVIRONMENT=production
build:
context: .
dockerfile: ./Dockerfile
dockerfile: docker/Dockerfile
image: appflowy_cloud:${BACKEND_VERSION:-latest}
depends_on:
- redis


@@ -4,7 +4,7 @@ WORKDIR /app
RUN apt update && apt install lld clang -y
FROM chef as planner
COPY . .
COPY .. .
# Compute a lock-like file for our project
RUN cargo chef prepare --recipe-path recipe.json
@@ -12,7 +12,7 @@ FROM chef as builder
COPY --from=planner /app/recipe.json recipe.json
# Build our project dependencies
RUN cargo chef cook --release --recipe-path recipe.json
COPY . .
COPY .. .
ENV SQLX_OFFLINE true
# Build the project
RUN cargo build --release --bin appflowy_cloud


@@ -0,0 +1,12 @@
FROM postgres:latest
# Install dependencies required for pgjwt
RUN apt-get update && \
apt-get install -y build-essential postgresql-server-dev-all git
# Clone and build pgjwt
RUN rm -rf pgjwt && \
git clone https://github.com/michelp/pgjwt.git && \
cd pgjwt && \
make && \
make install
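
For context (not part of this commit), the image above exists so that the pgjwt extension can be installed into Postgres. A rough sketch of how the extension can be enabled and exercised once the container is running, using pgjwt's sign/verify functions (the payload and secret values are illustrative):

-- pgjwt depends on pgcrypto
CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE EXTENSION IF NOT EXISTS pgjwt;
-- Produce a signed HS256 token
SELECT sign('{"sub": "1234", "role": "authenticated"}'::json, 'hello123');
-- Decode and validate a token; returns (header, payload, valid)
SELECT * FROM verify('<token returned by sign()>', 'hello123');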


@@ -0,0 +1,38 @@
-- Add migration script here
-- Create the anon and authenticated roles if they don't exist
CREATE OR REPLACE FUNCTION create_roles(roles text []) RETURNS void LANGUAGE plpgsql AS $$
DECLARE role_name text;
BEGIN FOREACH role_name IN ARRAY roles LOOP IF NOT EXISTS (
SELECT 1
FROM pg_roles
WHERE rolname = role_name
) THEN EXECUTE 'CREATE ROLE ' || role_name;
END IF;
END LOOP;
END;
$$;
SELECT create_roles(ARRAY ['anon', 'authenticated']);
-- Create supabase_admin user if it does not exist
DO $$ BEGIN IF NOT EXISTS (
SELECT
FROM pg_catalog.pg_roles
WHERE rolname = 'supabase_admin'
) THEN CREATE USER supabase_admin LOGIN CREATEROLE CREATEDB REPLICATION BYPASSRLS;
END IF;
END $$;
-- Create supabase_auth_admin user if it does not exist
DO $$ BEGIN IF NOT EXISTS (
SELECT
FROM pg_catalog.pg_roles
WHERE rolname = 'supabase_auth_admin'
) THEN CREATE USER supabase_auth_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION PASSWORD 'root';
END IF;
END $$;
-- Create auth schema if it does not exist
CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION supabase_auth_admin;
-- Grant permissions
GRANT CREATE ON DATABASE postgres TO supabase_auth_admin;
-- Set search_path for supabase_auth_admin
ALTER USER supabase_auth_admin
SET search_path = 'auth';
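
A quick sanity check (not part of the migration) for the roles and admin users created above, assuming a psql session against the same database:

SELECT rolname, rolcanlogin, rolbypassrls
FROM pg_roles
WHERE rolname IN ('anon', 'authenticated', 'supabase_admin', 'supabase_auth_admin');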


@@ -1,9 +1,29 @@
-- Add migration script here
CREATE TABLE users
(
uid bigint PRIMARY KEY,
username TEXT NOT NULL,
password TEXT NOT NULL,
email TEXT NOT NULL UNIQUE,
create_time timestamptz NOT NULL
);
-- Required by uuid_generate_v4()
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- user table
CREATE TABLE IF NOT EXISTS af_user (
uid BIGSERIAL PRIMARY KEY,
email TEXT NOT NULL DEFAULT '' UNIQUE,
password TEXT NOT NULL,
name TEXT NOT NULL DEFAULT '',
encryption_sign TEXT,
deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
CREATE OR REPLACE FUNCTION update_updated_at_column_func() RETURNS TRIGGER AS $$ BEGIN NEW.updated_at = NOW();
RETURN NEW;
END;
$$ language 'plpgsql';
CREATE TRIGGER update_af_user_modtime BEFORE
UPDATE ON af_user FOR EACH ROW EXECUTE PROCEDURE update_updated_at_column_func();
CREATE OR REPLACE FUNCTION prevent_reset_encryption_sign_func() RETURNS TRIGGER AS $$ BEGIN IF OLD.encryption_sign IS NOT NULL
AND NEW.encryption_sign IS DISTINCT
FROM OLD.encryption_sign THEN RAISE EXCEPTION 'The encryption sign can not be reset once it has been set';
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trigger_prevent_reset_encryption_sign BEFORE
UPDATE ON af_user FOR EACH ROW EXECUTE FUNCTION prevent_reset_encryption_sign_func();
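
A hypothetical sequence (not part of the migration) showing how the two triggers behave: updated_at is refreshed on every UPDATE, and encryption_sign can be set once but never changed afterwards. The email and values are made up:

INSERT INTO af_user (email, password, name)
VALUES ('demo@example.com', 'hashed-password', 'Demo');
-- First write succeeds because encryption_sign was NULL
UPDATE af_user SET encryption_sign = 'sign-v1' WHERE email = 'demo@example.com';
-- Second write raises 'The encryption sign can not be reset once it has been set'
UPDATE af_user SET encryption_sign = 'sign-v2' WHERE email = 'demo@example.com';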


@@ -1,8 +0,0 @@
-- Add migration script here
CREATE USER supabase_admin LOGIN CREATEROLE CREATEDB REPLICATION BYPASSRLS;
-- Supabase super admin
CREATE USER supabase_auth_admin NOINHERIT CREATEROLE LOGIN NOREPLICATION PASSWORD 'root';
CREATE SCHEMA IF NOT EXISTS auth AUTHORIZATION supabase_auth_admin;
GRANT CREATE ON DATABASE postgres TO supabase_auth_admin;
ALTER USER supabase_auth_admin SET search_path = 'auth';


@@ -0,0 +1,70 @@
-- Create the af_roles table
CREATE TABLE IF NOT EXISTS af_roles (
id SERIAL PRIMARY KEY,
name TEXT UNIQUE NOT NULL
);
-- Insert default roles
INSERT INTO af_roles (name)
VALUES ('Owner'),
('Member'),
('Guest');
CREATE TABLE af_permissions (
id SERIAL PRIMARY KEY,
name VARCHAR(255) UNIQUE NOT NULL,
access_level INTEGER,
description TEXT
);
-- Insert default permissions
INSERT INTO af_permissions (name, description, access_level)
VALUES ('Read only', 'Can read', 10),
(
'Read and comment',
'Can read and comment, but not edit',
20
),
(
'Read and write',
'Can read and edit, but not share with others',
30
),
(
'Full access',
'Can edit and share with others',
50
);
-- Represents a permission that a role has. The list of all permissions a role has can be obtained by querying this table for all rows with a given role_id.
CREATE TABLE af_role_permissions (
role_id INT REFERENCES af_roles(id),
permission_id INT REFERENCES af_permissions(id),
PRIMARY KEY (role_id, permission_id)
);
-- Associate permissions with roles
WITH role_ids AS (
SELECT id,
name
FROM af_roles
WHERE name IN ('Owner', 'Member', 'Guest')
),
permission_ids AS (
SELECT id,
name
FROM af_permissions
WHERE name IN ('Full access', 'Read and write', 'Read only')
)
INSERT INTO af_role_permissions (role_id, permission_id)
SELECT r.id,
p.id
FROM role_ids r
CROSS JOIN permission_ids p
WHERE (
r.name = 'Owner'
AND p.name = 'Full access'
)
OR (
r.name = 'Member'
AND p.name = 'Read and write'
)
OR (
r.name = 'Guest'
AND p.name = 'Read only'
);
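
As a sanity check (not part of this commit), the role-to-permission mapping created above can be inspected by joining the three tables:

SELECT r.name AS role, p.name AS permission, p.access_level
FROM af_role_permissions rp
JOIN af_roles r ON r.id = rp.role_id
JOIN af_permissions p ON p.id = rp.permission_id
ORDER BY p.access_level DESC;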


@@ -0,0 +1,52 @@
-- af_workspace contains all the workspaces. Each workspace contains a list of members defined in af_workspace_member
CREATE TABLE IF NOT EXISTS af_workspace (
workspace_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
database_storage_id UUID DEFAULT uuid_generate_v4(),
owner_uid BIGINT REFERENCES af_user(uid) ON DELETE CASCADE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
-- 0: Free
workspace_type INTEGER NOT NULL DEFAULT 0,
deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
workspace_name TEXT DEFAULT 'My Workspace'
);
-- This trigger is fired after an insert operation on the af_user table. It automatically creates a workspace
-- in the af_workspace table with the uid of the new user profile as the owner_uid
CREATE OR REPLACE FUNCTION create_af_workspace_func() RETURNS TRIGGER AS $$BEGIN
INSERT INTO af_workspace (owner_uid)
VALUES (NEW.uid);
RETURN NEW;
END $$LANGUAGE plpgsql;
CREATE TRIGGER create_af_workspace_trigger
AFTER
INSERT ON af_user FOR EACH ROW EXECUTE FUNCTION create_af_workspace_func();
-- af_workspace_member contains all the members associated with a workspace and their roles.
CREATE TABLE IF NOT EXISTS af_workspace_member (
uid BIGINT NOT NULL,
role_id INT NOT NULL REFERENCES af_roles(id),
workspace_id UUID NOT NULL REFERENCES af_workspace(workspace_id) ON DELETE CASCADE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
PRIMARY KEY (uid, workspace_id)
);
CREATE UNIQUE INDEX idx_af_workspace_member ON af_workspace_member (uid, workspace_id, role_id);
-- This trigger is fired after an insert operation on the af_workspace table. It automatically creates a workspace
-- member in the af_workspace_member table. If the user is the owner of the workspace, they are given the role 'Owner'.
CREATE OR REPLACE FUNCTION manage_af_workspace_member_role_func() RETURNS TRIGGER AS $$ BEGIN
INSERT INTO af_workspace_member (uid, role_id, workspace_id)
VALUES (
NEW.owner_uid,
(
SELECT id
FROM af_roles
WHERE name = 'Owner'
),
NEW.workspace_id
) ON CONFLICT (uid, workspace_id) DO
UPDATE
SET role_id = EXCLUDED.role_id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER manage_af_workspace_member_role_trigger
AFTER
INSERT ON af_workspace FOR EACH ROW EXECUTE FUNCTION manage_af_workspace_member_role_func();
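
To illustrate the trigger chain above (a sketch, not part of the migration): inserting a user creates a workspace via create_af_workspace_trigger, which in turn records that user as 'Owner' in af_workspace_member via manage_af_workspace_member_role_trigger. The email is made up:

INSERT INTO af_user (email, password) VALUES ('owner@example.com', 'hashed-password');
SELECT w.workspace_id, m.uid, r.name AS role
FROM af_workspace w
JOIN af_workspace_member m ON m.workspace_id = w.workspace_id
JOIN af_roles r ON r.id = m.role_id
WHERE w.owner_uid = (SELECT uid FROM af_user WHERE email = 'owner@example.com');
-- Expected: a single row with role = 'Owner'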


@@ -0,0 +1,18 @@
-- af_user_profile_view is a view that contains all the user profiles and their latest workspace_id.
-- a subquery is first used to find the workspace_id of the workspace with the latest updated_at timestamp for each
-- user. This subquery is then joined with the af_user table to create the view. Note that an INNER JOIN is used,
-- so a user only appears in the view once they have at least one row in af_workspace_member (which the
-- create_af_workspace_trigger guarantees for newly created users).
CREATE OR REPLACE VIEW af_user_profile_view AS
SELECT u.*,
w.workspace_id AS latest_workspace_id
FROM af_user u
INNER JOIN (
SELECT uid,
workspace_id,
rank() OVER (
PARTITION BY uid
ORDER BY updated_at DESC
) AS rn
FROM af_workspace_member
) w ON u.uid = w.uid
AND w.rn = 1;
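
Assuming the migrations above have been applied, the view can then be queried per user, for example (hypothetical email):

SELECT uid, email, latest_workspace_id
FROM af_user_profile_view
WHERE email = 'owner@example.com';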


@@ -0,0 +1,162 @@
-- af_collab contains all the collabs.
CREATE TABLE IF NOT EXISTS af_collab(
oid TEXT PRIMARY KEY,
owner_uid BIGINT NOT NULL,
workspace_id UUID NOT NULL REFERENCES af_workspace(workspace_id) ON DELETE CASCADE,
-- 0: Private, 1: Shared
access_level INTEGER NOT NULL DEFAULT 0,
deleted_at TIMESTAMP WITH TIME ZONE DEFAULT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
CREATE UNIQUE INDEX idx_af_collab_oid ON af_collab (oid);
-- collab update table.
CREATE TABLE IF NOT EXISTS af_collab_update (
oid TEXT REFERENCES af_collab(oid) ON DELETE CASCADE,
blob BYTEA NOT NULL,
len INTEGER,
partition_key INTEGER NOT NULL,
md5 TEXT DEFAULT '',
did TEXT DEFAULT '',
encrypt INTEGER DEFAULT 0,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
workspace_id UUID NOT NULL REFERENCES af_workspace(workspace_id) ON DELETE CASCADE,
PRIMARY KEY (oid, partition_key)
) PARTITION BY LIST (partition_key);
CREATE TABLE af_collab_update_document PARTITION OF af_collab_update FOR
VALUES IN (0);
CREATE TABLE af_collab_update_database PARTITION OF af_collab_update FOR
VALUES IN (1);
CREATE TABLE af_collab_update_w_database PARTITION OF af_collab_update FOR
VALUES IN (2);
CREATE TABLE af_collab_update_folder PARTITION OF af_collab_update FOR
VALUES IN (3);
CREATE TABLE af_collab_update_database_row PARTITION OF af_collab_update FOR
VALUES IN (4);
CREATE TABLE af_collab_update_user_awareness PARTITION OF af_collab_update FOR
VALUES IN (5);
-- This trigger is fired before an insert operation on the af_collab_update table. It checks if a corresponding collab
-- exists in the af_collab table. If not, it creates one with the oid, uid, and current timestamp. It might cause a
-- performance issue if the af_collab_update table is updated very frequently, especially if the af_collab table is large
-- and if the oid column isn't indexed
CREATE OR REPLACE FUNCTION insert_into_af_collab_if_not_exists() RETURNS TRIGGER AS $$ BEGIN IF NOT EXISTS (
SELECT 1
FROM af_collab
WHERE oid = NEW.oid
) THEN
INSERT INTO af_collab (oid, owner_uid, workspace_id, created_at)
VALUES (
NEW.oid,
NEW.uid,
NEW.workspace_id,
CURRENT_TIMESTAMP
);
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER insert_into_af_collab_trigger BEFORE
INSERT ON af_collab_update FOR EACH ROW EXECUTE FUNCTION insert_into_af_collab_if_not_exists();
CREATE TABLE af_collab_member (
uid BIGINT REFERENCES af_user(uid) ON DELETE CASCADE,
oid TEXT REFERENCES af_collab(oid) ON DELETE CASCADE,
role_id INTEGER REFERENCES af_roles(id),
PRIMARY KEY(uid, oid)
);
-- This trigger is fired after an insert operation on the af_collab table. It automatically adds the collab's owner
-- to the af_collab_member table with the role 'Owner'.
CREATE OR REPLACE FUNCTION insert_into_af_collab_member() RETURNS TRIGGER AS $$ BEGIN
INSERT INTO af_collab_member (oid, uid, role_id)
VALUES (
NEW.oid,
NEW.owner_uid,
(
SELECT id
FROM af_roles
WHERE name = 'Owner'
)
);
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER insert_into_af_collab_member_trigger
AFTER
INSERT ON af_collab FOR EACH ROW EXECUTE FUNCTION insert_into_af_collab_member();
-- collab statistics. It will be used to store the edit_count of the collab.
CREATE TABLE IF NOT EXISTS af_collab_statistics (
oid TEXT PRIMARY KEY,
edit_count BIGINT NOT NULL DEFAULT 0
);
-- This trigger is fired after an insert operation on the af_collab_update table. It increments the edit_count of the
-- corresponding collab in the af_collab_statistics table. If the collab doesn't exist in the af_collab_statistics table,
-- it creates one with edit_count set to 1.
CREATE OR REPLACE FUNCTION increment_af_collab_edit_count() RETURNS TRIGGER AS $$BEGIN IF EXISTS(
SELECT 1
FROM af_collab_statistics
WHERE oid = NEW.oid
) THEN
UPDATE af_collab_statistics
SET edit_count = edit_count + 1
WHERE oid = NEW.oid;
ELSE
INSERT INTO af_collab_statistics (oid, edit_count)
VALUES (NEW.oid, 1);
END IF;
RETURN NEW;
END;
$$LANGUAGE plpgsql;
CREATE TRIGGER af_collab_update_edit_count_trigger
AFTER
INSERT ON af_collab_update FOR EACH ROW EXECUTE FUNCTION increment_af_collab_edit_count();
-- collab snapshot. It will be used to store the snapshots of the collab.
CREATE TABLE IF NOT EXISTS af_collab_snapshot (
sid BIGSERIAL PRIMARY KEY,
oid TEXT NOT NULL,
name TEXT DEFAULT '',
blob BYTEA NOT NULL,
len INTEGER NOT NULL,
edit_count BIGINT NOT NULL DEFAULT 0,
encrypt INTEGER DEFAULT 0,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
-- This trigger is fired after an insert operation on the af_collab_snapshot table. It automatically sets the edit_count
-- of the new snapshot to the current edit_count of the collab in the af_collab_statistics table.
CREATE OR REPLACE FUNCTION af_collab_snapshot_update_edit_count() RETURNS TRIGGER AS $$ BEGIN NEW.edit_count := COALESCE(
(
SELECT af_collab_statistics.edit_count
FROM af_collab_statistics
WHERE af_collab_statistics.oid = NEW.oid
),
-- If no row in af_collab_statistics with the given oid is found, default edit_count to 0
0
);
RETURN NEW;
END;
$$LANGUAGE plpgsql;
CREATE TRIGGER af_collab_snapshot_update_edit_count_trigger BEFORE
INSERT ON af_collab_snapshot FOR EACH ROW EXECUTE FUNCTION af_collab_snapshot_update_edit_count();
-- collab state view. It will be used to get the current state of the collab.
CREATE VIEW af_collab_state AS
SELECT a.oid,
a.created_at AS snapshot_created_at,
a.edit_count AS snapshot_edit_count,
b.edit_count AS current_edit_count
FROM af_collab_snapshot AS a
JOIN af_collab_statistics AS b ON a.oid = b.oid;
-- Insert a workspace member if the user with given uid is the owner of the workspace
CREATE OR REPLACE FUNCTION insert_af_workspace_member_if_owner(
p_uid BIGINT,
p_role_id INT,
p_workspace_id UUID
) RETURNS VOID AS $$ BEGIN -- If user is the owner, proceed with the insert operation
INSERT INTO af_workspace_member (uid, role_id, workspace_id)
SELECT p_uid,
p_role_id,
p_workspace_id
FROM af_workspace
WHERE workspace_id = p_workspace_id
AND owner_uid = p_uid;
-- Check if the insert operation was successful. If not, user is not the owner of the workspace.
IF NOT FOUND THEN RAISE EXCEPTION 'Unsupported operation: User is not the owner of the workspace.';
END IF;
END;
$$ LANGUAGE plpgsql;
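
A brief sketch (not part of the migration) of the ownership trigger on af_collab: inserting a collab row automatically adds its owner to af_collab_member with the 'Owner' role. The oid and the referenced user/workspace are illustrative:

INSERT INTO af_collab (oid, owner_uid, workspace_id)
VALUES (
  'demo-document-oid',
  (SELECT uid FROM af_user WHERE email = 'owner@example.com'),
  (SELECT workspace_id FROM af_workspace
   WHERE owner_uid = (SELECT uid FROM af_user WHERE email = 'owner@example.com')
   LIMIT 1)
);
SELECT m.uid, r.name AS role
FROM af_collab_member m
JOIN af_roles r ON r.id = m.role_id
WHERE m.oid = 'demo-document-oid';
-- Expected: one row for the owner with role = 'Owner'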


@@ -0,0 +1,28 @@
-- Enable RLS on the af_collab_update table
ALTER TABLE af_collab_update ENABLE ROW LEVEL SECURITY;
CREATE POLICY af_collab_update_policy ON af_collab_update FOR ALL TO anon,
authenticated USING (true);
-- Enable RLS on the af_user table
-- Policy for INSERT
ALTER TABLE af_user ENABLE ROW LEVEL SECURITY;
CREATE POLICY af_user_insert_policy ON public.af_user FOR
INSERT TO anon,
authenticated WITH CHECK (true);
-- Policy for UPDATE
CREATE POLICY af_user_update_policy ON public.af_user FOR
UPDATE USING (true) WITH CHECK (true);
-- Policy for SELECT
CREATE POLICY af_user_select_policy ON public.af_user FOR
SELECT TO anon,
authenticated USING (true);
ALTER TABLE af_user FORCE ROW LEVEL SECURITY;
-- Enable RLS on the af_workspace_member table
ALTER TABLE af_workspace_member ENABLE ROW LEVEL SECURITY;
CREATE POLICY af_workspace_member_policy ON af_workspace_member FOR ALL TO anon,
authenticated USING (true);
ALTER TABLE af_workspace_member FORCE ROW LEVEL SECURITY;
-- Enable RLS on the af_workspace table
ALTER TABLE af_workspace ENABLE ROW LEVEL SECURITY;
CREATE POLICY af_workspace_policy ON af_workspace FOR ALL TO anon,
authenticated USING (true);
ALTER TABLE af_workspace FORCE ROW LEVEL SECURITY;
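
Not part of the commit, but the permissive policies declared above can be listed from the pg_policies catalog view to confirm which roles and commands they cover:

SELECT tablename, policyname, roles, cmd
FROM pg_policies
WHERE tablename IN ('af_user', 'af_workspace', 'af_workspace_member', 'af_collab_update')
ORDER BY tablename, policyname;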


@@ -63,7 +63,7 @@ async fn get_stored_credentials(
let row = sqlx::query!(
r#"
SELECT uid, password
FROM users
FROM af_user
WHERE email = $1
"#,
email,


@@ -7,7 +7,6 @@ use crate::telemetry::spawn_blocking_with_tracing;
use actix_web::HttpRequest;
use anyhow::Context;
use chrono::Duration;
use chrono::Utc;
use std::ops::DerefMut;
use secrecy::zeroize::DefaultIsZeroes;
@@ -76,13 +75,12 @@ pub async fn register(
let password = compute_hash_password(password.as_bytes()).map_err(internal_error)?;
let _ = sqlx::query!(
r#"
INSERT INTO users (uid, email, username, create_time, password)
VALUES ($1, $2, $3, $4, $5)
"#,
INSERT INTO af_user (uid, email, name, password)
VALUES ($1, $2, $3, $4)
"#,
uid,
email,
username,
Utc::now(),
password.expose_secret(),
)
.execute(transaction.deref_mut())
@@ -132,7 +130,7 @@ pub async fn change_password(
.context("Failed to hash password")??;
// Save password to disk
let sql = "UPDATE users SET password = $1 where uid = $2";
let sql = "UPDATE af_user SET password = $1 where uid = $2";
let _ = sqlx::query(sql)
.bind(new_hash_password.expose_secret())
.bind(logged_user.expose_secret())
@@ -155,14 +153,14 @@ pub async fn get_user_email(
let row = sqlx::query!(
r#"
SELECT email
FROM users
FROM af_user
WHERE uid = $1
"#,
uid,
)
.fetch_one(transaction.deref_mut())
.await
.context("Failed to retrieve the username`")?;
.context("Failed to retrieve the user email`")?;
Ok(row.email)
}
@@ -171,7 +169,7 @@ async fn is_email_exist(
transaction: &mut Transaction<'_, Postgres>,
email: &str,
) -> Result<bool, anyhow::Error> {
let result = sqlx::query(r#"SELECT email FROM users WHERE email = $1"#)
let result = sqlx::query(r#"SELECT email FROM af_user WHERE email = $1"#)
.bind(email)
.fetch_optional(transaction.deref_mut())
.await?;