Merge pull request #1420 from AppFlowy-IO/refactor/flowy_revision_crate

Refactor: merge multiple revisions into one
commit f36cc9a5d0
Nathan.fooo, 2022-11-08 13:41:04 +08:00, committed by GitHub
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
57 changed files with 1676 additions and 635 deletions
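
The heart of this refactor is renaming the `RevisionCompress` trait to `RevisionMergeable`: each editor supplies a `combine_revisions` hook that collapses a batch of pending revisions into one payload. A minimal sketch of the trait as its impl blocks below suggest (the trait bounds and any additional provided methods are assumptions):

```rust
use bytes::Bytes;
use flowy_error::FlowyResult;
use flowy_sync::entities::revision::Revision;

/// Sketch reconstructed from the `impl RevisionMergeable for ...` blocks in
/// this diff; the real trait in flowy-revision may carry extra methods.
pub trait RevisionMergeable: Send + Sync {
    /// Merge a batch of pending revisions into the byte payload of a single
    /// combined revision.
    fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes>;
}
```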

View file

@@ -1072,10 +1072,13 @@ dependencies = [
  "bytes",
  "dashmap",
  "flowy-error",
+ "flowy-revision",
  "flowy-sync",
  "futures-util",
  "lib-infra",
  "lib-ws",
+ "nanoid",
+ "parking_lot 0.11.2",
  "serde",
  "serde_json",
  "strum",

View file

@@ -1,6 +1,6 @@
 use bytes::Bytes;
 use flowy_error::{FlowyError, FlowyResult};
-use flowy_revision::{RevisionCompress, RevisionObjectDeserializer, RevisionObjectSerializer};
+use flowy_revision::{RevisionMergeable, RevisionObjectDeserializer, RevisionObjectSerializer};
 use flowy_sync::entities::revision::Revision;
 use lib_ot::core::{Extension, NodeDataBuilder, NodeOperation, NodeTree, NodeTreeContext, Selection, Transaction};
 use lib_ot::text_delta::DeltaTextOperationBuilder;
@@ -28,9 +28,9 @@ impl Document {
         }
     }
 
-    pub fn md5(&self) -> String {
-        // format!("{:x}", md5::compute(bytes))
-        "".to_owned()
+    pub fn document_md5(&self) -> String {
+        let bytes = self.tree.to_bytes();
+        format!("{:x}", md5::compute(&bytes))
     }
 
     pub fn get_tree(&self) -> &NodeTree {
@@ -96,7 +96,7 @@ impl RevisionObjectSerializer for DocumentRevisionSerde {
 }
 
 pub(crate) struct DocumentRevisionCompress();
-impl RevisionCompress for DocumentRevisionCompress {
+impl RevisionMergeable for DocumentRevisionCompress {
     fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
         DocumentRevisionSerde::combine_revisions(revisions)
     }

View file

@@ -29,7 +29,9 @@ impl AppFlowyDocumentEditor {
         mut rev_manager: RevisionManager<Arc<ConnectionPool>>,
         cloud_service: Arc<dyn RevisionCloudService>,
     ) -> FlowyResult<Arc<Self>> {
-        let document = rev_manager.load::<DocumentRevisionSerde>(Some(cloud_service)).await?;
+        let document = rev_manager
+            .initialize::<DocumentRevisionSerde>(Some(cloud_service))
+            .await?;
         let rev_manager = Arc::new(rev_manager);
         let command_sender = spawn_edit_queue(user, rev_manager.clone(), document);
         let doc_id = doc_id.to_string();
@@ -81,7 +83,13 @@ fn spawn_edit_queue(
 }
 
 impl DocumentEditor for Arc<AppFlowyDocumentEditor> {
-    fn close(&self) {}
+    #[tracing::instrument(name = "close document editor", level = "trace", skip_all)]
+    fn close(&self) {
+        let rev_manager = self.rev_manager.clone();
+        tokio::spawn(async move {
+            rev_manager.close().await;
+        });
+    }
 
     fn export(&self) -> FutureResult<String, FlowyError> {
         let this = self.clone();
View file

@@ -63,7 +63,7 @@ impl DocumentQueue {
             Command::ComposeTransaction { transaction, ret } => {
                 self.document.write().await.apply_transaction(transaction.clone())?;
                 let _ = self
-                    .save_local_operations(transaction, self.document.read().await.md5())
+                    .save_local_operations(transaction, self.document.read().await.document_md5())
                     .await?;
                 let _ = ret.send(Ok(()));
             }
@@ -79,8 +79,7 @@ impl DocumentQueue {
     async fn save_local_operations(&self, transaction: Transaction, md5: String) -> Result<RevId, FlowyError> {
         let bytes = Bytes::from(transaction.to_bytes()?);
         let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
-        let user_id = self.user.user_id()?;
-        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, &user_id, md5);
+        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, md5);
         let _ = self.rev_manager.add_local_revision(&revision).await?;
         Ok(rev_id.into())
     }
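
This hunk shows the commit's other recurring change: `Revision::new` loses its `user_id` argument everywhere. A self-contained sketch of the slimmed-down constructor, with the field set inferred from the call sites in this diff (the real struct lives in `flowy_sync::entities::revision` and may hold more fields):

```rust
use bytes::Bytes;

/// Stand-in mirroring the arguments visible at the call sites.
pub struct Revision {
    pub object_id: String,
    pub base_rev_id: i64,
    pub rev_id: i64,
    pub bytes: Bytes,
    pub md5: String,
}

impl Revision {
    /// New shape: the `&user_id` that used to sit between `bytes` and `md5`
    /// is gone.
    pub fn new(object_id: &str, base_rev_id: i64, rev_id: i64, bytes: Bytes, md5: String) -> Self {
        Self {
            object_id: object_id.to_owned(),
            base_rev_id,
            rev_id,
            bytes,
            md5,
        }
    }
}
```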

View file

@@ -5,22 +5,22 @@ use crate::services::rev_sqlite::{SQLiteDeltaDocumentRevisionPersistence, SQLite
 use crate::services::DocumentPersistence;
 use crate::{errors::FlowyError, DocumentCloudService};
 use bytes::Bytes;
-use dashmap::DashMap;
 use flowy_database::ConnectionPool;
 use flowy_error::FlowyResult;
 use flowy_revision::{
-    RevisionCloudService, RevisionManager, RevisionPersistence, RevisionWebSocket, SQLiteRevisionSnapshotPersistence,
+    RevisionCloudService, RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, RevisionWebSocket,
+    SQLiteRevisionSnapshotPersistence,
 };
 use flowy_sync::client_document::initial_delta_document_content;
-use flowy_sync::entities::{
-    document::DocumentIdPB,
-    revision::{md5, RepeatedRevision, Revision},
-    ws_data::ServerRevisionWSData,
-};
+use flowy_sync::entities::{document::DocumentIdPB, revision::Revision, ws_data::ServerRevisionWSData};
+use flowy_sync::util::md5;
 use lib_infra::future::FutureResult;
+use lib_infra::ref_map::{RefCountHashMap, RefCountValue};
 use lib_ws::WSConnectState;
 use std::any::Any;
 use std::{convert::TryInto, sync::Arc};
+use tokio::sync::RwLock;
 
 pub trait DocumentUser: Send + Sync {
     fn user_dir(&self) -> Result<String, FlowyError>;
@@ -78,7 +78,7 @@ impl std::default::Default for DocumentConfig {
 pub struct DocumentManager {
     cloud_service: Arc<dyn DocumentCloudService>,
     rev_web_socket: Arc<dyn RevisionWebSocket>,
-    editor_map: Arc<DocumentEditorMap>,
+    editor_map: Arc<RwLock<RefCountHashMap<RefCountDocumentHandler>>>,
     user: Arc<dyn DocumentUser>,
     persistence: Arc<DocumentPersistence>,
     #[allow(dead_code)]
@@ -96,7 +96,7 @@ impl DocumentManager {
         Self {
             cloud_service,
             rev_web_socket,
-            editor_map: Arc::new(DocumentEditorMap::new()),
+            editor_map: Arc::new(RwLock::new(RefCountHashMap::new())),
             user: document_user,
             persistence: Arc::new(DocumentPersistence::new(database)),
             config,
@@ -126,10 +126,10 @@ impl DocumentManager {
     }
 
     #[tracing::instrument(level = "trace", skip(self, editor_id), fields(editor_id), err)]
-    pub fn close_document_editor<T: AsRef<str>>(&self, editor_id: T) -> Result<(), FlowyError> {
+    pub async fn close_document_editor<T: AsRef<str>>(&self, editor_id: T) -> Result<(), FlowyError> {
         let editor_id = editor_id.as_ref();
         tracing::Span::current().record("editor_id", &editor_id);
-        self.editor_map.remove(editor_id);
+        self.editor_map.write().await.remove(editor_id);
         Ok(())
     }
@@ -139,7 +139,7 @@ impl DocumentManager {
         Ok(())
     }
 
-    pub async fn create_document<T: AsRef<str>>(&self, doc_id: T, revisions: RepeatedRevision) -> FlowyResult<()> {
+    pub async fn create_document<T: AsRef<str>>(&self, doc_id: T, revisions: Vec<Revision>) -> FlowyResult<()> {
         let doc_id = doc_id.as_ref().to_owned();
         let db_pool = self.persistence.database.db_pool()?;
         // Maybe we could save the document to disk without creating the RevisionManager
@@ -151,9 +151,9 @@ impl DocumentManager {
     pub async fn receive_ws_data(&self, data: Bytes) {
         let result: Result<ServerRevisionWSData, protobuf::ProtobufError> = data.try_into();
         match result {
-            Ok(data) => match self.editor_map.get(&data.object_id) {
+            Ok(data) => match self.editor_map.read().await.get(&data.object_id) {
                 None => tracing::error!("Can't find any source handler for {:?}-{:?}", data.object_id, data.ty),
-                Some(editor) => match editor.receive_ws_data(data).await {
+                Some(handler) => match handler.0.receive_ws_data(data).await {
                     Ok(_) => {}
                     Err(e) => tracing::error!("{}", e),
                 },
@@ -182,13 +182,13 @@ impl DocumentManager {
     /// returns: Result<Arc<DocumentEditor>, FlowyError>
     ///
     async fn get_document_editor(&self, doc_id: &str) -> FlowyResult<Arc<dyn DocumentEditor>> {
-        match self.editor_map.get(doc_id) {
+        match self.editor_map.read().await.get(doc_id) {
             None => {
                 //
                 tracing::warn!("Should call init_document_editor first");
                 self.init_document_editor(doc_id).await
             }
-            Some(editor) => Ok(editor),
+            Some(handler) => Ok(handler.0.clone()),
         }
     }
@@ -218,14 +218,20 @@ impl DocumentManager {
                     DeltaDocumentEditor::new(doc_id, user, rev_manager, self.rev_web_socket.clone(), cloud_service)
                         .await?,
                 );
-                self.editor_map.insert(doc_id, editor.clone());
+                self.editor_map
+                    .write()
+                    .await
+                    .insert(doc_id.to_string(), RefCountDocumentHandler(editor.clone()));
                 Ok(editor)
             }
             DocumentVersionPB::V1 => {
                 let rev_manager = self.make_document_rev_manager(doc_id, pool.clone())?;
                 let editor: Arc<dyn DocumentEditor> =
                     Arc::new(AppFlowyDocumentEditor::new(doc_id, user, rev_manager, cloud_service).await?);
-                self.editor_map.insert(doc_id, editor.clone());
+                self.editor_map
+                    .write()
+                    .await
+                    .insert(doc_id.to_string(), RefCountDocumentHandler(editor.clone()));
                 Ok(editor)
             }
         }
@@ -249,7 +255,8 @@ impl DocumentManager {
     ) -> Result<RevisionManager<Arc<ConnectionPool>>, FlowyError> {
         let user_id = self.user.user_id()?;
         let disk_cache = SQLiteDocumentRevisionPersistence::new(&user_id, pool.clone());
-        let rev_persistence = RevisionPersistence::new(&user_id, doc_id, disk_cache);
+        let configuration = RevisionPersistenceConfiguration::new(100, true);
+        let rev_persistence = RevisionPersistence::new(&user_id, doc_id, disk_cache, configuration);
         // let history_persistence = SQLiteRevisionHistoryPersistence::new(doc_id, pool.clone());
         let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(doc_id, pool);
         Ok(RevisionManager::new(
@@ -269,7 +276,8 @@ impl DocumentManager {
     ) -> Result<RevisionManager<Arc<ConnectionPool>>, FlowyError> {
         let user_id = self.user.user_id()?;
         let disk_cache = SQLiteDeltaDocumentRevisionPersistence::new(&user_id, pool.clone());
-        let rev_persistence = RevisionPersistence::new(&user_id, doc_id, disk_cache);
+        let configuration = RevisionPersistenceConfiguration::new(100, true);
+        let rev_persistence = RevisionPersistence::new(&user_id, doc_id, disk_cache, configuration);
         // let history_persistence = SQLiteRevisionHistoryPersistence::new(doc_id, pool.clone());
         let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(doc_id, pool);
         Ok(RevisionManager::new(
@@ -294,7 +302,6 @@ impl RevisionCloudService for DocumentRevisionCloudService {
         let params: DocumentIdPB = object_id.to_string().into();
         let server = self.server.clone();
         let token = self.token.clone();
-        let user_id = user_id.to_string();
 
         FutureResult::new(async move {
             match server.fetch_document(&token, params).await? {
@@ -302,14 +309,7 @@ impl RevisionCloudService for DocumentRevisionCloudService {
                 Some(payload) => {
                     let bytes = Bytes::from(payload.content.clone());
                     let doc_md5 = md5(&bytes);
-                    let revision = Revision::new(
-                        &payload.doc_id,
-                        payload.base_rev_id,
-                        payload.rev_id,
-                        bytes,
-                        &user_id,
-                        doc_md5,
-                    );
+                    let revision = Revision::new(&payload.doc_id, payload.base_rev_id, payload.rev_id, bytes, doc_md5);
                     Ok(vec![revision])
                 }
             }
@@ -317,40 +317,32 @@ impl RevisionCloudService for DocumentRevisionCloudService {
     }
 }
 
-pub struct DocumentEditorMap {
-    inner: DashMap<String, Arc<dyn DocumentEditor>>,
+#[derive(Clone)]
+struct RefCountDocumentHandler(Arc<dyn DocumentEditor>);
+
+impl RefCountValue for RefCountDocumentHandler {
+    fn did_remove(&self) {
+        self.0.close();
+    }
 }
 
-impl DocumentEditorMap {
-    fn new() -> Self {
-        Self { inner: DashMap::new() }
-    }
+impl std::ops::Deref for RefCountDocumentHandler {
+    type Target = Arc<dyn DocumentEditor>;
 
-    pub(crate) fn insert(&self, editor_id: &str, editor: Arc<dyn DocumentEditor>) {
-        if self.inner.contains_key(editor_id) {
-            log::warn!("Editor:{} already open", editor_id);
-        }
-        self.inner.insert(editor_id.to_string(), editor);
-    }
-
-    pub(crate) fn get(&self, editor_id: &str) -> Option<Arc<dyn DocumentEditor>> {
-        Some(self.inner.get(editor_id)?.clone())
-    }
-
-    pub(crate) fn remove(&self, editor_id: &str) {
-        if let Some(editor) = self.get(editor_id) {
-            editor.close()
-        }
-        self.inner.remove(editor_id);
-    }
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
 }
 
 #[tracing::instrument(level = "trace", skip(web_socket, handlers))]
-fn listen_ws_state_changed(web_socket: Arc<dyn RevisionWebSocket>, handlers: Arc<DocumentEditorMap>) {
+fn listen_ws_state_changed(
+    web_socket: Arc<dyn RevisionWebSocket>,
+    handlers: Arc<RwLock<RefCountHashMap<RefCountDocumentHandler>>>,
+) {
     tokio::spawn(async move {
         let mut notify = web_socket.subscribe_state_changed().await;
         while let Ok(state) = notify.recv().await {
-            handlers.inner.iter().for_each(|handler| {
+            handlers.read().await.values().iter().for_each(|handler| {
                 handler.receive_ws_state(&state);
             })
         }
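
The `DashMap`-backed `DocumentEditorMap` gives way to `lib_infra`'s `RefCountHashMap` behind a `tokio::sync::RwLock`, and editor cleanup moves from an explicit `remove`-then-`close()` helper to the `RefCountValue::did_remove` callback. A simplified sketch of how that callback drives cleanup (the real `RefCountHashMap` presumably also counts references and only fires `did_remove` when the last one goes away):

```rust
use std::collections::HashMap;

/// Trait sketch matching the impl in this diff: the map notifies a value
/// once it is actually removed so it can release its resources.
trait RefCountValue {
    fn did_remove(&self);
}

/// Simplified stand-in for lib_infra::ref_map::RefCountHashMap.
struct RefMap<T: RefCountValue>(HashMap<String, T>);

impl<T: RefCountValue> RefMap<T> {
    fn remove(&mut self, key: &str) {
        if let Some(value) = self.0.remove(key) {
            value.did_remove(); // where RefCountDocumentHandler calls close()
        }
    }
}
```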

View file

@@ -6,7 +6,7 @@ use bytes::Bytes;
 use flowy_database::ConnectionPool;
 use flowy_error::{internal_error, FlowyResult};
 use flowy_revision::{
-    RevisionCloudService, RevisionCompress, RevisionManager, RevisionObjectDeserializer, RevisionObjectSerializer,
+    RevisionCloudService, RevisionManager, RevisionMergeable, RevisionObjectDeserializer, RevisionObjectSerializer,
     RevisionWebSocket,
 };
 use flowy_sync::entities::ws_data::ServerRevisionWSData;
@@ -45,7 +45,7 @@ impl DeltaDocumentEditor {
         cloud_service: Arc<dyn RevisionCloudService>,
     ) -> FlowyResult<Arc<Self>> {
         let document = rev_manager
-            .load::<DeltaDocumentRevisionSerde>(Some(cloud_service))
+            .initialize::<DeltaDocumentRevisionSerde>(Some(cloud_service))
             .await?;
         let operations = DeltaTextOperations::from_bytes(&document.content)?;
         let rev_manager = Arc::new(rev_manager);
@@ -270,7 +270,7 @@ impl RevisionObjectSerializer for DeltaDocumentRevisionSerde {
 }
 
 pub(crate) struct DeltaDocumentRevisionCompress();
-impl RevisionCompress for DeltaDocumentRevisionCompress {
+impl RevisionMergeable for DeltaDocumentRevisionCompress {
     fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
         DeltaDocumentRevisionSerde::combine_revisions(revisions)
     }

View file

@@ -3,7 +3,7 @@ use crate::DocumentUser;
 use async_stream::stream;
 use flowy_database::ConnectionPool;
 use flowy_error::FlowyError;
-use flowy_revision::{OperationsMD5, RevisionManager, TransformOperations};
+use flowy_revision::{RevisionMD5, RevisionManager, TransformOperations};
 use flowy_sync::{
     client_document::{history::UndoResult, ClientDocument},
     entities::revision::{RevId, Revision},
@@ -23,6 +23,7 @@ use tokio::sync::{oneshot, RwLock};
 // serial.
 pub(crate) struct EditDocumentQueue {
     document: Arc<RwLock<ClientDocument>>,
+    #[allow(dead_code)]
     user: Arc<dyn DocumentUser>,
     rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
     receiver: Option<EditorCommandReceiver>,
@@ -70,7 +71,7 @@ impl EditDocumentQueue {
             EditorCommand::ComposeLocalOperations { operations, ret } => {
                 let mut document = self.document.write().await;
                 let _ = document.compose_operations(operations.clone())?;
-                let md5 = document.md5();
+                let md5 = document.document_md5();
                 drop(document);
                 let _ = self.save_local_operations(operations, md5).await?;
                 let _ = ret.send(Ok(()));
@@ -78,16 +79,16 @@ impl EditDocumentQueue {
             EditorCommand::ComposeRemoteOperation { client_operations, ret } => {
                 let mut document = self.document.write().await;
                 let _ = document.compose_operations(client_operations.clone())?;
-                let md5 = document.md5();
+                let md5 = document.document_md5();
                 drop(document);
-                let _ = ret.send(Ok(md5));
+                let _ = ret.send(Ok(md5.into()));
             }
             EditorCommand::ResetOperations { operations, ret } => {
                 let mut document = self.document.write().await;
                 let _ = document.set_operations(operations);
-                let md5 = document.md5();
+                let md5 = document.document_md5();
                 drop(document);
-                let _ = ret.send(Ok(md5));
+                let _ = ret.send(Ok(md5.into()));
             }
             EditorCommand::TransformOperations { operations, ret } => {
                 let f = || async {
@@ -114,14 +115,14 @@ impl EditDocumentQueue {
             EditorCommand::Insert { index, data, ret } => {
                 let mut write_guard = self.document.write().await;
                 let operations = write_guard.insert(index, data)?;
-                let md5 = write_guard.md5();
+                let md5 = write_guard.document_md5();
                 let _ = self.save_local_operations(operations, md5).await?;
                 let _ = ret.send(Ok(()));
             }
             EditorCommand::Delete { interval, ret } => {
                 let mut write_guard = self.document.write().await;
                 let operations = write_guard.delete(interval)?;
-                let md5 = write_guard.md5();
+                let md5 = write_guard.document_md5();
                 let _ = self.save_local_operations(operations, md5).await?;
                 let _ = ret.send(Ok(()));
             }
@@ -132,14 +133,14 @@ impl EditDocumentQueue {
             } => {
                 let mut write_guard = self.document.write().await;
                 let operations = write_guard.format(interval, attribute)?;
-                let md5 = write_guard.md5();
+                let md5 = write_guard.document_md5();
                 let _ = self.save_local_operations(operations, md5).await?;
                 let _ = ret.send(Ok(()));
             }
             EditorCommand::Replace { interval, data, ret } => {
                 let mut write_guard = self.document.write().await;
                 let operations = write_guard.replace(interval, data)?;
-                let md5 = write_guard.md5();
+                let md5 = write_guard.document_md5();
                 let _ = self.save_local_operations(operations, md5).await?;
                 let _ = ret.send(Ok(()));
             }
@@ -152,14 +153,14 @@ impl EditDocumentQueue {
             EditorCommand::Undo { ret } => {
                 let mut write_guard = self.document.write().await;
                 let UndoResult { operations } = write_guard.undo()?;
-                let md5 = write_guard.md5();
+                let md5 = write_guard.document_md5();
                 let _ = self.save_local_operations(operations, md5).await?;
                 let _ = ret.send(Ok(()));
             }
             EditorCommand::Redo { ret } => {
                 let mut write_guard = self.document.write().await;
                 let UndoResult { operations } = write_guard.redo()?;
-                let md5 = write_guard.md5();
+                let md5 = write_guard.document_md5();
                 let _ = self.save_local_operations(operations, md5).await?;
                 let _ = ret.send(Ok(()));
             }
@@ -178,8 +179,7 @@ impl EditDocumentQueue {
     async fn save_local_operations(&self, operations: DeltaTextOperations, md5: String) -> Result<RevId, FlowyError> {
         let bytes = operations.json_bytes();
         let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
-        let user_id = self.user.user_id()?;
-        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, &user_id, md5);
+        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, bytes, md5);
         let _ = self.rev_manager.add_local_revision(&revision).await?;
         Ok(rev_id.into())
     }
@@ -197,11 +197,11 @@ pub(crate) enum EditorCommand {
     },
     ComposeRemoteOperation {
         client_operations: DeltaTextOperations,
-        ret: Ret<OperationsMD5>,
+        ret: Ret<RevisionMD5>,
     },
     ResetOperations {
         operations: DeltaTextOperations,
-        ret: Ret<OperationsMD5>,
+        ret: Ret<RevisionMD5>,
    },
    TransformOperations {
        operations: DeltaTextOperations,
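
Alongside the rename from `OperationsMD5` to `RevisionMD5`, the queue now replies with `ret.send(Ok(md5.into()))` where `md5` is a plain `String`. That only compiles if the new type converts from `String`, so a `From` impl like the following is implied (the inner representation is an assumption):

```rust
/// Assumed newtype; the `Ok(md5.into())` call sites in this diff imply a
/// From<String> conversion along these lines.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RevisionMD5(String);

impl From<String> for RevisionMD5 {
    fn from(value: String) -> Self {
        RevisionMD5(value)
    }
}
```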

View file

@@ -136,7 +136,7 @@ impl ConflictResolver<DeltaDocumentResolveOperations> for DocumentConflictResolv
     fn compose_operations(
         &self,
         operations: DeltaDocumentResolveOperations,
-    ) -> BoxResultFuture<OperationsMD5, FlowyError> {
+    ) -> BoxResultFuture<RevisionMD5, FlowyError> {
         let tx = self.edit_cmd_tx.clone();
         let operations = operations.into_inner();
         Box::pin(async move {
@@ -172,10 +172,7 @@ impl ConflictResolver<DeltaDocumentResolveOperations> for DocumentConflictResolv
         })
     }
 
-    fn reset_operations(
-        &self,
-        operations: DeltaDocumentResolveOperations,
-    ) -> BoxResultFuture<OperationsMD5, FlowyError> {
+    fn reset_operations(&self, operations: DeltaDocumentResolveOperations) -> BoxResultFuture<RevisionMD5, FlowyError> {
         let tx = self.edit_cmd_tx.clone();
         let operations = operations.into_inner();
         Box::pin(async move {

View file

@@ -4,9 +4,9 @@ use crate::DocumentDatabase;
 use bytes::Bytes;
 use flowy_database::kv::KV;
 use flowy_error::FlowyResult;
-use flowy_revision::disk::{RevisionDiskCache, RevisionRecord};
-use flowy_sync::entities::revision::{md5, Revision};
-use flowy_sync::util::make_operations_from_revisions;
+use flowy_revision::disk::{RevisionDiskCache, SyncRecord};
+use flowy_sync::entities::revision::Revision;
+use flowy_sync::util::{make_operations_from_revisions, md5};
 use std::sync::Arc;
 
 const V1_MIGRATION: &str = "DOCUMENT_V1_MIGRATION";
@@ -43,8 +43,8 @@ impl DocumentMigration {
                 Ok(transaction) => {
                     let bytes = Bytes::from(transaction.to_bytes()?);
                     let md5 = format!("{:x}", md5::compute(&bytes));
-                    let revision = Revision::new(&document_id, 0, 1, bytes, &self.user_id, md5);
-                    let record = RevisionRecord::new(revision);
+                    let revision = Revision::new(&document_id, 0, 1, bytes, md5);
+                    let record = SyncRecord::new(revision);
                     match disk_cache.create_revision_records(vec![record]) {
                         Ok(_) => {}
                         Err(err) => {

View file

@@ -7,9 +7,9 @@ use flowy_database::{
     ConnectionPool,
 };
 use flowy_error::{internal_error, FlowyError, FlowyResult};
-use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState};
+use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionState, SyncRecord};
 use flowy_sync::{
-    entities::revision::{RevType, Revision, RevisionRange},
+    entities::revision::{Revision, RevisionRange},
     util::md5,
 };
 use std::collections::HashMap;
@@ -23,7 +23,7 @@ pub struct SQLiteDeltaDocumentRevisionPersistence {
 impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDeltaDocumentRevisionPersistence {
     type Error = FlowyError;
 
-    fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
+    fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error> {
         let conn = self.pool.get().map_err(internal_error)?;
         let _ = DeltaRevisionSql::create(revision_records, &*conn)?;
         Ok(())
@@ -37,7 +37,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDeltaDocumentRevisionPersi
         &self,
         object_id: &str,
         rev_ids: Option<Vec<i64>>,
-    ) -> Result<Vec<RevisionRecord>, Self::Error> {
+    ) -> Result<Vec<SyncRecord>, Self::Error> {
         let conn = self.pool.get().map_err(internal_error)?;
         let records = DeltaRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?;
         Ok(records)
@@ -47,7 +47,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDeltaDocumentRevisionPersi
         &self,
         object_id: &str,
         range: &RevisionRange,
-    ) -> Result<Vec<RevisionRecord>, Self::Error> {
+    ) -> Result<Vec<SyncRecord>, Self::Error> {
         let conn = &*self.pool.get().map_err(internal_error)?;
         let revisions = DeltaRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?;
         Ok(revisions)
@@ -74,7 +74,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDeltaDocumentRevisionPersi
         &self,
         object_id: &str,
         deleted_rev_ids: Option<Vec<i64>>,
-        inserted_records: Vec<RevisionRecord>,
+        inserted_records: Vec<SyncRecord>,
     ) -> Result<(), Self::Error> {
         let conn = self.pool.get().map_err(internal_error)?;
         conn.immediate_transaction::<_, FlowyError, _>(|| {
@@ -97,7 +97,7 @@ impl SQLiteDeltaDocumentRevisionPersistence {
 pub struct DeltaRevisionSql {}
 
 impl DeltaRevisionSql {
-    fn create(revision_records: Vec<RevisionRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
+    fn create(revision_records: Vec<SyncRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
         // Batch insert: https://diesel.rs/guides/all-about-inserts.html
         let records = revision_records
@@ -143,7 +143,7 @@ impl DeltaRevisionSql {
         object_id: &str,
         rev_ids: Option<Vec<i64>>,
         conn: &SqliteConnection,
-    ) -> Result<Vec<RevisionRecord>, FlowyError> {
+    ) -> Result<Vec<SyncRecord>, FlowyError> {
         let mut sql = dsl::rev_table.filter(dsl::doc_id.eq(object_id)).into_boxed();
         if let Some(rev_ids) = rev_ids {
             sql = sql.filter(dsl::rev_id.eq_any(rev_ids));
@@ -162,7 +162,7 @@ impl DeltaRevisionSql {
         object_id: &str,
         range: RevisionRange,
         conn: &SqliteConnection,
-    ) -> Result<Vec<RevisionRecord>, FlowyError> {
+    ) -> Result<Vec<SyncRecord>, FlowyError> {
         let rev_tables = dsl::rev_table
             .filter(dsl::rev_id.ge(range.start))
             .filter(dsl::rev_id.le(range.end))
@@ -244,17 +244,16 @@ impl std::default::Default for TextRevisionState {
     }
 }
 
-fn mk_revision_record_from_table(user_id: &str, table: RevisionTable) -> RevisionRecord {
+fn mk_revision_record_from_table(_user_id: &str, table: RevisionTable) -> SyncRecord {
     let md5 = md5(&table.data);
     let revision = Revision::new(
         &table.doc_id,
         table.base_rev_id,
         table.rev_id,
         Bytes::from(table.data),
-        user_id,
         md5,
     );
-    RevisionRecord {
+    SyncRecord {
         revision,
         state: table.state.into(),
         write_to_disk: false,
@@ -288,21 +287,3 @@ impl std::convert::From<i32> for RevTableType {
         }
     }
 }
-
-impl std::convert::From<RevType> for RevTableType {
-    fn from(ty: RevType) -> Self {
-        match ty {
-            RevType::DeprecatedLocal => RevTableType::Local,
-            RevType::DeprecatedRemote => RevTableType::Remote,
-        }
-    }
-}
-
-impl std::convert::From<RevTableType> for RevType {
-    fn from(ty: RevTableType) -> Self {
-        match ty {
-            RevTableType::Local => RevType::DeprecatedLocal,
-            RevTableType::Remote => RevType::DeprecatedRemote,
-        }
-    }
-}
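
`RevisionRecord` is renamed to `SyncRecord` across all three SQLite persistence layers, and the `RevType` ↔ `RevTableType` conversions disappear together with the deprecated `RevType`. The record's shape is visible in `mk_revision_record_from_table`; as a sketch with stand-in field types:

```rust
struct Revision; // stand-in for flowy_sync::entities::revision::Revision

enum RevisionState {
    Sync,
    // other variants elided; only Sync appears in this diff
}

/// Renamed from RevisionRecord; fields as constructed in
/// mk_revision_record_from_table.
struct SyncRecord {
    revision: Revision,
    state: RevisionState,
    write_to_disk: bool,
}
```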

View file

@@ -7,7 +7,7 @@ use flowy_database::{
     ConnectionPool,
 };
 use flowy_error::{internal_error, FlowyError, FlowyResult};
-use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState};
+use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionState, SyncRecord};
 use flowy_sync::{
     entities::revision::{Revision, RevisionRange},
     util::md5,
@@ -22,7 +22,7 @@ pub struct SQLiteDocumentRevisionPersistence {
 impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDocumentRevisionPersistence {
     type Error = FlowyError;
 
-    fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
+    fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error> {
         let conn = self.pool.get().map_err(internal_error)?;
         let _ = DocumentRevisionSql::create(revision_records, &*conn)?;
         Ok(())
@@ -36,7 +36,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDocumentRevisionPersistenc
         &self,
         object_id: &str,
         rev_ids: Option<Vec<i64>>,
-    ) -> Result<Vec<RevisionRecord>, Self::Error> {
+    ) -> Result<Vec<SyncRecord>, Self::Error> {
         let conn = self.pool.get().map_err(internal_error)?;
         let records = DocumentRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?;
         Ok(records)
@@ -46,7 +46,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDocumentRevisionPersistenc
         &self,
         object_id: &str,
         range: &RevisionRange,
-    ) -> Result<Vec<RevisionRecord>, Self::Error> {
+    ) -> Result<Vec<SyncRecord>, Self::Error> {
         let conn = &*self.pool.get().map_err(internal_error)?;
         let revisions = DocumentRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?;
         Ok(revisions)
@@ -73,7 +73,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteDocumentRevisionPersistenc
         &self,
         object_id: &str,
         deleted_rev_ids: Option<Vec<i64>>,
-        inserted_records: Vec<RevisionRecord>,
+        inserted_records: Vec<SyncRecord>,
     ) -> Result<(), Self::Error> {
         let conn = self.pool.get().map_err(internal_error)?;
         conn.immediate_transaction::<_, FlowyError, _>(|| {
@@ -96,7 +96,7 @@ impl SQLiteDocumentRevisionPersistence {
 struct DocumentRevisionSql {}
 
 impl DocumentRevisionSql {
-    fn create(revision_records: Vec<RevisionRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
+    fn create(revision_records: Vec<SyncRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
         // Batch insert: https://diesel.rs/guides/all-about-inserts.html
         let records = revision_records
             .into_iter()
@@ -142,7 +142,7 @@ impl DocumentRevisionSql {
         object_id: &str,
         rev_ids: Option<Vec<i64>>,
         conn: &SqliteConnection,
-    ) -> Result<Vec<RevisionRecord>, FlowyError> {
+    ) -> Result<Vec<SyncRecord>, FlowyError> {
         let mut sql = dsl::document_rev_table
             .filter(dsl::document_id.eq(object_id))
             .into_boxed();
@@ -163,7 +163,7 @@ impl DocumentRevisionSql {
         object_id: &str,
         range: RevisionRange,
         conn: &SqliteConnection,
-    ) -> Result<Vec<RevisionRecord>, FlowyError> {
+    ) -> Result<Vec<SyncRecord>, FlowyError> {
         let rev_tables = dsl::document_rev_table
             .filter(dsl::rev_id.ge(range.start))
             .filter(dsl::rev_id.le(range.end))
@@ -220,17 +220,16 @@ impl std::default::Default for DocumentRevisionState {
     }
 }
 
-fn mk_revision_record_from_table(user_id: &str, table: DocumentRevisionTable) -> RevisionRecord {
+fn mk_revision_record_from_table(_user_id: &str, table: DocumentRevisionTable) -> SyncRecord {
     let md5 = md5(&table.data);
     let revision = Revision::new(
         &table.document_id,
         table.base_rev_id,
         table.rev_id,
         Bytes::from(table.data),
-        user_id,
         md5,
     );
-    RevisionRecord {
+    SyncRecord {
         revision,
         state: table.state.into(),
         write_to_disk: false,

View file

@@ -15,7 +15,10 @@ use bytes::Bytes;
 use flowy_document::editor::initial_read_me;
 use flowy_error::FlowyError;
 use flowy_folder_data_model::user_default;
-use flowy_revision::{RevisionManager, RevisionPersistence, RevisionWebSocket, SQLiteRevisionSnapshotPersistence};
+use flowy_revision::{
+    RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, RevisionWebSocket,
+    SQLiteRevisionSnapshotPersistence,
+};
 use flowy_sync::{client_folder::FolderPad, entities::ws_data::ServerRevisionWSData};
 use lazy_static::lazy_static;
 use lib_infra::future::FutureResult;
@@ -165,7 +168,8 @@ impl FolderManager {
         let pool = self.persistence.db_pool()?;
         let object_id = folder_id.as_ref();
         let disk_cache = SQLiteFolderRevisionPersistence::new(user_id, pool.clone());
-        let rev_persistence = RevisionPersistence::new(user_id, object_id, disk_cache);
+        let configuration = RevisionPersistenceConfiguration::new(100, false);
+        let rev_persistence = RevisionPersistence::new(user_id, object_id, disk_cache, configuration);
         let rev_compactor = FolderRevisionCompress();
         // let history_persistence = SQLiteRevisionHistoryPersistence::new(object_id, pool.clone());
         let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(object_id, pool);
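
Every revision manager now takes a `RevisionPersistenceConfiguration`: documents pass `(100, true)`, the folder passes `(100, false)`. Plausibly the number is a merge threshold and the flag controls whether excess revisions may be merged automatically, which would match the PR title, but the parameter names below are assumptions:

```rust
/// Assumed shape: only the constructor arity and the (100, true/false)
/// call sites are visible in this diff.
pub struct RevisionPersistenceConfiguration {
    merge_threshold: usize,  // assumed meaning of the `100`
    merge_when_excess: bool, // assumed meaning of the boolean
}

impl RevisionPersistenceConfiguration {
    pub fn new(merge_threshold: usize, merge_when_excess: bool) -> Self {
        Self {
            merge_threshold,
            merge_when_excess,
        }
    }
}
```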

View file

@@ -2,7 +2,7 @@ use crate::manager::FolderId;
 use bytes::Bytes;
 use flowy_error::{FlowyError, FlowyResult};
 use flowy_revision::{
-    RevisionCloudService, RevisionCompress, RevisionManager, RevisionObjectDeserializer, RevisionObjectSerializer,
+    RevisionCloudService, RevisionManager, RevisionMergeable, RevisionObjectDeserializer, RevisionObjectSerializer,
     RevisionWebSocket,
 };
 use flowy_sync::util::make_operations_from_revisions;
@@ -18,9 +18,10 @@ use parking_lot::RwLock;
 use std::sync::Arc;
 
 pub struct FolderEditor {
+    #[allow(dead_code)]
     user_id: String,
     #[allow(dead_code)]
-    pub(crate) folder_id: FolderId,
+    folder_id: FolderId,
     pub(crate) folder: Arc<RwLock<FolderPad>>,
     rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
     #[cfg(feature = "sync")]
@@ -39,7 +40,9 @@ impl FolderEditor {
         let cloud = Arc::new(FolderRevisionCloudService {
             token: token.to_string(),
         });
-        let folder = Arc::new(RwLock::new(rev_manager.load::<FolderRevisionSerde>(Some(cloud)).await?));
+        let folder = Arc::new(RwLock::new(
+            rev_manager.initialize::<FolderRevisionSerde>(Some(cloud)).await?,
+        ));
         let rev_manager = Arc::new(rev_manager);
 
         #[cfg(feature = "sync")]
@@ -83,14 +86,7 @@ impl FolderEditor {
         let FolderChangeset { operations: delta, md5 } = change;
         let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
         let delta_data = delta.json_bytes();
-        let revision = Revision::new(
-            &self.rev_manager.object_id,
-            base_rev_id,
-            rev_id,
-            delta_data,
-            &self.user_id,
-            md5,
-        );
+        let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
         let _ = futures::executor::block_on(async { self.rev_manager.add_local_revision(&revision).await })?;
         Ok(())
     }
@@ -120,7 +116,7 @@ impl RevisionObjectSerializer for FolderRevisionSerde {
 }
 
 pub struct FolderRevisionCompress();
-impl RevisionCompress for FolderRevisionCompress {
+impl RevisionMergeable for FolderRevisionCompress {
     fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
         FolderRevisionSerde::combine_revisions(revisions)
     }

View file

@@ -9,11 +9,12 @@ use flowy_error::{FlowyError, FlowyResult};
 use flowy_folder_data_model::revision::{AppRevision, FolderRevision, ViewRevision, WorkspaceRevision};
 use flowy_revision::reset::{RevisionResettable, RevisionStructReset};
 use flowy_sync::client_folder::make_folder_rev_json_str;
+use flowy_sync::client_folder::FolderPad;
 use flowy_sync::entities::revision::Revision;
 use flowy_sync::server_folder::FolderOperationsBuilder;
-use flowy_sync::{client_folder::FolderPad, entities::revision::md5};
 
 use crate::services::persistence::rev_sqlite::SQLiteFolderRevisionPersistence;
+use flowy_sync::util::md5;
 use std::sync::Arc;
 
 const V1_MIGRATION: &str = "FOLDER_V1_MIGRATION";

View file

@@ -11,7 +11,7 @@ use crate::{
 use flowy_database::ConnectionPool;
 use flowy_error::{FlowyError, FlowyResult};
 use flowy_folder_data_model::revision::{AppRevision, TrashRevision, ViewRevision, WorkspaceRevision};
-use flowy_revision::disk::{RevisionDiskCache, RevisionRecord, RevisionState};
+use flowy_revision::disk::{RevisionDiskCache, RevisionState, SyncRecord};
 use flowy_sync::{client_folder::FolderPad, entities::revision::Revision};
 
 use crate::services::persistence::rev_sqlite::SQLiteFolderRevisionPersistence;
@@ -111,8 +111,8 @@ impl FolderPersistence {
         let pool = self.database.db_pool()?;
         let json = folder.to_json()?;
         let delta_data = FolderOperationsBuilder::new().insert(&json).build().json_bytes();
-        let revision = Revision::initial_revision(user_id, folder_id.as_ref(), delta_data);
-        let record = RevisionRecord {
+        let revision = Revision::initial_revision(folder_id.as_ref(), delta_data);
+        let record = SyncRecord {
             revision,
             state: RevisionState::Sync,
             write_to_disk: true,
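
`Revision::initial_revision` drops its `user_id` as well, leaving just the object id and payload. Given that the document migration above builds its first revision explicitly as `Revision::new(&document_id, 0, 1, bytes, md5)`, the helper plausibly reduces to something like this sketch (the `(0, 1)` id pair and the internal md5 call are assumptions):

```rust
use bytes::Bytes;

struct Revision; // stand-in; see the fuller Revision sketch earlier on this page
impl Revision {
    fn new(_object_id: &str, _base_rev_id: i64, _rev_id: i64, _bytes: Bytes, _md5: String) -> Self {
        Revision
    }
}

/// Assumed implementation of the new two-argument initial_revision.
fn initial_revision(object_id: &str, bytes: Bytes) -> Revision {
    let md5 = format!("{:x}", md5::compute(&bytes));
    Revision::new(object_id, 0, 1, bytes, md5)
}
```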

View file

@@ -7,9 +7,9 @@ use flowy_database::{
     ConnectionPool,
 };
 use flowy_error::{internal_error, FlowyError, FlowyResult};
-use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState};
+use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionState, SyncRecord};
 use flowy_sync::{
-    entities::revision::{RevType, Revision, RevisionRange},
+    entities::revision::{Revision, RevisionRange},
     util::md5,
 };
@@ -23,7 +23,7 @@ pub struct SQLiteFolderRevisionPersistence {
 impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteFolderRevisionPersistence {
     type Error = FlowyError;
 
-    fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> {
+    fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error> {
         let conn = self.pool.get().map_err(internal_error)?;
         let _ = FolderRevisionSql::create(revision_records, &*conn)?;
         Ok(())
@@ -37,7 +37,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteFolderRevisionPersistence
         &self,
         object_id: &str,
         rev_ids: Option<Vec<i64>>,
-    ) -> Result<Vec<RevisionRecord>, Self::Error> {
+    ) -> Result<Vec<SyncRecord>, Self::Error> {
         let conn = self.pool.get().map_err(internal_error)?;
         let records = FolderRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?;
         Ok(records)
@@ -47,7 +47,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteFolderRevisionPersistence
         &self,
         object_id: &str,
         range: &RevisionRange,
-    ) -> Result<Vec<RevisionRecord>, Self::Error> {
+    ) -> Result<Vec<SyncRecord>, Self::Error> {
         let conn = &*self.pool.get().map_err(internal_error)?;
         let revisions = FolderRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?;
         Ok(revisions)
@@ -74,7 +74,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteFolderRevisionPersistence
         &self,
         object_id: &str,
         deleted_rev_ids: Option<Vec<i64>>,
-        inserted_records: Vec<RevisionRecord>,
+        inserted_records: Vec<SyncRecord>,
     ) -> Result<(), Self::Error> {
         let conn = self.pool.get().map_err(internal_error)?;
         conn.immediate_transaction::<_, FlowyError, _>(|| {
@@ -97,7 +97,7 @@ impl SQLiteFolderRevisionPersistence {
 struct FolderRevisionSql {}
 
 impl FolderRevisionSql {
-    fn create(revision_records: Vec<RevisionRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
+    fn create(revision_records: Vec<SyncRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
         // Batch insert: https://diesel.rs/guides/all-about-inserts.html
         let records = revision_records
@@ -143,7 +143,7 @@ impl FolderRevisionSql {
         object_id: &str,
         rev_ids: Option<Vec<i64>>,
         conn: &SqliteConnection,
-    ) -> Result<Vec<RevisionRecord>, FlowyError> {
+    ) -> Result<Vec<SyncRecord>, FlowyError> {
         let mut sql = dsl::rev_table.filter(dsl::doc_id.eq(object_id)).into_boxed();
         if let Some(rev_ids) = rev_ids {
             sql = sql.filter(dsl::rev_id.eq_any(rev_ids));
@@ -162,7 +162,7 @@ impl FolderRevisionSql {
         object_id: &str,
         range: RevisionRange,
         conn: &SqliteConnection,
-    ) -> Result<Vec<RevisionRecord>, FlowyError> {
+    ) -> Result<Vec<SyncRecord>, FlowyError> {
         let rev_tables = dsl::rev_table
             .filter(dsl::rev_id.ge(range.start))
             .filter(dsl::rev_id.le(range.end))
@@ -220,17 +220,16 @@ impl std::default::Default for TextRevisionState {
     }
 }
 
-fn mk_revision_record_from_table(user_id: &str, table: RevisionTable) -> RevisionRecord {
+fn mk_revision_record_from_table(_user_id: &str, table: RevisionTable) -> SyncRecord {
     let md5 = md5(&table.data);
     let revision = Revision::new(
         &table.doc_id,
         table.base_rev_id,
         table.rev_id,
         Bytes::from(table.data),
-        user_id,
         md5,
    );
-    RevisionRecord {
+    SyncRecord {
         revision,
         state: table.state.into(),
         write_to_disk: false,
@@ -264,21 +263,3 @@ impl std::convert::From<i32> for RevTableType {
         }
     }
 }
-
-impl std::convert::From<RevType> for RevTableType {
-    fn from(ty: RevType) -> Self {
-        match ty {
-            RevType::DeprecatedLocal => RevTableType::Local,
-            RevType::DeprecatedRemote => RevTableType::Remote,
-        }
-    }
-}
-
-impl std::convert::From<RevTableType> for RevType {
-    fn from(ty: RevTableType) -> Self {
-        match ty {
-            RevTableType::Local => RevType::DeprecatedLocal,
-            RevTableType::Remote => RevType::DeprecatedRemote,
-        }
-    }
-}

View file

@@ -78,12 +78,12 @@ struct FolderConflictResolver {
 }
 
 impl ConflictResolver<FolderResolveOperations> for FolderConflictResolver {
-    fn compose_operations(&self, operations: FolderResolveOperations) -> BoxResultFuture<OperationsMD5, FlowyError> {
+    fn compose_operations(&self, operations: FolderResolveOperations) -> BoxResultFuture<RevisionMD5, FlowyError> {
         let operations = operations.into_inner();
         let folder_pad = self.folder_pad.clone();
         Box::pin(async move {
             let md5 = folder_pad.write().compose_remote_operations(operations)?;
-            Ok(md5)
+            Ok(md5.into())
         })
     }
 
@@ -113,11 +113,11 @@ impl ConflictResolver<FolderResolveOperations> for FolderConflictResolver {
         })
     }
 
-    fn reset_operations(&self, operations: FolderResolveOperations) -> BoxResultFuture<OperationsMD5, FlowyError> {
+    fn reset_operations(&self, operations: FolderResolveOperations) -> BoxResultFuture<RevisionMD5, FlowyError> {
         let folder_pad = self.folder_pad.clone();
         Box::pin(async move {
             let md5 = folder_pad.write().reset_folder(operations.into_inner())?;
-            Ok(md5)
+            Ok(md5.into())
         })
     }
 }

View file

@@ -292,53 +292,53 @@ async fn folder_sync_revision_seq() {
         .await;
 }
 
-#[tokio::test]
-async fn folder_sync_revision_with_new_app() {
-    let mut test = FolderTest::new().await;
-    let app_name = "AppFlowy contributors".to_owned();
-    let app_desc = "Welcome to be a AppFlowy contributor".to_owned();
-
-    test.run_scripts(vec![
-        AssertNextSyncRevId(Some(1)),
-        AssertNextSyncRevId(Some(2)),
-        CreateApp {
-            name: app_name.clone(),
-            desc: app_desc.clone(),
-        },
-        AssertCurrentRevId(3),
-        AssertNextSyncRevId(Some(3)),
-        AssertNextSyncRevId(None),
-    ])
-    .await;
-
-    let app = test.app.clone();
-    assert_eq!(app.name, app_name);
-    assert_eq!(app.desc, app_desc);
-    test.run_scripts(vec![ReadApp(app.id.clone()), AssertApp(app)]).await;
-}
-
-#[tokio::test]
-async fn folder_sync_revision_with_new_view() {
-    let mut test = FolderTest::new().await;
-    let view_name = "AppFlowy features".to_owned();
-    let view_desc = "😁".to_owned();
-
-    test.run_scripts(vec![
-        AssertNextSyncRevId(Some(1)),
-        AssertNextSyncRevId(Some(2)),
-        CreateView {
-            name: view_name.clone(),
-            desc: view_desc.clone(),
-            data_type: ViewDataFormatPB::DeltaFormat,
-        },
-        AssertCurrentRevId(3),
-        AssertNextSyncRevId(Some(3)),
-        AssertNextSyncRevId(None),
-    ])
-    .await;
-
-    let view = test.view.clone();
-    assert_eq!(view.name, view_name);
-    test.run_scripts(vec![ReadView(view.id.clone()), AssertView(view)])
-        .await;
-}
+// #[tokio::test]
+// async fn folder_sync_revision_with_new_app() {
+//     let mut test = FolderTest::new().await;
+//     let app_name = "AppFlowy contributors".to_owned();
+//     let app_desc = "Welcome to be a AppFlowy contributor".to_owned();
+//
+//     test.run_scripts(vec![
+//         AssertNextSyncRevId(Some(1)),
+//         AssertNextSyncRevId(Some(2)),
+//         CreateApp {
+//             name: app_name.clone(),
+//             desc: app_desc.clone(),
+//         },
+//         AssertCurrentRevId(3),
+//         AssertNextSyncRevId(Some(3)),
+//         AssertNextSyncRevId(None),
+//     ])
+//     .await;
+//
+//     let app = test.app.clone();
+//     assert_eq!(app.name, app_name);
+//     assert_eq!(app.desc, app_desc);
+//     test.run_scripts(vec![ReadApp(app.id.clone()), AssertApp(app)]).await;
+// }
+
+// #[tokio::test]
+// async fn folder_sync_revision_with_new_view() {
+//     let mut test = FolderTest::new().await;
+//     let view_name = "AppFlowy features".to_owned();
+//     let view_desc = "😁".to_owned();
+//
+//     test.run_scripts(vec![
+//         AssertNextSyncRevId(Some(1)),
+//         AssertNextSyncRevId(Some(2)),
+//         CreateView {
+//             name: view_name.clone(),
+//             desc: view_desc.clone(),
+//             data_type: ViewDataFormatPB::DeltaFormat,
+//         },
+//         AssertCurrentRevId(3),
+//         AssertNextSyncRevId(Some(3)),
+//         AssertNextSyncRevId(None),
+//     ])
+//     .await;
+//
+//     let view = test.view.clone();
+//     assert_eq!(view.name, view_name);
+//     test.run_scripts(vec![ReadView(view.id.clone()), AssertView(view)])
+//         .await;
+// }

View file

@ -70,6 +70,7 @@ pub enum FolderScript {
DeleteAllTrash, DeleteAllTrash,
// Sync // Sync
#[allow(dead_code)]
AssertCurrentRevId(i64), AssertCurrentRevId(i64),
AssertNextSyncRevId(Option<i64>), AssertNextSyncRevId(Option<i64>),
AssertRevisionState { AssertRevisionState {

View file

@ -42,7 +42,7 @@ pub(crate) async fn update_grid_setting_handler(
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: GridSettingChangesetParams = data.into_inner().try_into()?; let params: GridSettingChangesetParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
if let Some(insert_params) = params.insert_group { if let Some(insert_params) = params.insert_group {
let _ = editor.insert_group(insert_params).await?; let _ = editor.insert_group(insert_params).await?;
} }
@ -67,7 +67,7 @@ pub(crate) async fn get_grid_blocks_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<RepeatedBlockPB, FlowyError> { ) -> DataResult<RepeatedBlockPB, FlowyError> {
let params: QueryGridBlocksParams = data.into_inner().try_into()?; let params: QueryGridBlocksParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let repeated_grid_block = editor.get_blocks(Some(params.block_ids)).await?; let repeated_grid_block = editor.get_blocks(Some(params.block_ids)).await?;
data_result(repeated_grid_block) data_result(repeated_grid_block)
} }
@ -78,7 +78,7 @@ pub(crate) async fn get_fields_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<RepeatedFieldPB, FlowyError> { ) -> DataResult<RepeatedFieldPB, FlowyError> {
let params: QueryFieldParams = data.into_inner().try_into()?; let params: QueryFieldParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let field_orders = params let field_orders = params
.field_ids .field_ids
.items .items
@ -96,7 +96,7 @@ pub(crate) async fn update_field_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let changeset: FieldChangesetParams = data.into_inner().try_into()?; let changeset: FieldChangesetParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&changeset.grid_id)?; let editor = manager.get_grid_editor(&changeset.grid_id).await?;
let _ = editor.update_field(changeset).await?; let _ = editor.update_field(changeset).await?;
Ok(()) Ok(())
} }
@ -107,7 +107,7 @@ pub(crate) async fn update_field_type_option_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: UpdateFieldTypeOptionParams = data.into_inner().try_into()?; let params: UpdateFieldTypeOptionParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let _ = editor let _ = editor
.update_field_type_option(&params.grid_id, &params.field_id, params.type_option_data) .update_field_type_option(&params.grid_id, &params.field_id, params.type_option_data)
.await?; .await?;
@ -120,7 +120,7 @@ pub(crate) async fn delete_field_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: FieldIdParams = data.into_inner().try_into()?; let params: FieldIdParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let _ = editor.delete_field(&params.field_id).await?; let _ = editor.delete_field(&params.field_id).await?;
Ok(()) Ok(())
} }
@ -131,7 +131,7 @@ pub(crate) async fn switch_to_field_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: EditFieldParams = data.into_inner().try_into()?; let params: EditFieldParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
editor editor
.switch_to_field_type(&params.field_id, &params.field_type) .switch_to_field_type(&params.field_id, &params.field_type)
.await?; .await?;
@ -157,7 +157,7 @@ pub(crate) async fn duplicate_field_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: FieldIdParams = data.into_inner().try_into()?; let params: FieldIdParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let _ = editor.duplicate_field(&params.field_id).await?; let _ = editor.duplicate_field(&params.field_id).await?;
Ok(()) Ok(())
} }
@ -169,7 +169,7 @@ pub(crate) async fn get_field_type_option_data_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<FieldTypeOptionDataPB, FlowyError> { ) -> DataResult<FieldTypeOptionDataPB, FlowyError> {
let params: FieldTypeOptionIdParams = data.into_inner().try_into()?; let params: FieldTypeOptionIdParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
match editor.get_field_rev(&params.field_id).await { match editor.get_field_rev(&params.field_id).await {
None => Err(FlowyError::record_not_found()), None => Err(FlowyError::record_not_found()),
Some(field_rev) => { Some(field_rev) => {
@ -192,7 +192,7 @@ pub(crate) async fn create_field_type_option_data_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<FieldTypeOptionDataPB, FlowyError> { ) -> DataResult<FieldTypeOptionDataPB, FlowyError> {
let params: CreateFieldParams = data.into_inner().try_into()?; let params: CreateFieldParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let field_rev = editor let field_rev = editor
.create_new_field_rev(&params.field_type, params.type_option_data) .create_new_field_rev(&params.field_type, params.type_option_data)
.await?; .await?;
@ -212,7 +212,7 @@ pub(crate) async fn move_field_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: MoveFieldParams = data.into_inner().try_into()?; let params: MoveFieldParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let _ = editor.move_field(params).await?; let _ = editor.move_field(params).await?;
Ok(()) Ok(())
} }
@ -237,7 +237,7 @@ pub(crate) async fn get_row_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<OptionalRowPB, FlowyError> { ) -> DataResult<OptionalRowPB, FlowyError> {
let params: RowIdParams = data.into_inner().try_into()?; let params: RowIdParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let row = editor.get_row_rev(&params.row_id).await?.map(make_row_from_row_rev); let row = editor.get_row_rev(&params.row_id).await?.map(make_row_from_row_rev);
data_result(OptionalRowPB { row }) data_result(OptionalRowPB { row })
@ -249,7 +249,7 @@ pub(crate) async fn delete_row_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: RowIdParams = data.into_inner().try_into()?; let params: RowIdParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let _ = editor.delete_row(&params.row_id).await?; let _ = editor.delete_row(&params.row_id).await?;
Ok(()) Ok(())
} }
@ -260,7 +260,7 @@ pub(crate) async fn duplicate_row_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: RowIdParams = data.into_inner().try_into()?; let params: RowIdParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
let _ = editor.duplicate_row(&params.row_id).await?; let _ = editor.duplicate_row(&params.row_id).await?;
Ok(()) Ok(())
} }
@ -271,7 +271,7 @@ pub(crate) async fn move_row_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: MoveRowParams = data.into_inner().try_into()?; let params: MoveRowParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.view_id)?; let editor = manager.get_grid_editor(&params.view_id).await?;
let _ = editor.move_row(params).await?; let _ = editor.move_row(params).await?;
Ok(()) Ok(())
} }
@ -282,7 +282,7 @@ pub(crate) async fn create_table_row_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<RowPB, FlowyError> { ) -> DataResult<RowPB, FlowyError> {
let params: CreateRowParams = data.into_inner().try_into()?; let params: CreateRowParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(params.grid_id.as_ref())?; let editor = manager.get_grid_editor(params.grid_id.as_ref()).await?;
let row = editor.create_row(params).await?; let row = editor.create_row(params).await?;
data_result(row) data_result(row)
} }
@ -293,7 +293,7 @@ pub(crate) async fn get_cell_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<GridCellPB, FlowyError> { ) -> DataResult<GridCellPB, FlowyError> {
let params: GridCellIdParams = data.into_inner().try_into()?; let params: GridCellIdParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
match editor.get_cell(&params).await { match editor.get_cell(&params).await {
None => data_result(GridCellPB::empty(&params.field_id)), None => data_result(GridCellPB::empty(&params.field_id)),
Some(cell) => data_result(cell), Some(cell) => data_result(cell),
@ -306,7 +306,7 @@ pub(crate) async fn update_cell_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let changeset: CellChangesetPB = data.into_inner(); let changeset: CellChangesetPB = data.into_inner();
let editor = manager.get_grid_editor(&changeset.grid_id)?; let editor = manager.get_grid_editor(&changeset.grid_id).await?;
let _ = editor.update_cell(changeset).await?; let _ = editor.update_cell(changeset).await?;
Ok(()) Ok(())
} }
@ -317,7 +317,7 @@ pub(crate) async fn new_select_option_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<SelectOptionPB, FlowyError> { ) -> DataResult<SelectOptionPB, FlowyError> {
let params: CreateSelectOptionParams = data.into_inner().try_into()?; let params: CreateSelectOptionParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
match editor.get_field_rev(&params.field_id).await { match editor.get_field_rev(&params.field_id).await {
None => Err(ErrorCode::InvalidData.into()), None => Err(ErrorCode::InvalidData.into()),
Some(field_rev) => { Some(field_rev) => {
@ -334,7 +334,7 @@ pub(crate) async fn update_select_option_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let changeset: SelectOptionChangeset = data.into_inner().try_into()?; let changeset: SelectOptionChangeset = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&changeset.cell_identifier.grid_id)?; let editor = manager.get_grid_editor(&changeset.cell_identifier.grid_id).await?;
let _ = editor let _ = editor
.modify_field_rev(&changeset.cell_identifier.field_id, |field_rev| { .modify_field_rev(&changeset.cell_identifier.field_id, |field_rev| {
@ -391,7 +391,7 @@ pub(crate) async fn get_select_option_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<SelectOptionCellDataPB, FlowyError> { ) -> DataResult<SelectOptionCellDataPB, FlowyError> {
let params: GridCellIdParams = data.into_inner().try_into()?; let params: GridCellIdParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.grid_id)?; let editor = manager.get_grid_editor(&params.grid_id).await?;
match editor.get_field_rev(&params.field_id).await { match editor.get_field_rev(&params.field_id).await {
None => { None => {
tracing::error!("Can't find the select option field with id: {}", params.field_id); tracing::error!("Can't find the select option field with id: {}", params.field_id);
@ -420,7 +420,7 @@ pub(crate) async fn update_select_option_cell_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: SelectOptionCellChangesetParams = data.into_inner().try_into()?; let params: SelectOptionCellChangesetParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.cell_identifier.grid_id)?; let editor = manager.get_grid_editor(&params.cell_identifier.grid_id).await?;
let _ = editor.update_cell(params.into()).await?; let _ = editor.update_cell(params.into()).await?;
Ok(()) Ok(())
} }
@ -431,7 +431,7 @@ pub(crate) async fn update_date_cell_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> Result<(), FlowyError> { ) -> Result<(), FlowyError> {
let params: DateChangesetParams = data.into_inner().try_into()?; let params: DateChangesetParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(&params.cell_identifier.grid_id)?; let editor = manager.get_grid_editor(&params.cell_identifier.grid_id).await?;
let _ = editor.update_cell(params.into()).await?; let _ = editor.update_cell(params.into()).await?;
Ok(()) Ok(())
} }
@ -442,7 +442,7 @@ pub(crate) async fn get_groups_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<RepeatedGridGroupPB, FlowyError> { ) -> DataResult<RepeatedGridGroupPB, FlowyError> {
let params: GridIdPB = data.into_inner(); let params: GridIdPB = data.into_inner();
let editor = manager.get_grid_editor(&params.value)?; let editor = manager.get_grid_editor(&params.value).await?;
let group = editor.load_groups().await?; let group = editor.load_groups().await?;
data_result(group) data_result(group)
} }
@ -453,7 +453,7 @@ pub(crate) async fn create_board_card_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> DataResult<RowPB, FlowyError> { ) -> DataResult<RowPB, FlowyError> {
let params: CreateRowParams = data.into_inner().try_into()?; let params: CreateRowParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(params.grid_id.as_ref())?; let editor = manager.get_grid_editor(params.grid_id.as_ref()).await?;
let row = editor.create_row(params).await?; let row = editor.create_row(params).await?;
data_result(row) data_result(row)
} }
@ -464,7 +464,7 @@ pub(crate) async fn move_group_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> FlowyResult<()> { ) -> FlowyResult<()> {
let params: MoveGroupParams = data.into_inner().try_into()?; let params: MoveGroupParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(params.view_id.as_ref())?; let editor = manager.get_grid_editor(params.view_id.as_ref()).await?;
let _ = editor.move_group(params).await?; let _ = editor.move_group(params).await?;
Ok(()) Ok(())
} }
@ -475,7 +475,7 @@ pub(crate) async fn move_group_row_handler(
manager: AppData<Arc<GridManager>>, manager: AppData<Arc<GridManager>>,
) -> FlowyResult<()> { ) -> FlowyResult<()> {
let params: MoveGroupRowParams = data.into_inner().try_into()?; let params: MoveGroupRowParams = data.into_inner().try_into()?;
let editor = manager.get_grid_editor(params.view_id.as_ref())?; let editor = manager.get_grid_editor(params.view_id.as_ref()).await?;
let _ = editor.move_group_row(params).await?; let _ = editor.move_group_row(params).await?;
Ok(()) Ok(())
} }
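
Every handler in this file picks up the same mechanical change: GridManager::get_grid_editor becomes async because the editor map now lives behind an async RwLock (see the GridManager diff below), so each `?` call site becomes `.await?`. A reduced sketch of the pattern with stand-in types, assuming the tokio runtime this crate already uses:

use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

pub struct GridEditor;

pub struct Manager {
    editors: RwLock<HashMap<String, Arc<GridEditor>>>,
}

impl Manager {
    // Previously a synchronous lookup over a DashMap; now async because the
    // map is guarded by tokio's RwLock.
    pub async fn get_grid_editor(&self, grid_id: &str) -> Result<Arc<GridEditor>, String> {
        self.editors
            .read()
            .await
            .get(grid_id)
            .cloned()
            .ok_or_else(|| "Should call open_grid function first".to_owned())
    }
}

pub async fn delete_row_handler(manager: Arc<Manager>, grid_id: &str) -> Result<(), String> {
    // The only change at each call site: `?` becomes `.await?`.
    let _editor = manager.get_grid_editor(grid_id).await?;
    Ok(())
}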

View file

@ -1,21 +1,27 @@
use crate::entities::GridLayout; use crate::entities::GridLayout;
use crate::services::block_editor::GridBlockRevisionCompress;
use crate::services::grid_editor::{GridRevisionCompress, GridRevisionEditor}; use crate::services::grid_editor::{GridRevisionCompress, GridRevisionEditor};
use crate::services::grid_view_manager::make_grid_view_rev_manager; use crate::services::grid_view_manager::make_grid_view_rev_manager;
use crate::services::persistence::block_index::BlockIndexCache; use crate::services::persistence::block_index::BlockIndexCache;
use crate::services::persistence::kv::GridKVPersistence; use crate::services::persistence::kv::GridKVPersistence;
use crate::services::persistence::migration::GridMigration; use crate::services::persistence::migration::GridMigration;
use crate::services::persistence::rev_sqlite::{SQLiteGridBlockRevisionPersistence, SQLiteGridRevisionPersistence}; use crate::services::persistence::rev_sqlite::SQLiteGridRevisionPersistence;
use crate::services::persistence::GridDatabase; use crate::services::persistence::GridDatabase;
use crate::services::tasks::GridTaskScheduler; use crate::services::tasks::GridTaskScheduler;
use bytes::Bytes; use bytes::Bytes;
use dashmap::DashMap;
use flowy_database::ConnectionPool; use flowy_database::ConnectionPool;
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{FlowyError, FlowyResult};
use flowy_grid_data_model::revision::{BuildGridContext, GridRevision, GridViewRevision}; use flowy_grid_data_model::revision::{BuildGridContext, GridRevision, GridViewRevision};
use flowy_revision::{RevisionManager, RevisionPersistence, RevisionWebSocket, SQLiteRevisionSnapshotPersistence}; use flowy_revision::{
RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, RevisionWebSocket,
SQLiteRevisionSnapshotPersistence,
};
use flowy_sync::client_grid::{make_grid_block_operations, make_grid_operations, make_grid_view_operations}; use flowy_sync::client_grid::{make_grid_block_operations, make_grid_operations, make_grid_view_operations};
use flowy_sync::entities::revision::{RepeatedRevision, Revision}; use flowy_sync::entities::revision::Revision;
use lib_infra::ref_map::{RefCountHashMap, RefCountValue};
use crate::services::block_manager::make_grid_block_rev_manager;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::RwLock; use tokio::sync::RwLock;
@ -28,7 +34,7 @@ pub trait GridUser: Send + Sync {
pub type GridTaskSchedulerRwLock = Arc<RwLock<GridTaskScheduler>>; pub type GridTaskSchedulerRwLock = Arc<RwLock<GridTaskScheduler>>;
pub struct GridManager { pub struct GridManager {
grid_editors: Arc<DashMap<String, Arc<GridRevisionEditor>>>, grid_editors: RwLock<RefCountHashMap<Arc<GridRevisionEditor>>>,
grid_user: Arc<dyn GridUser>, grid_user: Arc<dyn GridUser>,
block_index_cache: Arc<BlockIndexCache>, block_index_cache: Arc<BlockIndexCache>,
#[allow(dead_code)] #[allow(dead_code)]
@ -43,7 +49,7 @@ impl GridManager {
_rev_web_socket: Arc<dyn RevisionWebSocket>, _rev_web_socket: Arc<dyn RevisionWebSocket>,
database: Arc<dyn GridDatabase>, database: Arc<dyn GridDatabase>,
) -> Self { ) -> Self {
let grid_editors = Arc::new(DashMap::new()); let grid_editors = RwLock::new(RefCountHashMap::new());
let kv_persistence = Arc::new(GridKVPersistence::new(database.clone())); let kv_persistence = Arc::new(GridKVPersistence::new(database.clone()));
let block_index_cache = Arc::new(BlockIndexCache::new(database.clone())); let block_index_cache = Arc::new(BlockIndexCache::new(database.clone()));
let task_scheduler = GridTaskScheduler::new(); let task_scheduler = GridTaskScheduler::new();
@ -67,7 +73,7 @@ impl GridManager {
} }
#[tracing::instrument(level = "debug", skip_all, err)] #[tracing::instrument(level = "debug", skip_all, err)]
pub async fn create_grid<T: AsRef<str>>(&self, grid_id: T, revisions: RepeatedRevision) -> FlowyResult<()> { pub async fn create_grid<T: AsRef<str>>(&self, grid_id: T, revisions: Vec<Revision>) -> FlowyResult<()> {
let grid_id = grid_id.as_ref(); let grid_id = grid_id.as_ref();
let db_pool = self.grid_user.db_pool()?; let db_pool = self.grid_user.db_pool()?;
let rev_manager = self.make_grid_rev_manager(grid_id, db_pool)?; let rev_manager = self.make_grid_rev_manager(grid_id, db_pool)?;
@ -77,7 +83,7 @@ impl GridManager {
} }
#[tracing::instrument(level = "debug", skip_all, err)] #[tracing::instrument(level = "debug", skip_all, err)]
async fn create_grid_view<T: AsRef<str>>(&self, view_id: T, revisions: RepeatedRevision) -> FlowyResult<()> { async fn create_grid_view<T: AsRef<str>>(&self, view_id: T, revisions: Vec<Revision>) -> FlowyResult<()> {
let view_id = view_id.as_ref(); let view_id = view_id.as_ref();
let rev_manager = make_grid_view_rev_manager(&self.grid_user, view_id).await?; let rev_manager = make_grid_view_rev_manager(&self.grid_user, view_id).await?;
let _ = rev_manager.reset_object(revisions).await?; let _ = rev_manager.reset_object(revisions).await?;
@ -85,10 +91,9 @@ impl GridManager {
} }
#[tracing::instrument(level = "debug", skip_all, err)] #[tracing::instrument(level = "debug", skip_all, err)]
pub async fn create_grid_block<T: AsRef<str>>(&self, block_id: T, revisions: RepeatedRevision) -> FlowyResult<()> { pub async fn create_grid_block<T: AsRef<str>>(&self, block_id: T, revisions: Vec<Revision>) -> FlowyResult<()> {
let block_id = block_id.as_ref(); let block_id = block_id.as_ref();
let db_pool = self.grid_user.db_pool()?; let rev_manager = make_grid_block_rev_manager(&self.grid_user, block_id)?;
let rev_manager = self.make_grid_block_rev_manager(block_id, db_pool)?;
let _ = rev_manager.reset_object(revisions).await?; let _ = rev_manager.reset_object(revisions).await?;
Ok(()) Ok(())
} }
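
create_grid, create_grid_view, and create_grid_block now accept a plain Vec<Revision>, and Revision::initial_revision drops the user_id argument, matching the Revision::new change later in this commit. A toy version of the construction these call sites rely on; the (base_rev_id, rev_id) values of an initial revision are an assumption, since the diff only shows the call shape:

pub struct Revision {
    pub object_id: String,
    pub base_rev_id: i64,
    pub rev_id: i64,
    pub bytes: Vec<u8>,
}

impl Revision {
    // Assumed: an initial revision starts the id pair at (0, 0); the diff
    // only shows that user_id is gone from the argument list.
    pub fn initial_revision(object_id: &str, bytes: Vec<u8>) -> Self {
        Revision {
            object_id: object_id.to_owned(),
            base_rev_id: 0,
            rev_id: 0,
            bytes,
        }
    }
}

fn main() {
    let revision = Revision::initial_revision("block_id", b"delta".to_vec());
    // Callers now pass vec![revision] where a RepeatedRevision wrapper used to go.
    let revisions: Vec<Revision> = vec![revision];
    assert_eq!(revisions.len(), 1);
}
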
@ -104,35 +109,33 @@ impl GridManager {
pub async fn close_grid<T: AsRef<str>>(&self, grid_id: T) -> FlowyResult<()> { pub async fn close_grid<T: AsRef<str>>(&self, grid_id: T) -> FlowyResult<()> {
let grid_id = grid_id.as_ref(); let grid_id = grid_id.as_ref();
tracing::Span::current().record("grid_id", &grid_id); tracing::Span::current().record("grid_id", &grid_id);
self.grid_editors.remove(grid_id); self.grid_editors.write().await.remove(grid_id);
self.task_scheduler.write().await.unregister_handler(grid_id); self.task_scheduler.write().await.unregister_handler(grid_id);
Ok(()) Ok(())
} }
// #[tracing::instrument(level = "debug", skip(self), err)] // #[tracing::instrument(level = "debug", skip(self), err)]
pub fn get_grid_editor(&self, grid_id: &str) -> FlowyResult<Arc<GridRevisionEditor>> { pub async fn get_grid_editor(&self, grid_id: &str) -> FlowyResult<Arc<GridRevisionEditor>> {
match self.grid_editors.get(grid_id) { match self.grid_editors.read().await.get(grid_id) {
None => Err(FlowyError::internal().context("Should call open_grid function first")), None => Err(FlowyError::internal().context("Should call open_grid function first")),
Some(editor) => Ok(editor.clone()), Some(editor) => Ok(editor),
} }
} }
async fn get_or_create_grid_editor(&self, grid_id: &str) -> FlowyResult<Arc<GridRevisionEditor>> { async fn get_or_create_grid_editor(&self, grid_id: &str) -> FlowyResult<Arc<GridRevisionEditor>> {
match self.grid_editors.get(grid_id) { if let Some(editor) = self.grid_editors.read().await.get(grid_id) {
None => { return Ok(editor);
if let Some(editor) = self.grid_editors.get(grid_id) {
tracing::warn!("Grid:{} already open", grid_id);
Ok(editor.clone())
} else {
let db_pool = self.grid_user.db_pool()?;
let editor = self.make_grid_rev_editor(grid_id, db_pool).await?;
self.grid_editors.insert(grid_id.to_string(), editor.clone());
self.task_scheduler.write().await.register_handler(editor.clone());
Ok(editor)
}
}
Some(editor) => Ok(editor.clone()),
} }
let db_pool = self.grid_user.db_pool()?;
let editor = self.make_grid_rev_editor(grid_id, db_pool).await?;
self.grid_editors
.write()
.await
.insert(grid_id.to_string(), editor.clone());
self.task_scheduler.write().await.register_handler(editor.clone());
Ok(editor)
} }
#[tracing::instrument(level = "trace", skip(self, pool), err)] #[tracing::instrument(level = "trace", skip(self, pool), err)]
@ -161,31 +164,17 @@ impl GridManager {
) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> { ) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> {
let user_id = self.grid_user.user_id()?; let user_id = self.grid_user.user_id()?;
let disk_cache = SQLiteGridRevisionPersistence::new(&user_id, pool.clone()); let disk_cache = SQLiteGridRevisionPersistence::new(&user_id, pool.clone());
let rev_persistence = RevisionPersistence::new(&user_id, grid_id, disk_cache); let configuration = RevisionPersistenceConfiguration::new(2, false);
let rev_persistence = RevisionPersistence::new(&user_id, grid_id, disk_cache, configuration);
let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(grid_id, pool); let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(grid_id, pool);
let rev_compactor = GridRevisionCompress(); let rev_compactor = GridRevisionCompress();
let rev_manager = RevisionManager::new(&user_id, grid_id, rev_persistence, rev_compactor, snapshot_persistence); let rev_manager = RevisionManager::new(&user_id, grid_id, rev_persistence, rev_compactor, snapshot_persistence);
Ok(rev_manager) Ok(rev_manager)
} }
fn make_grid_block_rev_manager(
&self,
block_id: &str,
pool: Arc<ConnectionPool>,
) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> {
let user_id = self.grid_user.user_id()?;
let disk_cache = SQLiteGridBlockRevisionPersistence::new(&user_id, pool.clone());
let rev_persistence = RevisionPersistence::new(&user_id, block_id, disk_cache);
let rev_compactor = GridBlockRevisionCompress();
let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(block_id, pool);
let rev_manager =
RevisionManager::new(&user_id, block_id, rev_persistence, rev_compactor, snapshot_persistence);
Ok(rev_manager)
}
} }
pub async fn make_grid_view_data( pub async fn make_grid_view_data(
user_id: &str, _user_id: &str,
view_id: &str, view_id: &str,
layout: GridLayout, layout: GridLayout,
grid_manager: Arc<GridManager>, grid_manager: Arc<GridManager>,
@ -208,9 +197,8 @@ pub async fn make_grid_view_data(
// Create grid's block // Create grid's block
let grid_block_delta = make_grid_block_operations(block_meta_data); let grid_block_delta = make_grid_block_operations(block_meta_data);
let block_delta_data = grid_block_delta.json_bytes(); let block_delta_data = grid_block_delta.json_bytes();
let repeated_revision: RepeatedRevision = let revision = Revision::initial_revision(block_id, block_delta_data);
Revision::initial_revision(user_id, block_id, block_delta_data).into(); let _ = grid_manager.create_grid_block(&block_id, vec![revision]).await?;
let _ = grid_manager.create_grid_block(&block_id, repeated_revision).await?;
} }
// Will replace the grid_id with the value returned by the gen_grid_id() // Will replace the grid_id with the value returned by the gen_grid_id()
@ -220,9 +208,8 @@ pub async fn make_grid_view_data(
// Create grid // Create grid
let grid_rev_delta = make_grid_operations(&grid_rev); let grid_rev_delta = make_grid_operations(&grid_rev);
let grid_rev_delta_bytes = grid_rev_delta.json_bytes(); let grid_rev_delta_bytes = grid_rev_delta.json_bytes();
let repeated_revision: RepeatedRevision = let revision = Revision::initial_revision(&grid_id, grid_rev_delta_bytes.clone());
Revision::initial_revision(user_id, &grid_id, grid_rev_delta_bytes.clone()).into(); let _ = grid_manager.create_grid(&grid_id, vec![revision]).await?;
let _ = grid_manager.create_grid(&grid_id, repeated_revision).await?;
// Create grid view // Create grid view
let grid_view = if grid_view_revision_data.is_empty() { let grid_view = if grid_view_revision_data.is_empty() {
@ -232,9 +219,14 @@ pub async fn make_grid_view_data(
}; };
let grid_view_delta = make_grid_view_operations(&grid_view); let grid_view_delta = make_grid_view_operations(&grid_view);
let grid_view_delta_bytes = grid_view_delta.json_bytes(); let grid_view_delta_bytes = grid_view_delta.json_bytes();
let repeated_revision: RepeatedRevision = let revision = Revision::initial_revision(view_id, grid_view_delta_bytes);
Revision::initial_revision(user_id, view_id, grid_view_delta_bytes).into(); let _ = grid_manager.create_grid_view(view_id, vec![revision]).await?;
let _ = grid_manager.create_grid_view(view_id, repeated_revision).await?;
Ok(grid_rev_delta_bytes) Ok(grid_rev_delta_bytes)
} }
impl RefCountValue for GridRevisionEditor {
fn did_remove(&self) {
self.close();
}
}
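
GridRevisionEditor participates in the new RefCountHashMap by implementing RefCountValue, so the editor's close() runs when it leaves the map. A simplified stand-in for lib_infra::ref_map that fires did_remove on removal; the real map presumably also tracks outstanding references before notifying:

use std::collections::HashMap;
use std::sync::Arc;

pub trait RefCountValue {
    fn did_remove(&self);
}

pub struct RefCountHashMap<T: RefCountValue>(HashMap<String, Arc<T>>);

impl<T: RefCountValue> RefCountHashMap<T> {
    pub fn new() -> Self {
        RefCountHashMap(HashMap::new())
    }
    pub fn insert(&mut self, key: String, value: Arc<T>) {
        self.0.insert(key, value);
    }
    pub fn get(&self, key: &str) -> Option<Arc<T>> {
        self.0.get(key).cloned()
    }
    // Removal notifies the value so it can release resources; for the grid
    // editor, did_remove forwards to close().
    pub fn remove(&mut self, key: &str) {
        if let Some(value) = self.0.remove(key) {
            value.did_remove();
        }
    }
}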

View file

@ -3,7 +3,7 @@ use bytes::Bytes;
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{FlowyError, FlowyResult};
use flowy_grid_data_model::revision::{CellRevision, GridBlockRevision, RowChangeset, RowRevision}; use flowy_grid_data_model::revision::{CellRevision, GridBlockRevision, RowChangeset, RowRevision};
use flowy_revision::{ use flowy_revision::{
RevisionCloudService, RevisionCompress, RevisionManager, RevisionObjectDeserializer, RevisionObjectSerializer, RevisionCloudService, RevisionManager, RevisionMergeable, RevisionObjectDeserializer, RevisionObjectSerializer,
}; };
use flowy_sync::client_grid::{GridBlockRevisionChangeset, GridBlockRevisionPad}; use flowy_sync::client_grid::{GridBlockRevisionChangeset, GridBlockRevisionPad};
use flowy_sync::entities::revision::Revision; use flowy_sync::entities::revision::Revision;
@ -17,6 +17,7 @@ use std::sync::Arc;
use tokio::sync::RwLock; use tokio::sync::RwLock;
pub struct GridBlockRevisionEditor { pub struct GridBlockRevisionEditor {
#[allow(dead_code)]
user_id: String, user_id: String,
pub block_id: String, pub block_id: String,
pad: Arc<RwLock<GridBlockRevisionPad>>, pad: Arc<RwLock<GridBlockRevisionPad>>,
@ -33,7 +34,7 @@ impl GridBlockRevisionEditor {
let cloud = Arc::new(GridBlockRevisionCloudService { let cloud = Arc::new(GridBlockRevisionCloudService {
token: token.to_owned(), token: token.to_owned(),
}); });
let block_revision_pad = rev_manager.load::<GridBlockRevisionSerde>(Some(cloud)).await?; let block_revision_pad = rev_manager.initialize::<GridBlockRevisionSerde>(Some(cloud)).await?;
let pad = Arc::new(RwLock::new(block_revision_pad)); let pad = Arc::new(RwLock::new(block_revision_pad));
let rev_manager = Arc::new(rev_manager); let rev_manager = Arc::new(rev_manager);
let user_id = user_id.to_owned(); let user_id = user_id.to_owned();
@ -167,17 +168,9 @@ impl GridBlockRevisionEditor {
async fn apply_change(&self, change: GridBlockRevisionChangeset) -> FlowyResult<()> { async fn apply_change(&self, change: GridBlockRevisionChangeset) -> FlowyResult<()> {
let GridBlockRevisionChangeset { operations: delta, md5 } = change; let GridBlockRevisionChangeset { operations: delta, md5 } = change;
let user_id = self.user_id.clone();
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair(); let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let delta_data = delta.json_bytes(); let delta_data = delta.json_bytes();
let revision = Revision::new( let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
&self.rev_manager.object_id,
base_rev_id,
rev_id,
delta_data,
&user_id,
md5,
);
let _ = self.rev_manager.add_local_revision(&revision).await?; let _ = self.rev_manager.add_local_revision(&revision).await?;
Ok(()) Ok(())
} }
@ -212,7 +205,7 @@ impl RevisionObjectSerializer for GridBlockRevisionSerde {
} }
pub struct GridBlockRevisionCompress(); pub struct GridBlockRevisionCompress();
impl RevisionCompress for GridBlockRevisionCompress { impl RevisionMergeable for GridBlockRevisionCompress {
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> { fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
GridBlockRevisionSerde::combine_revisions(revisions) GridBlockRevisionSerde::combine_revisions(revisions)
} }
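
apply_change shrinks because Revision::new no longer takes a user_id: a revision is identified by its object id, the (base_rev_id, rev_id) pair from the revision manager, and the content md5. A toy illustration of what next_rev_id_pair plausibly hands out (an assumption; only the call site is visible here), where each new revision is based on the previous rev id:

use std::sync::atomic::{AtomicI64, Ordering};

pub struct RevIdCounter(AtomicI64);

impl RevIdCounter {
    pub fn new() -> Self {
        RevIdCounter(AtomicI64::new(0))
    }
    // Returns (base_rev_id, rev_id): the freshly minted revision is based on
    // the previous one.
    pub fn next_rev_id_pair(&self) -> (i64, i64) {
        let prev = self.0.fetch_add(1, Ordering::SeqCst);
        (prev, prev + 1)
    }
}

fn main() {
    let counter = RevIdCounter::new();
    assert_eq!(counter.next_rev_id_pair(), (0, 1));
    assert_eq!(counter.next_rev_id_pair(), (1, 2));
}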

View file

@ -6,11 +6,14 @@ use crate::services::persistence::block_index::BlockIndexCache;
use crate::services::persistence::rev_sqlite::SQLiteGridBlockRevisionPersistence; use crate::services::persistence::rev_sqlite::SQLiteGridBlockRevisionPersistence;
use crate::services::row::{block_from_row_orders, make_row_from_row_rev, GridBlockSnapshot}; use crate::services::row::{block_from_row_orders, make_row_from_row_rev, GridBlockSnapshot};
use dashmap::DashMap; use dashmap::DashMap;
use flowy_database::ConnectionPool;
use flowy_error::FlowyResult; use flowy_error::FlowyResult;
use flowy_grid_data_model::revision::{ use flowy_grid_data_model::revision::{
GridBlockMetaRevision, GridBlockMetaRevisionChangeset, RowChangeset, RowRevision, GridBlockMetaRevision, GridBlockMetaRevisionChangeset, RowChangeset, RowRevision,
}; };
use flowy_revision::{RevisionManager, RevisionPersistence, SQLiteRevisionSnapshotPersistence}; use flowy_revision::{
RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, SQLiteRevisionSnapshotPersistence,
};
use std::borrow::Cow; use std::borrow::Cow;
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
@ -44,7 +47,7 @@ impl GridBlockManager {
match self.block_editors.get(block_id) { match self.block_editors.get(block_id) {
None => { None => {
tracing::error!("This is a fatal error, block with id:{} is not exist", block_id); tracing::error!("This is a fatal error, block with id:{} is not exist", block_id);
let editor = Arc::new(make_block_editor(&self.user, block_id).await?); let editor = Arc::new(make_grid_block_editor(&self.user, block_id).await?);
self.block_editors.insert(block_id.to_owned(), editor.clone()); self.block_editors.insert(block_id.to_owned(), editor.clone());
Ok(editor) Ok(editor)
} }
@ -259,23 +262,32 @@ async fn make_block_editors(
) -> FlowyResult<DashMap<String, Arc<GridBlockRevisionEditor>>> { ) -> FlowyResult<DashMap<String, Arc<GridBlockRevisionEditor>>> {
let editor_map = DashMap::new(); let editor_map = DashMap::new();
for block_meta_rev in block_meta_revs { for block_meta_rev in block_meta_revs {
let editor = make_block_editor(user, &block_meta_rev.block_id).await?; let editor = make_grid_block_editor(user, &block_meta_rev.block_id).await?;
editor_map.insert(block_meta_rev.block_id.clone(), Arc::new(editor)); editor_map.insert(block_meta_rev.block_id.clone(), Arc::new(editor));
} }
Ok(editor_map) Ok(editor_map)
} }
async fn make_block_editor(user: &Arc<dyn GridUser>, block_id: &str) -> FlowyResult<GridBlockRevisionEditor> { async fn make_grid_block_editor(user: &Arc<dyn GridUser>, block_id: &str) -> FlowyResult<GridBlockRevisionEditor> {
tracing::trace!("Open block:{} editor", block_id); tracing::trace!("Open block:{} editor", block_id);
let token = user.token()?; let token = user.token()?;
let user_id = user.user_id()?; let user_id = user.user_id()?;
let pool = user.db_pool()?; let rev_manager = make_grid_block_rev_manager(user, block_id)?;
GridBlockRevisionEditor::new(&user_id, &token, block_id, rev_manager).await
}
pub fn make_grid_block_rev_manager(
user: &Arc<dyn GridUser>,
block_id: &str,
) -> FlowyResult<RevisionManager<Arc<ConnectionPool>>> {
let user_id = user.user_id()?;
let pool = user.db_pool()?;
let disk_cache = SQLiteGridBlockRevisionPersistence::new(&user_id, pool.clone()); let disk_cache = SQLiteGridBlockRevisionPersistence::new(&user_id, pool.clone());
let rev_persistence = RevisionPersistence::new(&user_id, block_id, disk_cache); let configuration = RevisionPersistenceConfiguration::new(4, false);
let rev_persistence = RevisionPersistence::new(&user_id, block_id, disk_cache, configuration);
let rev_compactor = GridBlockRevisionCompress(); let rev_compactor = GridBlockRevisionCompress();
let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(block_id, pool); let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(block_id, pool);
let rev_manager = RevisionManager::new(&user_id, block_id, rev_persistence, rev_compactor, snapshot_persistence); let rev_manager = RevisionManager::new(&user_id, block_id, rev_persistence, rev_compactor, snapshot_persistence);
GridBlockRevisionEditor::new(&user_id, &token, block_id, rev_manager).await Ok(rev_manager)
} }
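
Extracting make_grid_block_rev_manager lets GridManager::create_grid_block (above) and the block editor share one construction path instead of two diverging copies. The get-or-create lookup at the top of this file keeps its DashMap shape; a condensed sketch of that pattern, assuming the dashmap crate this module already depends on:

use dashmap::DashMap;
use std::sync::Arc;

pub struct BlockEditor {
    pub block_id: String,
}

async fn make_editor(block_id: &str) -> Result<BlockEditor, String> {
    Ok(BlockEditor {
        block_id: block_id.to_owned(),
    })
}

pub async fn get_or_create(
    editors: &DashMap<String, Arc<BlockEditor>>,
    block_id: &str,
) -> Result<Arc<BlockEditor>, String> {
    match editors.get(block_id) {
        Some(editor) => Ok(editor.clone()),
        None => {
            // Mirrors the error-then-recover behavior above: the block is
            // expected to exist already, so absence is logged as fatal
            // before the editor is rebuilt.
            let editor = Arc::new(make_editor(block_id).await?);
            editors.insert(block_id.to_owned(), editor.clone());
            Ok(editor)
        }
    }
}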

View file

@ -17,7 +17,7 @@ use bytes::Bytes;
use flowy_error::{ErrorCode, FlowyError, FlowyResult}; use flowy_error::{ErrorCode, FlowyError, FlowyResult};
use flowy_grid_data_model::revision::*; use flowy_grid_data_model::revision::*;
use flowy_revision::{ use flowy_revision::{
RevisionCloudService, RevisionCompress, RevisionManager, RevisionObjectDeserializer, RevisionObjectSerializer, RevisionCloudService, RevisionManager, RevisionMergeable, RevisionObjectDeserializer, RevisionObjectSerializer,
}; };
use flowy_sync::client_grid::{GridRevisionChangeset, GridRevisionPad, JsonDeserializer}; use flowy_sync::client_grid::{GridRevisionChangeset, GridRevisionPad, JsonDeserializer};
use flowy_sync::entities::revision::Revision; use flowy_sync::entities::revision::Revision;
@ -33,6 +33,7 @@ use tokio::sync::RwLock;
pub struct GridRevisionEditor { pub struct GridRevisionEditor {
pub grid_id: String, pub grid_id: String,
#[allow(dead_code)]
user: Arc<dyn GridUser>, user: Arc<dyn GridUser>,
grid_pad: Arc<RwLock<GridRevisionPad>>, grid_pad: Arc<RwLock<GridRevisionPad>>,
view_manager: Arc<GridViewManager>, view_manager: Arc<GridViewManager>,
@ -59,7 +60,7 @@ impl GridRevisionEditor {
) -> FlowyResult<Arc<Self>> { ) -> FlowyResult<Arc<Self>> {
let token = user.token()?; let token = user.token()?;
let cloud = Arc::new(GridRevisionCloudService { token }); let cloud = Arc::new(GridRevisionCloudService { token });
let grid_pad = rev_manager.load::<GridRevisionSerde>(Some(cloud)).await?; let grid_pad = rev_manager.initialize::<GridRevisionSerde>(Some(cloud)).await?;
let rev_manager = Arc::new(rev_manager); let rev_manager = Arc::new(rev_manager);
let grid_pad = Arc::new(RwLock::new(grid_pad)); let grid_pad = Arc::new(RwLock::new(grid_pad));
@ -93,6 +94,14 @@ impl GridRevisionEditor {
Ok(editor) Ok(editor)
} }
#[tracing::instrument(name = "close grid editor", level = "trace", skip_all)]
pub fn close(&self) {
let rev_manager = self.rev_manager.clone();
tokio::spawn(async move {
rev_manager.close().await;
});
}
/// Save the type-option data to disk and send a `GridNotification::DidUpdateField` notification /// Save the type-option data to disk and send a `GridNotification::DidUpdateField` notification
/// to dart side. /// to dart side.
/// ///
@ -757,17 +766,9 @@ impl GridRevisionEditor {
async fn apply_change(&self, change: GridRevisionChangeset) -> FlowyResult<()> { async fn apply_change(&self, change: GridRevisionChangeset) -> FlowyResult<()> {
let GridRevisionChangeset { operations: delta, md5 } = change; let GridRevisionChangeset { operations: delta, md5 } = change;
let user_id = self.user.user_id()?;
let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair(); let (base_rev_id, rev_id) = self.rev_manager.next_rev_id_pair();
let delta_data = delta.json_bytes(); let delta_data = delta.json_bytes();
let revision = Revision::new( let revision = Revision::new(&self.rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
&self.rev_manager.object_id,
base_rev_id,
rev_id,
delta_data,
&user_id,
md5,
);
let _ = self.rev_manager.add_local_revision(&revision).await?; let _ = self.rev_manager.add_local_revision(&revision).await?;
Ok(()) Ok(())
} }
@ -854,7 +855,7 @@ impl RevisionCloudService for GridRevisionCloudService {
pub struct GridRevisionCompress(); pub struct GridRevisionCompress();
impl RevisionCompress for GridRevisionCompress { impl RevisionMergeable for GridRevisionCompress {
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> { fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
GridRevisionSerde::combine_revisions(revisions) GridRevisionSerde::combine_revisions(revisions)
} }
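
The grid editor gains an explicit close(), the same shape the document editor gets in this commit: the method itself stays synchronous (so it can be called from RefCountValue::did_remove), and the revision manager is shut down in a spawned task. A reduced sketch, assuming a tokio runtime:

use std::sync::Arc;

pub struct RevisionManager;

impl RevisionManager {
    pub async fn close(&self) {
        // Flush pending revisions, stop background sync, and so on.
    }
}

pub struct GridRevisionEditor {
    rev_manager: Arc<RevisionManager>,
}

impl GridRevisionEditor {
    // Synchronous on purpose, so non-async callers such as
    // RefCountValue::did_remove can invoke it; the async work is detached.
    pub fn close(&self) {
        let rev_manager = self.rev_manager.clone();
        tokio::spawn(async move {
            rev_manager.close().await;
        });
    }
}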

View file

@ -19,7 +19,7 @@ use flowy_grid_data_model::revision::{
RowChangeset, RowRevision, RowChangeset, RowRevision,
}; };
use flowy_revision::{ use flowy_revision::{
RevisionCloudService, RevisionCompress, RevisionManager, RevisionObjectDeserializer, RevisionObjectSerializer, RevisionCloudService, RevisionManager, RevisionMergeable, RevisionObjectDeserializer, RevisionObjectSerializer,
}; };
use flowy_sync::client_grid::{GridViewRevisionChangeset, GridViewRevisionPad}; use flowy_sync::client_grid::{GridViewRevisionChangeset, GridViewRevisionPad};
use flowy_sync::entities::revision::Revision; use flowy_sync::entities::revision::Revision;
@ -55,7 +55,7 @@ impl GridViewRevisionEditor {
let cloud = Arc::new(GridViewRevisionCloudService { let cloud = Arc::new(GridViewRevisionCloudService {
token: token.to_owned(), token: token.to_owned(),
}); });
let view_revision_pad = rev_manager.load::<GridViewRevisionSerde>(Some(cloud)).await?; let view_revision_pad = rev_manager.initialize::<GridViewRevisionSerde>(Some(cloud)).await?;
let pad = Arc::new(RwLock::new(view_revision_pad)); let pad = Arc::new(RwLock::new(view_revision_pad));
let rev_manager = Arc::new(rev_manager); let rev_manager = Arc::new(rev_manager);
let group_controller = new_group_controller( let group_controller = new_group_controller(
@ -454,14 +454,14 @@ async fn new_group_controller_with_field_rev(
} }
async fn apply_change( async fn apply_change(
user_id: &str, _user_id: &str,
rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>, rev_manager: Arc<RevisionManager<Arc<ConnectionPool>>>,
change: GridViewRevisionChangeset, change: GridViewRevisionChangeset,
) -> FlowyResult<()> { ) -> FlowyResult<()> {
let GridViewRevisionChangeset { operations: delta, md5 } = change; let GridViewRevisionChangeset { operations: delta, md5 } = change;
let (base_rev_id, rev_id) = rev_manager.next_rev_id_pair(); let (base_rev_id, rev_id) = rev_manager.next_rev_id_pair();
let delta_data = delta.json_bytes(); let delta_data = delta.json_bytes();
let revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, delta_data, user_id, md5); let revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, delta_data, md5);
let _ = rev_manager.add_local_revision(&revision).await?; let _ = rev_manager.add_local_revision(&revision).await?;
Ok(()) Ok(())
} }
@ -496,7 +496,7 @@ impl RevisionObjectSerializer for GridViewRevisionSerde {
} }
pub struct GridViewRevisionCompress(); pub struct GridViewRevisionCompress();
impl RevisionCompress for GridViewRevisionCompress { impl RevisionMergeable for GridViewRevisionCompress {
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> { fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
GridViewRevisionSerde::combine_revisions(revisions) GridViewRevisionSerde::combine_revisions(revisions)
} }
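
RevisionCompress is renamed RevisionMergeable across all the editors, which is the commit's point: the trait merges a batch of pending revisions into one payload through combine_revisions. A self-contained sketch of a conforming implementation with stand-in types (the real trait returns Bytes and composes deserialized operations rather than concatenating):

pub struct Revision {
    pub bytes: Vec<u8>,
}

pub type FlowyResult<T> = Result<T, String>;

pub trait RevisionMergeable {
    // Merge a batch of pending revisions into a single serialized payload.
    fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Vec<u8>>;
}

pub struct ConcatMerge;

impl RevisionMergeable for ConcatMerge {
    fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Vec<u8>> {
        // The real serializers deserialize each payload into operations and
        // compose them; concatenation stands in for that here.
        Ok(revisions.into_iter().flat_map(|rev| rev.bytes).collect())
    }
}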

View file

@ -11,7 +11,9 @@ use dashmap::DashMap;
use flowy_database::ConnectionPool; use flowy_database::ConnectionPool;
use flowy_error::FlowyResult; use flowy_error::FlowyResult;
use flowy_grid_data_model::revision::{FieldRevision, RowChangeset, RowRevision}; use flowy_grid_data_model::revision::{FieldRevision, RowChangeset, RowRevision};
use flowy_revision::{RevisionManager, RevisionPersistence, SQLiteRevisionSnapshotPersistence}; use flowy_revision::{
RevisionManager, RevisionPersistence, RevisionPersistenceConfiguration, SQLiteRevisionSnapshotPersistence,
};
use lib_infra::future::AFFuture; use lib_infra::future::AFFuture;
use std::sync::Arc; use std::sync::Arc;
@ -253,7 +255,8 @@ pub async fn make_grid_view_rev_manager(
let pool = user.db_pool()?; let pool = user.db_pool()?;
let disk_cache = SQLiteGridViewRevisionPersistence::new(&user_id, pool.clone()); let disk_cache = SQLiteGridViewRevisionPersistence::new(&user_id, pool.clone());
let rev_persistence = RevisionPersistence::new(&user_id, view_id, disk_cache); let configuration = RevisionPersistenceConfiguration::new(2, false);
let rev_persistence = RevisionPersistence::new(&user_id, view_id, disk_cache, configuration);
let rev_compactor = GridViewRevisionCompress(); let rev_compactor = GridViewRevisionCompress();
let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(view_id, pool); let snapshot_persistence = SQLiteRevisionSnapshotPersistence::new(view_id, pool);
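
RevisionPersistence construction now threads through a RevisionPersistenceConfiguration, built as new(2, false) for grids and views and new(4, false) for blocks. The diff never names the two parameters; a plausible reading, given the commit's goal, is a merge threshold plus a flag, which the hypothetical sketch below assumes:

pub struct RevisionPersistenceConfiguration {
    // Assumed meaning, not named anywhere in this diff: how many pending
    // revisions may accumulate before being merged into one, plus a flag.
    merge_threshold: usize,
    #[allow(dead_code)]
    merge_lagging: bool,
}

impl RevisionPersistenceConfiguration {
    pub fn new(merge_threshold: usize, merge_lagging: bool) -> Self {
        Self { merge_threshold, merge_lagging }
    }
    pub fn should_merge(&self, pending: usize) -> bool {
        pending >= self.merge_threshold
    }
}

fn main() {
    let config = RevisionPersistenceConfiguration::new(2, false);
    assert!(config.should_merge(2));
    assert!(!config.should_merge(1));
}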

View file

@ -1,7 +1,7 @@
mod util; mod util;
pub mod block_editor; pub mod block_editor;
mod block_manager; pub mod block_manager;
mod block_manager_trait_impl; mod block_manager_trait_impl;
pub mod cell; pub mod cell;
pub mod field; pub mod field;

View file

@ -7,7 +7,7 @@ use flowy_database::{
ConnectionPool, ConnectionPool,
}; };
use flowy_error::{internal_error, FlowyError, FlowyResult}; use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState}; use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionState, SyncRecord};
use flowy_sync::{ use flowy_sync::{
entities::revision::{Revision, RevisionRange}, entities::revision::{Revision, RevisionRange},
util::md5, util::md5,
@ -22,7 +22,7 @@ pub struct SQLiteGridBlockRevisionPersistence {
impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridBlockRevisionPersistence { impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridBlockRevisionPersistence {
type Error = FlowyError; type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> { fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error> {
let conn = self.pool.get().map_err(internal_error)?; let conn = self.pool.get().map_err(internal_error)?;
let _ = GridMetaRevisionSql::create(revision_records, &*conn)?; let _ = GridMetaRevisionSql::create(revision_records, &*conn)?;
Ok(()) Ok(())
@ -36,7 +36,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridBlockRevisionPersisten
&self, &self,
object_id: &str, object_id: &str,
rev_ids: Option<Vec<i64>>, rev_ids: Option<Vec<i64>>,
) -> Result<Vec<RevisionRecord>, Self::Error> { ) -> Result<Vec<SyncRecord>, Self::Error> {
let conn = self.pool.get().map_err(internal_error)?; let conn = self.pool.get().map_err(internal_error)?;
let records = GridMetaRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?; let records = GridMetaRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?;
Ok(records) Ok(records)
@ -46,7 +46,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridBlockRevisionPersisten
&self, &self,
object_id: &str, object_id: &str,
range: &RevisionRange, range: &RevisionRange,
) -> Result<Vec<RevisionRecord>, Self::Error> { ) -> Result<Vec<SyncRecord>, Self::Error> {
let conn = &*self.pool.get().map_err(internal_error)?; let conn = &*self.pool.get().map_err(internal_error)?;
let revisions = GridMetaRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?; let revisions = GridMetaRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?;
Ok(revisions) Ok(revisions)
@ -73,7 +73,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridBlockRevisionPersisten
&self, &self,
object_id: &str, object_id: &str,
deleted_rev_ids: Option<Vec<i64>>, deleted_rev_ids: Option<Vec<i64>>,
inserted_records: Vec<RevisionRecord>, inserted_records: Vec<SyncRecord>,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
let conn = self.pool.get().map_err(internal_error)?; let conn = self.pool.get().map_err(internal_error)?;
conn.immediate_transaction::<_, FlowyError, _>(|| { conn.immediate_transaction::<_, FlowyError, _>(|| {
@ -95,7 +95,7 @@ impl SQLiteGridBlockRevisionPersistence {
struct GridMetaRevisionSql(); struct GridMetaRevisionSql();
impl GridMetaRevisionSql { impl GridMetaRevisionSql {
fn create(revision_records: Vec<RevisionRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> { fn create(revision_records: Vec<SyncRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
// Batch insert: https://diesel.rs/guides/all-about-inserts.html // Batch insert: https://diesel.rs/guides/all-about-inserts.html
let records = revision_records let records = revision_records
@ -142,7 +142,7 @@ impl GridMetaRevisionSql {
object_id: &str, object_id: &str,
rev_ids: Option<Vec<i64>>, rev_ids: Option<Vec<i64>>,
conn: &SqliteConnection, conn: &SqliteConnection,
) -> Result<Vec<RevisionRecord>, FlowyError> { ) -> Result<Vec<SyncRecord>, FlowyError> {
let mut sql = dsl::grid_meta_rev_table let mut sql = dsl::grid_meta_rev_table
.filter(dsl::object_id.eq(object_id)) .filter(dsl::object_id.eq(object_id))
.into_boxed(); .into_boxed();
@ -163,7 +163,7 @@ impl GridMetaRevisionSql {
object_id: &str, object_id: &str,
range: RevisionRange, range: RevisionRange,
conn: &SqliteConnection, conn: &SqliteConnection,
) -> Result<Vec<RevisionRecord>, FlowyError> { ) -> Result<Vec<SyncRecord>, FlowyError> {
let rev_tables = dsl::grid_meta_rev_table let rev_tables = dsl::grid_meta_rev_table
.filter(dsl::rev_id.ge(range.start)) .filter(dsl::rev_id.ge(range.start))
.filter(dsl::rev_id.le(range.end)) .filter(dsl::rev_id.le(range.end))
@ -219,17 +219,16 @@ impl std::default::Default for GridBlockRevisionState {
} }
} }
fn mk_revision_record_from_table(user_id: &str, table: GridBlockRevisionTable) -> RevisionRecord { fn mk_revision_record_from_table(_user_id: &str, table: GridBlockRevisionTable) -> SyncRecord {
let md5 = md5(&table.data); let md5 = md5(&table.data);
let revision = Revision::new( let revision = Revision::new(
&table.object_id, &table.object_id,
table.base_rev_id, table.base_rev_id,
table.rev_id, table.rev_id,
Bytes::from(table.data), Bytes::from(table.data),
user_id,
md5, md5,
); );
RevisionRecord { SyncRecord {
revision, revision,
state: table.state.into(), state: table.state.into(),
write_to_disk: false, write_to_disk: false,
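
RevisionRecord is renamed SyncRecord throughout the persistence layer; the fields exercised here are the revision itself, its sync state, and a write_to_disk flag that stays false for records rebuilt from rows already on disk. A stand-alone sketch of the record as this file constructs it (field and variant details beyond the visible ones are assumptions):

pub enum RevisionState {
    Sync, // waiting to be acknowledged
    Ack,  // confirmed
}

pub struct Revision {
    pub object_id: String,
    pub rev_id: i64,
    pub data: Vec<u8>,
}

pub struct SyncRecord {
    pub revision: Revision,
    pub state: RevisionState,
    // Records rebuilt from rows already on disk don't need rewriting.
    pub write_to_disk: bool,
}

pub fn record_from_table_row(object_id: &str, rev_id: i64, data: Vec<u8>) -> SyncRecord {
    SyncRecord {
        revision: Revision {
            object_id: object_id.to_owned(),
            rev_id,
            data,
        },
        // The real code maps the stored row state via table.state.into().
        state: RevisionState::Sync,
        write_to_disk: false,
    }
}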

View file

@ -7,7 +7,7 @@ use flowy_database::{
ConnectionPool, ConnectionPool,
}; };
use flowy_error::{internal_error, FlowyError, FlowyResult}; use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState}; use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionState, SyncRecord};
use flowy_sync::{ use flowy_sync::{
entities::revision::{Revision, RevisionRange}, entities::revision::{Revision, RevisionRange},
util::md5, util::md5,
@ -22,7 +22,7 @@ pub struct SQLiteGridRevisionPersistence {
impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridRevisionPersistence { impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridRevisionPersistence {
type Error = FlowyError; type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> { fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error> {
let conn = self.pool.get().map_err(internal_error)?; let conn = self.pool.get().map_err(internal_error)?;
let _ = GridRevisionSql::create(revision_records, &*conn)?; let _ = GridRevisionSql::create(revision_records, &*conn)?;
Ok(()) Ok(())
@ -36,7 +36,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridRevisionPersistence {
&self, &self,
object_id: &str, object_id: &str,
rev_ids: Option<Vec<i64>>, rev_ids: Option<Vec<i64>>,
) -> Result<Vec<RevisionRecord>, Self::Error> { ) -> Result<Vec<SyncRecord>, Self::Error> {
let conn = self.pool.get().map_err(internal_error)?; let conn = self.pool.get().map_err(internal_error)?;
let records = GridRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?; let records = GridRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?;
Ok(records) Ok(records)
@ -46,7 +46,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridRevisionPersistence {
&self, &self,
object_id: &str, object_id: &str,
range: &RevisionRange, range: &RevisionRange,
) -> Result<Vec<RevisionRecord>, Self::Error> { ) -> Result<Vec<SyncRecord>, Self::Error> {
let conn = &*self.pool.get().map_err(internal_error)?; let conn = &*self.pool.get().map_err(internal_error)?;
let revisions = GridRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?; let revisions = GridRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?;
Ok(revisions) Ok(revisions)
@ -73,7 +73,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridRevisionPersistence {
&self, &self,
object_id: &str, object_id: &str,
deleted_rev_ids: Option<Vec<i64>>, deleted_rev_ids: Option<Vec<i64>>,
inserted_records: Vec<RevisionRecord>, inserted_records: Vec<SyncRecord>,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
let conn = self.pool.get().map_err(internal_error)?; let conn = self.pool.get().map_err(internal_error)?;
conn.immediate_transaction::<_, FlowyError, _>(|| { conn.immediate_transaction::<_, FlowyError, _>(|| {
@ -95,7 +95,7 @@ impl SQLiteGridRevisionPersistence {
struct GridRevisionSql(); struct GridRevisionSql();
impl GridRevisionSql { impl GridRevisionSql {
fn create(revision_records: Vec<RevisionRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> { fn create(revision_records: Vec<SyncRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
// Batch insert: https://diesel.rs/guides/all-about-inserts.html // Batch insert: https://diesel.rs/guides/all-about-inserts.html
let records = revision_records let records = revision_records
.into_iter() .into_iter()
@ -141,7 +141,7 @@ impl GridRevisionSql {
object_id: &str, object_id: &str,
rev_ids: Option<Vec<i64>>, rev_ids: Option<Vec<i64>>,
conn: &SqliteConnection, conn: &SqliteConnection,
) -> Result<Vec<RevisionRecord>, FlowyError> { ) -> Result<Vec<SyncRecord>, FlowyError> {
let mut sql = dsl::grid_rev_table.filter(dsl::object_id.eq(object_id)).into_boxed(); let mut sql = dsl::grid_rev_table.filter(dsl::object_id.eq(object_id)).into_boxed();
if let Some(rev_ids) = rev_ids { if let Some(rev_ids) = rev_ids {
sql = sql.filter(dsl::rev_id.eq_any(rev_ids)); sql = sql.filter(dsl::rev_id.eq_any(rev_ids));
@ -160,7 +160,7 @@ impl GridRevisionSql {
object_id: &str, object_id: &str,
range: RevisionRange, range: RevisionRange,
conn: &SqliteConnection, conn: &SqliteConnection,
) -> Result<Vec<RevisionRecord>, FlowyError> { ) -> Result<Vec<SyncRecord>, FlowyError> {
let rev_tables = dsl::grid_rev_table let rev_tables = dsl::grid_rev_table
.filter(dsl::rev_id.ge(range.start)) .filter(dsl::rev_id.ge(range.start))
.filter(dsl::rev_id.le(range.end)) .filter(dsl::rev_id.le(range.end))
@ -217,17 +217,16 @@ impl std::default::Default for GridRevisionState {
} }
} }
fn mk_revision_record_from_table(user_id: &str, table: GridRevisionTable) -> RevisionRecord { fn mk_revision_record_from_table(_user_id: &str, table: GridRevisionTable) -> SyncRecord {
let md5 = md5(&table.data); let md5 = md5(&table.data);
let revision = Revision::new( let revision = Revision::new(
&table.object_id, &table.object_id,
table.base_rev_id, table.base_rev_id,
table.rev_id, table.rev_id,
Bytes::from(table.data), Bytes::from(table.data),
user_id,
md5, md5,
); );
RevisionRecord { SyncRecord {
revision, revision,
state: table.state.into(), state: table.state.into(),
write_to_disk: false, write_to_disk: false,
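
All three SQLite persistences implement the same RevisionDiskCache contract over SyncRecord: create, read (optionally filtered by rev ids), ranged read, and an atomic delete-and-insert. A minimal in-memory implementation reduced to two of the signatures visible in these hunks:

use std::sync::Mutex;

#[derive(Clone)]
pub struct SyncRecord {
    pub object_id: String,
    pub rev_id: i64,
}

pub struct RevisionRange {
    pub start: i64,
    pub end: i64,
}

pub trait RevisionDiskCache {
    type Error;
    fn create_revision_records(&self, records: Vec<SyncRecord>) -> Result<(), Self::Error>;
    fn read_revision_records_with_range(
        &self,
        object_id: &str,
        range: &RevisionRange,
    ) -> Result<Vec<SyncRecord>, Self::Error>;
}

pub struct InMemoryCache(Mutex<Vec<SyncRecord>>);

impl RevisionDiskCache for InMemoryCache {
    type Error = String;

    fn create_revision_records(&self, records: Vec<SyncRecord>) -> Result<(), Self::Error> {
        self.0.lock().map_err(|e| e.to_string())?.extend(records);
        Ok(())
    }

    // Mirrors the SQL filter: object_id matches and start <= rev_id <= end.
    fn read_revision_records_with_range(
        &self,
        object_id: &str,
        range: &RevisionRange,
    ) -> Result<Vec<SyncRecord>, Self::Error> {
        let records = self.0.lock().map_err(|e| e.to_string())?;
        Ok(records
            .iter()
            .filter(|r| r.object_id == object_id && r.rev_id >= range.start && r.rev_id <= range.end)
            .cloned()
            .collect())
    }
}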

View file

@ -7,7 +7,7 @@ use flowy_database::{
ConnectionPool, ConnectionPool,
}; };
use flowy_error::{internal_error, FlowyError, FlowyResult}; use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionRecord, RevisionState}; use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, RevisionState, SyncRecord};
use flowy_sync::{ use flowy_sync::{
entities::revision::{Revision, RevisionRange}, entities::revision::{Revision, RevisionRange},
util::md5, util::md5,
@ -31,7 +31,7 @@ impl SQLiteGridViewRevisionPersistence {
impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridViewRevisionPersistence { impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridViewRevisionPersistence {
type Error = FlowyError; type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> { fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error> {
let conn = self.pool.get().map_err(internal_error)?; let conn = self.pool.get().map_err(internal_error)?;
let _ = GridViewRevisionSql::create(revision_records, &*conn)?; let _ = GridViewRevisionSql::create(revision_records, &*conn)?;
Ok(()) Ok(())
@ -45,7 +45,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridViewRevisionPersistenc
&self, &self,
object_id: &str, object_id: &str,
rev_ids: Option<Vec<i64>>, rev_ids: Option<Vec<i64>>,
) -> Result<Vec<RevisionRecord>, Self::Error> { ) -> Result<Vec<SyncRecord>, Self::Error> {
let conn = self.pool.get().map_err(internal_error)?; let conn = self.pool.get().map_err(internal_error)?;
let records = GridViewRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?; let records = GridViewRevisionSql::read(&self.user_id, object_id, rev_ids, &*conn)?;
Ok(records) Ok(records)
@ -55,7 +55,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridViewRevisionPersistenc
&self, &self,
object_id: &str, object_id: &str,
range: &RevisionRange, range: &RevisionRange,
) -> Result<Vec<RevisionRecord>, Self::Error> { ) -> Result<Vec<SyncRecord>, Self::Error> {
let conn = &*self.pool.get().map_err(internal_error)?; let conn = &*self.pool.get().map_err(internal_error)?;
let revisions = GridViewRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?; let revisions = GridViewRevisionSql::read_with_range(&self.user_id, object_id, range.clone(), conn)?;
Ok(revisions) Ok(revisions)
@ -82,7 +82,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridViewRevisionPersistenc
&self, &self,
object_id: &str, object_id: &str,
deleted_rev_ids: Option<Vec<i64>>, deleted_rev_ids: Option<Vec<i64>>,
inserted_records: Vec<RevisionRecord>, inserted_records: Vec<SyncRecord>,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
let conn = self.pool.get().map_err(internal_error)?; let conn = self.pool.get().map_err(internal_error)?;
conn.immediate_transaction::<_, FlowyError, _>(|| { conn.immediate_transaction::<_, FlowyError, _>(|| {
@ -95,7 +95,7 @@ impl RevisionDiskCache<Arc<ConnectionPool>> for SQLiteGridViewRevisionPersistenc
struct GridViewRevisionSql(); struct GridViewRevisionSql();
impl GridViewRevisionSql { impl GridViewRevisionSql {
fn create(revision_records: Vec<RevisionRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> { fn create(revision_records: Vec<SyncRecord>, conn: &SqliteConnection) -> Result<(), FlowyError> {
// Batch insert: https://diesel.rs/guides/all-about-inserts.html // Batch insert: https://diesel.rs/guides/all-about-inserts.html
let records = revision_records let records = revision_records
.into_iter() .into_iter()
@ -141,7 +141,7 @@ impl GridViewRevisionSql {
object_id: &str, object_id: &str,
rev_ids: Option<Vec<i64>>, rev_ids: Option<Vec<i64>>,
conn: &SqliteConnection, conn: &SqliteConnection,
) -> Result<Vec<RevisionRecord>, FlowyError> { ) -> Result<Vec<SyncRecord>, FlowyError> {
let mut sql = dsl::grid_view_rev_table let mut sql = dsl::grid_view_rev_table
.filter(dsl::object_id.eq(object_id)) .filter(dsl::object_id.eq(object_id))
.into_boxed(); .into_boxed();
@ -162,7 +162,7 @@ impl GridViewRevisionSql {
object_id: &str, object_id: &str,
range: RevisionRange, range: RevisionRange,
conn: &SqliteConnection, conn: &SqliteConnection,
) -> Result<Vec<RevisionRecord>, FlowyError> { ) -> Result<Vec<SyncRecord>, FlowyError> {
let rev_tables = dsl::grid_view_rev_table let rev_tables = dsl::grid_view_rev_table
.filter(dsl::rev_id.ge(range.start)) .filter(dsl::rev_id.ge(range.start))
.filter(dsl::rev_id.le(range.end)) .filter(dsl::rev_id.le(range.end))
@ -219,17 +219,16 @@ impl std::default::Default for GridViewRevisionState {
} }
} }
fn mk_revision_record_from_table(user_id: &str, table: GridViewRevisionTable) -> RevisionRecord { fn mk_revision_record_from_table(_user_id: &str, table: GridViewRevisionTable) -> SyncRecord {
let md5 = md5(&table.data); let md5 = md5(&table.data);
let revision = Revision::new( let revision = Revision::new(
&table.object_id, &table.object_id,
table.base_rev_id, table.base_rev_id,
table.rev_id, table.rev_id,
Bytes::from(table.data), Bytes::from(table.data),
user_id,
md5, md5,
); );
RevisionRecord { SyncRecord {
revision, revision,
state: table.state.into(), state: table.state.into(),
write_to_disk: false, write_to_disk: false,

View file

@ -1,4 +1,4 @@
use crate::services::tasks::queue::{GridTaskQueue, TaskHandlerId}; use crate::services::tasks::queue::GridTaskQueue;
use crate::services::tasks::runner::GridTaskRunner; use crate::services::tasks::runner::GridTaskRunner;
use crate::services::tasks::store::GridTaskStore; use crate::services::tasks::store::GridTaskStore;
use crate::services::tasks::task::Task; use crate::services::tasks::task::Task;
@ -6,7 +6,8 @@ use crate::services::tasks::task::Task;
use crate::services::tasks::{TaskContent, TaskId, TaskStatus}; use crate::services::tasks::{TaskContent, TaskId, TaskStatus};
use flowy_error::FlowyError; use flowy_error::FlowyError;
use lib_infra::future::BoxResultFuture; use lib_infra::future::BoxResultFuture;
use std::collections::HashMap; use lib_infra::ref_map::{RefCountHashMap, RefCountValue};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::sync::{watch, RwLock}; use tokio::sync::{watch, RwLock};
@ -17,11 +18,17 @@ pub(crate) trait GridTaskHandler: Send + Sync + 'static {
fn process_content(&self, content: TaskContent) -> BoxResultFuture<(), FlowyError>; fn process_content(&self, content: TaskContent) -> BoxResultFuture<(), FlowyError>;
} }
#[derive(Clone)]
struct RefCountTaskHandler(Arc<dyn GridTaskHandler>);
impl RefCountValue for RefCountTaskHandler {
fn did_remove(&self) {}
}
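The wrapper exists because `RefCountHashMap` stores values implementing `RefCountValue`, with `did_remove` fired when a value leaves the map. A minimal sketch, assuming the map drops a value (and calls `did_remove`) only when its reference count reaches zero; `LoggingHandler` and `demo` are hypothetical:

use lib_infra::ref_map::{RefCountHashMap, RefCountValue};
use std::sync::Arc;

#[derive(Clone)]
struct LoggingHandler(Arc<String>);

impl RefCountValue for LoggingHandler {
    fn did_remove(&self) {
        // Assumed semantics: invoked once the last reference is removed.
        println!("handler {} left the map", self.0);
    }
}

fn demo() {
    let mut handlers = RefCountHashMap::new();
    handlers.insert("grid:1".to_string(), LoggingHandler(Arc::new("grid:1".to_string())));
    handlers.remove("grid:1"); // fires did_remove once the count hits zero
}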
pub struct GridTaskScheduler { pub struct GridTaskScheduler {
queue: GridTaskQueue, queue: GridTaskQueue,
store: GridTaskStore, store: GridTaskStore,
notifier: watch::Sender<bool>, notifier: watch::Sender<bool>,
handlers: HashMap<TaskHandlerId, Arc<dyn GridTaskHandler>>, handlers: RefCountHashMap<RefCountTaskHandler>,
} }
impl GridTaskScheduler { impl GridTaskScheduler {
@ -32,7 +39,7 @@ impl GridTaskScheduler {
queue: GridTaskQueue::new(), queue: GridTaskQueue::new(),
store: GridTaskStore::new(), store: GridTaskStore::new(),
notifier, notifier,
handlers: HashMap::new(), handlers: RefCountHashMap::new(),
}; };
// The runner will receive the newest value after start running. // The runner will receive the newest value after start running.
scheduler.notify(); scheduler.notify();
@ -50,11 +57,11 @@ impl GridTaskScheduler {
T: GridTaskHandler, T: GridTaskHandler,
{ {
let handler_id = handler.handler_id().to_owned(); let handler_id = handler.handler_id().to_owned();
self.handlers.insert(handler_id, handler); self.handlers.insert(handler_id, RefCountTaskHandler(handler));
} }
pub(crate) fn unregister_handler<T: AsRef<str>>(&mut self, handler_id: T) { pub(crate) fn unregister_handler<T: AsRef<str>>(&mut self, handler_id: T) {
let _ = self.handlers.remove(handler_id.as_ref()); self.handlers.remove(handler_id.as_ref());
} }
#[allow(dead_code)] #[allow(dead_code)]
@ -73,7 +80,7 @@ impl GridTaskScheduler {
let content = task.content.take()?; let content = task.content.take()?;
task.set_status(TaskStatus::Processing); task.set_status(TaskStatus::Processing);
let _ = match handler.process_content(content).await { let _ = match handler.0.process_content(content).await {
Ok(_) => { Ok(_) => {
task.set_status(TaskStatus::Done); task.set_status(TaskStatus::Done);
let _ = ret.send(task.into()); let _ = ret.send(task.into());
@ -110,6 +117,7 @@ mod tests {
use crate::services::tasks::{GridTaskHandler, GridTaskScheduler, Task, TaskContent, TaskStatus}; use crate::services::tasks::{GridTaskHandler, GridTaskScheduler, Task, TaskContent, TaskStatus};
use flowy_error::FlowyError; use flowy_error::FlowyError;
use lib_infra::future::BoxResultFuture; use lib_infra::future::BoxResultFuture;
use lib_infra::ref_map::RefCountValue;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use tokio::time::interval; use tokio::time::interval;
@ -169,6 +177,11 @@ mod tests {
assert_eq!(rx_2.await.unwrap().status, TaskStatus::Done); assert_eq!(rx_2.await.unwrap().status, TaskStatus::Done);
} }
struct MockGridTaskHandler(); struct MockGridTaskHandler();
impl RefCountValue for MockGridTaskHandler {
fn did_remove(&self) {}
}
impl GridTaskHandler for MockGridTaskHandler { impl GridTaskHandler for MockGridTaskHandler {
fn handler_id(&self) -> &str { fn handler_id(&self) -> &str {
"1" "1"

View file

@ -21,5 +21,12 @@ futures-util = "0.3.15"
async-stream = "0.3.2" async-stream = "0.3.2"
serde_json = {version = "1.0"} serde_json = {version = "1.0"}
[dev-dependencies]
nanoid = "0.4.0"
flowy-revision = {path = "../flowy-revision", features = ["flowy_unit_test"]}
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0" }
parking_lot = "0.11"
[features] [features]
flowy_unit_test = [] flowy_unit_test = []

View file

@ -5,23 +5,20 @@ use std::sync::Arc;
pub trait RevisionDiskCache<Connection>: Sync + Send { pub trait RevisionDiskCache<Connection>: Sync + Send {
type Error: Debug; type Error: Debug;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error>; fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error>;
fn get_connection(&self) -> Result<Connection, Self::Error>; fn get_connection(&self) -> Result<Connection, Self::Error>;
// Read all the records if rev_ids is None. // Read all the records if rev_ids is None.
fn read_revision_records( fn read_revision_records(&self, object_id: &str, rev_ids: Option<Vec<i64>>)
&self, -> Result<Vec<SyncRecord>, Self::Error>;
object_id: &str,
rev_ids: Option<Vec<i64>>,
) -> Result<Vec<RevisionRecord>, Self::Error>;
// Read the revisions where rev_id >= range.start && rev_id <= range.end // Read the revisions where rev_id >= range.start && rev_id <= range.end
fn read_revision_records_with_range( fn read_revision_records_with_range(
&self, &self,
object_id: &str, object_id: &str,
range: &RevisionRange, range: &RevisionRange,
) -> Result<Vec<RevisionRecord>, Self::Error>; ) -> Result<Vec<SyncRecord>, Self::Error>;
fn update_revision_record(&self, changesets: Vec<RevisionChangeset>) -> FlowyResult<()>; fn update_revision_record(&self, changesets: Vec<RevisionChangeset>) -> FlowyResult<()>;
@ -34,7 +31,7 @@ pub trait RevisionDiskCache<Connection>: Sync + Send {
&self, &self,
object_id: &str, object_id: &str,
deleted_rev_ids: Option<Vec<i64>>, deleted_rev_ids: Option<Vec<i64>>,
inserted_records: Vec<RevisionRecord>, inserted_records: Vec<SyncRecord>,
) -> Result<(), Self::Error>; ) -> Result<(), Self::Error>;
} }
@ -44,7 +41,7 @@ where
{ {
type Error = FlowyError; type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<RevisionRecord>) -> Result<(), Self::Error> { fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error> {
(**self).create_revision_records(revision_records) (**self).create_revision_records(revision_records)
} }
@ -56,7 +53,7 @@ where
&self, &self,
object_id: &str, object_id: &str,
rev_ids: Option<Vec<i64>>, rev_ids: Option<Vec<i64>>,
) -> Result<Vec<RevisionRecord>, Self::Error> { ) -> Result<Vec<SyncRecord>, Self::Error> {
(**self).read_revision_records(object_id, rev_ids) (**self).read_revision_records(object_id, rev_ids)
} }
@ -64,7 +61,7 @@ where
&self, &self,
object_id: &str, object_id: &str,
range: &RevisionRange, range: &RevisionRange,
) -> Result<Vec<RevisionRecord>, Self::Error> { ) -> Result<Vec<SyncRecord>, Self::Error> {
(**self).read_revision_records_with_range(object_id, range) (**self).read_revision_records_with_range(object_id, range)
} }
@ -80,20 +77,20 @@ where
&self, &self,
object_id: &str, object_id: &str,
deleted_rev_ids: Option<Vec<i64>>, deleted_rev_ids: Option<Vec<i64>>,
inserted_records: Vec<RevisionRecord>, inserted_records: Vec<SyncRecord>,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
(**self).delete_and_insert_records(object_id, deleted_rev_ids, inserted_records) (**self).delete_and_insert_records(object_id, deleted_rev_ids, inserted_records)
} }
} }
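Because the blanket impl forwards every method through the `Arc`, callers only ever touch the trait object. A minimal usage sketch of the compaction-style call pattern; `replace_range` is a hypothetical helper and the `disk_cache` value is assumed to come from one of the SQLite persistences above:

use flowy_error::FlowyError;
use flowy_revision::disk::{RevisionDiskCache, SyncRecord};
use flowy_sync::entities::revision::RevisionRange;
use std::sync::Arc;

fn replace_range<C>(
    disk_cache: &Arc<dyn RevisionDiskCache<C, Error = FlowyError>>,
    object_id: &str,
    range: &RevisionRange,
    merged: SyncRecord,
) -> Result<(), FlowyError> {
    // Collect the rev_ids the merged record supersedes...
    let stale: Vec<i64> = disk_cache
        .read_revision_records_with_range(object_id, range)?
        .into_iter()
        .map(|record| record.revision.rev_id)
        .collect();
    // ...then swap them for the single merged record in one call.
    disk_cache.delete_and_insert_records(object_id, Some(stale), vec![merged])
}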
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct RevisionRecord { pub struct SyncRecord {
pub revision: Revision, pub revision: Revision,
pub state: RevisionState, pub state: RevisionState,
pub write_to_disk: bool, pub write_to_disk: bool,
} }
impl RevisionRecord { impl SyncRecord {
pub fn new(revision: Revision) -> Self { pub fn new(revision: Revision) -> Self {
Self { Self {
revision, revision,

View file

@ -1,4 +1,4 @@
use crate::disk::RevisionRecord; use crate::disk::SyncRecord;
use crate::REVISION_WRITE_INTERVAL_IN_MILLIS; use crate::REVISION_WRITE_INTERVAL_IN_MILLIS;
use dashmap::DashMap; use dashmap::DashMap;
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{FlowyError, FlowyResult};
@ -7,15 +7,15 @@ use std::{borrow::Cow, sync::Arc, time::Duration};
use tokio::{sync::RwLock, task::JoinHandle}; use tokio::{sync::RwLock, task::JoinHandle};
pub(crate) trait RevisionMemoryCacheDelegate: Send + Sync { pub(crate) trait RevisionMemoryCacheDelegate: Send + Sync {
fn checkpoint_tick(&self, records: Vec<RevisionRecord>) -> FlowyResult<()>; fn send_sync(&self, records: Vec<SyncRecord>) -> FlowyResult<()>;
fn receive_ack(&self, object_id: &str, rev_id: i64); fn receive_ack(&self, object_id: &str, rev_id: i64);
} }
pub(crate) struct RevisionMemoryCache { pub(crate) struct RevisionMemoryCache {
object_id: String, object_id: String,
revs_map: Arc<DashMap<i64, RevisionRecord>>, revs_map: Arc<DashMap<i64, SyncRecord>>,
delegate: Arc<dyn RevisionMemoryCacheDelegate>, delegate: Arc<dyn RevisionMemoryCacheDelegate>,
pending_write_revs: Arc<RwLock<Vec<i64>>>, defer_write_revs: Arc<RwLock<Vec<i64>>>,
defer_save: RwLock<Option<JoinHandle<()>>>, defer_save: RwLock<Option<JoinHandle<()>>>,
} }
@ -25,7 +25,7 @@ impl RevisionMemoryCache {
object_id: object_id.to_owned(), object_id: object_id.to_owned(),
revs_map: Arc::new(DashMap::new()), revs_map: Arc::new(DashMap::new()),
delegate, delegate,
pending_write_revs: Arc::new(RwLock::new(vec![])), defer_write_revs: Arc::new(RwLock::new(vec![])),
defer_save: RwLock::new(None), defer_save: RwLock::new(None),
} }
} }
@ -34,7 +34,7 @@ impl RevisionMemoryCache {
self.revs_map.contains_key(rev_id) self.revs_map.contains_key(rev_id)
} }
pub(crate) async fn add<'a>(&'a self, record: Cow<'a, RevisionRecord>) { pub(crate) async fn add<'a>(&'a self, record: Cow<'a, SyncRecord>) {
let record = match record { let record = match record {
Cow::Borrowed(record) => record.clone(), Cow::Borrowed(record) => record.clone(),
Cow::Owned(record) => record, Cow::Owned(record) => record,
@ -43,11 +43,11 @@ impl RevisionMemoryCache {
let rev_id = record.revision.rev_id; let rev_id = record.revision.rev_id;
self.revs_map.insert(rev_id, record); self.revs_map.insert(rev_id, record);
let mut write_guard = self.pending_write_revs.write().await; let mut write_guard = self.defer_write_revs.write().await;
if !write_guard.contains(&rev_id) { if !write_guard.contains(&rev_id) {
write_guard.push(rev_id); write_guard.push(rev_id);
drop(write_guard); drop(write_guard);
self.make_checkpoint().await; self.tick_checkpoint().await;
} }
} }
@ -57,8 +57,8 @@ impl RevisionMemoryCache {
Some(mut record) => record.ack(), Some(mut record) => record.ack(),
} }
if self.pending_write_revs.read().await.contains(rev_id) { if self.defer_write_revs.read().await.contains(rev_id) {
self.make_checkpoint().await; self.tick_checkpoint().await;
} else { } else {
// The revision must be saved on disk if the pending_write_revs // The revision must be saved on disk if defer_write_revs
// doesn't contain the rev_id. // doesn't contain the rev_id.
@ -66,7 +66,7 @@ impl RevisionMemoryCache {
} }
} }
pub(crate) async fn get(&self, rev_id: &i64) -> Option<RevisionRecord> { pub(crate) async fn get(&self, rev_id: &i64) -> Option<SyncRecord> {
self.revs_map.get(rev_id).map(|r| r.value().clone()) self.revs_map.get(rev_id).map(|r| r.value().clone())
} }
@ -80,21 +80,25 @@ impl RevisionMemoryCache {
} }
} }
pub(crate) async fn get_with_range(&self, range: &RevisionRange) -> Result<Vec<RevisionRecord>, FlowyError> { pub(crate) async fn get_with_range(&self, range: &RevisionRange) -> Result<Vec<SyncRecord>, FlowyError> {
let revs = range let revs = range
.iter() .iter()
.flat_map(|rev_id| self.revs_map.get(&rev_id).map(|record| record.clone())) .flat_map(|rev_id| self.revs_map.get(&rev_id).map(|record| record.clone()))
.collect::<Vec<RevisionRecord>>(); .collect::<Vec<SyncRecord>>();
Ok(revs) Ok(revs)
} }
pub(crate) async fn reset_with_revisions(&self, revision_records: Vec<RevisionRecord>) { pub(crate) fn number_of_sync_records(&self) -> usize {
self.revs_map.len()
}
pub(crate) async fn reset_with_revisions(&self, revision_records: Vec<SyncRecord>) {
self.revs_map.clear(); self.revs_map.clear();
if let Some(handler) = self.defer_save.write().await.take() { if let Some(handler) = self.defer_save.write().await.take() {
handler.abort(); handler.abort();
} }
let mut write_guard = self.pending_write_revs.write().await; let mut write_guard = self.defer_write_revs.write().await;
write_guard.clear(); write_guard.clear();
for record in revision_records { for record in revision_records {
write_guard.push(record.revision.rev_id); write_guard.push(record.revision.rev_id);
@ -102,21 +106,21 @@ impl RevisionMemoryCache {
} }
drop(write_guard); drop(write_guard);
self.make_checkpoint().await; self.tick_checkpoint().await;
} }
async fn make_checkpoint(&self) { async fn tick_checkpoint(&self) {
// https://github.com/async-graphql/async-graphql/blob/ed8449beec3d9c54b94da39bab33cec809903953/src/dataloader/mod.rs#L362 // https://github.com/async-graphql/async-graphql/blob/ed8449beec3d9c54b94da39bab33cec809903953/src/dataloader/mod.rs#L362
if let Some(handler) = self.defer_save.write().await.take() { if let Some(handler) = self.defer_save.write().await.take() {
handler.abort(); handler.abort();
} }
if self.pending_write_revs.read().await.is_empty() { if self.defer_write_revs.read().await.is_empty() {
return; return;
} }
let rev_map = self.revs_map.clone(); let rev_map = self.revs_map.clone();
let pending_write_revs = self.pending_write_revs.clone(); let pending_write_revs = self.defer_write_revs.clone();
let delegate = self.delegate.clone(); let delegate = self.delegate.clone();
*self.defer_save.write().await = Some(tokio::spawn(async move { *self.defer_save.write().await = Some(tokio::spawn(async move {
@ -128,7 +132,7 @@ impl RevisionMemoryCache {
// //
// Use saturating_sub and split_off ? // Use saturating_sub and split_off ?
// https://stackoverflow.com/questions/28952411/what-is-the-idiomatic-way-to-pop-the-last-n-elements-in-a-mutable-vec // https://stackoverflow.com/questions/28952411/what-is-the-idiomatic-way-to-pop-the-last-n-elements-in-a-mutable-vec
let mut save_records: Vec<RevisionRecord> = vec![]; let mut save_records: Vec<SyncRecord> = vec![];
revs_write_guard.iter().for_each(|rev_id| match rev_map.get(rev_id) { revs_write_guard.iter().for_each(|rev_id| match rev_map.get(rev_id) {
None => {} None => {}
Some(value) => { Some(value) => {
@ -136,7 +140,7 @@ impl RevisionMemoryCache {
} }
}); });
if delegate.checkpoint_tick(save_records).is_ok() { if delegate.send_sync(save_records).is_ok() {
revs_write_guard.clear(); revs_write_guard.clear();
drop(revs_write_guard); drop(revs_write_guard);
} }
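The deferred write is a debounce: each change aborts the previously scheduled save task and schedules a new one `REVISION_WRITE_INTERVAL_IN_MILLIS` later, so only the last tick inside the window reaches `send_sync`. A stripped-down sketch of that pattern, independent of the cache types:

use std::time::Duration;
use tokio::{sync::RwLock, task::JoinHandle};

const WRITE_INTERVAL_MS: u64 = 600;

#[derive(Default)]
struct DebouncedSave {
    defer_save: RwLock<Option<JoinHandle<()>>>,
}

impl DebouncedSave {
    async fn tick<F>(&self, save: F)
    where
        F: FnOnce() + Send + 'static,
    {
        // Abort the previously scheduled save, if any...
        if let Some(handle) = self.defer_save.write().await.take() {
            handle.abort();
        }
        // ...and schedule a fresh one after the write interval.
        *self.defer_save.write().await = Some(tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(WRITE_INTERVAL_MS)).await;
            save();
        }));
    }
}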

View file

@ -1,5 +1,5 @@
use crate::disk::{RevisionDiskCache, RevisionRecord}; use crate::disk::{RevisionDiskCache, SyncRecord};
use crate::{RevisionLoader, RevisionPersistence}; use crate::{RevisionLoader, RevisionPersistence, RevisionPersistenceConfiguration};
use bytes::Bytes; use bytes::Bytes;
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{FlowyError, FlowyResult};
use flowy_sync::entities::revision::Revision; use flowy_sync::entities::revision::Revision;
@ -47,7 +47,7 @@ where
let _ = self.save_migrate_record()?; let _ = self.save_migrate_record()?;
} }
Some(s) => { Some(s) => {
let mut record = MigrationObjectRecord::from_str(&s)?; let mut record = MigrationObjectRecord::from_str(&s).map_err(|e| FlowyError::serde().context(e))?;
let rev_str = self.target.default_target_rev_str()?; let rev_str = self.target.default_target_rev_str()?;
if record.len < rev_str.len() { if record.len < rev_str.len() {
let _ = self.reset_object().await?; let _ = self.reset_object().await?;
@ -60,10 +60,12 @@ where
} }
async fn reset_object(&self) -> FlowyResult<()> { async fn reset_object(&self) -> FlowyResult<()> {
let configuration = RevisionPersistenceConfiguration::new(2, false);
let rev_persistence = Arc::new(RevisionPersistence::from_disk_cache( let rev_persistence = Arc::new(RevisionPersistence::from_disk_cache(
&self.user_id, &self.user_id,
self.target.target_id(), self.target.target_id(),
self.disk_cache.clone(), self.disk_cache.clone(),
configuration,
)); ));
let (revisions, _) = RevisionLoader { let (revisions, _) = RevisionLoader {
object_id: self.target.target_id().to_owned(), object_id: self.target.target_id().to_owned(),
@ -75,8 +77,8 @@ where
.await?; .await?;
let bytes = self.target.reset_data(revisions)?; let bytes = self.target.reset_data(revisions)?;
let revision = Revision::initial_revision(&self.user_id, self.target.target_id(), bytes); let revision = Revision::initial_revision(self.target.target_id(), bytes);
let record = RevisionRecord::new(revision); let record = SyncRecord::new(revision);
tracing::trace!("Reset {} revision record object", self.target.target_id()); tracing::trace!("Reset {} revision record object", self.target.target_id());
let _ = self let _ = self

View file

@ -1,4 +1,4 @@
use crate::RevisionManager; use crate::{RevisionMD5, RevisionManager};
use bytes::Bytes; use bytes::Bytes;
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{FlowyError, FlowyResult};
use flowy_sync::entities::{ use flowy_sync::entities::{
@ -8,8 +8,6 @@ use flowy_sync::entities::{
use lib_infra::future::BoxResultFuture; use lib_infra::future::BoxResultFuture;
use std::{convert::TryFrom, sync::Arc}; use std::{convert::TryFrom, sync::Arc};
pub type OperationsMD5 = String;
pub struct TransformOperations<Operations> { pub struct TransformOperations<Operations> {
pub client_operations: Operations, pub client_operations: Operations,
pub server_operations: Option<Operations>, pub server_operations: Option<Operations>,
@ -28,12 +26,12 @@ pub trait ConflictResolver<Operations>
where where
Operations: Send + Sync, Operations: Send + Sync,
{ {
fn compose_operations(&self, operations: Operations) -> BoxResultFuture<OperationsMD5, FlowyError>; fn compose_operations(&self, operations: Operations) -> BoxResultFuture<RevisionMD5, FlowyError>;
fn transform_operations( fn transform_operations(
&self, &self,
operations: Operations, operations: Operations,
) -> BoxResultFuture<TransformOperations<Operations>, FlowyError>; ) -> BoxResultFuture<TransformOperations<Operations>, FlowyError>;
fn reset_operations(&self, operations: Operations) -> BoxResultFuture<OperationsMD5, FlowyError>; fn reset_operations(&self, operations: Operations) -> BoxResultFuture<RevisionMD5, FlowyError>;
} }
pub trait ConflictRevisionSink: Send + Sync + 'static { pub trait ConflictRevisionSink: Send + Sync + 'static {
@ -129,9 +127,8 @@ where
// When server_prime is None, the client's local revisions conflict with the // When server_prime is None, the client's local revisions conflict with the
// server's, so the server data needs to override the client delta. // server's, so the server data needs to override the client delta.
let md5 = self.resolver.reset_operations(client_operations).await?; let md5 = self.resolver.reset_operations(client_operations).await?;
let repeated_revision = RepeatedRevision::new(revisions); debug_assert!(md5.is_equal(&revisions.last().unwrap().md5));
assert_eq!(repeated_revision.last().unwrap().md5, md5); let _ = self.rev_manager.reset_object(revisions).await?;
let _ = self.rev_manager.reset_object(repeated_revision).await?;
Ok(None) Ok(None)
} }
Some(server_operations) => { Some(server_operations) => {
@ -154,11 +151,11 @@ where
} }
fn make_client_and_server_revision<Operations, Connection>( fn make_client_and_server_revision<Operations, Connection>(
user_id: &str, _user_id: &str,
rev_manager: &Arc<RevisionManager<Connection>>, rev_manager: &Arc<RevisionManager<Connection>>,
client_operations: Operations, client_operations: Operations,
server_operations: Option<Operations>, server_operations: Option<Operations>,
md5: String, md5: RevisionMD5,
) -> (Revision, Option<Revision>) ) -> (Revision, Option<Revision>)
where where
Operations: OperationsSerializer, Operations: OperationsSerializer,
@ -166,13 +163,13 @@ where
{ {
let (base_rev_id, rev_id) = rev_manager.next_rev_id_pair(); let (base_rev_id, rev_id) = rev_manager.next_rev_id_pair();
let bytes = client_operations.serialize_operations(); let bytes = client_operations.serialize_operations();
let client_revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, bytes, user_id, md5.clone()); let client_revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, bytes, md5.clone());
match server_operations { match server_operations {
None => (client_revision, None), None => (client_revision, None),
Some(operations) => { Some(operations) => {
let bytes = operations.serialize_operations(); let bytes = operations.serialize_operations();
let server_revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, bytes, user_id, md5); let server_revision = Revision::new(&rev_manager.object_id, base_rev_id, rev_id, bytes, md5);
(client_revision, Some(server_revision)) (client_revision, Some(server_revision))
} }
} }

View file

@ -3,8 +3,8 @@ use crate::{RevisionPersistence, RevisionSnapshotDiskCache, RevisionSnapshotMana
use bytes::Bytes; use bytes::Bytes;
use flowy_error::{FlowyError, FlowyResult}; use flowy_error::{FlowyError, FlowyResult};
use flowy_sync::{ use flowy_sync::{
entities::revision::{RepeatedRevision, Revision, RevisionRange}, entities::revision::{Revision, RevisionRange},
util::{pair_rev_id_from_revisions, RevIdCounter}, util::{md5, pair_rev_id_from_revisions, RevIdCounter},
}; };
use lib_infra::future::FutureResult; use lib_infra::future::FutureResult;
use std::sync::Arc; use std::sync::Arc;
@ -42,13 +42,8 @@ pub trait RevisionObjectSerializer: Send + Sync {
/// `RevisionCompress` is used to compress multiple revisions into one revision /// `RevisionMergeable` is used to merge multiple revisions into one revision
/// ///
pub trait RevisionCompress: Send + Sync { pub trait RevisionMergeable: Send + Sync {
fn compress_revisions( fn merge_revisions(&self, _user_id: &str, object_id: &str, mut revisions: Vec<Revision>) -> FlowyResult<Revision> {
&self,
user_id: &str,
object_id: &str,
mut revisions: Vec<Revision>,
) -> FlowyResult<Revision> {
if revisions.is_empty() { if revisions.is_empty() {
return Err(FlowyError::internal().context("Can't compact the empty revisions")); return Err(FlowyError::internal().context("Can't compact the empty revisions"));
} }
@ -63,7 +58,7 @@ pub trait RevisionCompress: Send + Sync {
let (base_rev_id, rev_id) = first_revision.pair_rev_id(); let (base_rev_id, rev_id) = first_revision.pair_rev_id();
let md5 = last_revision.md5.clone(); let md5 = last_revision.md5.clone();
let bytes = self.combine_revisions(revisions)?; let bytes = self.combine_revisions(revisions)?;
Ok(Revision::new(object_id, base_rev_id, rev_id, bytes, user_id, md5)) Ok(Revision::new(object_id, base_rev_id, rev_id, bytes, md5))
} }
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes>; fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes>;
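An object type only has to supply `combine_revisions`; the default `merge_revisions` then stitches the first revision's id pair and the last revision's md5 onto the combined payload. A minimal sketch for a plain-text object, assuming the payload is exposed as `revision.bytes` (plain byte concatenation stands in for real delta composition):

use bytes::Bytes;
use flowy_error::FlowyResult;
use flowy_revision::RevisionMergeable;
use flowy_sync::entities::revision::Revision;

struct PlainTextMerge;

impl RevisionMergeable for PlainTextMerge {
    fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
        // Concatenate payloads oldest-first; a real object composes deltas.
        let mut combined = vec![];
        for revision in revisions {
            combined.extend(revision.bytes); // assumed payload field on Revision
        }
        Ok(Bytes::from(combined))
    }
}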
@ -76,7 +71,7 @@ pub struct RevisionManager<Connection> {
rev_persistence: Arc<RevisionPersistence<Connection>>, rev_persistence: Arc<RevisionPersistence<Connection>>,
#[allow(dead_code)] #[allow(dead_code)]
rev_snapshot: Arc<RevisionSnapshotManager>, rev_snapshot: Arc<RevisionSnapshotManager>,
rev_compress: Arc<dyn RevisionCompress>, rev_compress: Arc<dyn RevisionMergeable>,
#[cfg(feature = "flowy_unit_test")] #[cfg(feature = "flowy_unit_test")]
rev_ack_notifier: tokio::sync::broadcast::Sender<i64>, rev_ack_notifier: tokio::sync::broadcast::Sender<i64>,
} }
@ -91,14 +86,12 @@ impl<Connection: 'static> RevisionManager<Connection> {
) -> Self ) -> Self
where where
SP: 'static + RevisionSnapshotDiskCache, SP: 'static + RevisionSnapshotDiskCache,
C: 'static + RevisionCompress, C: 'static + RevisionMergeable,
{ {
let rev_id_counter = RevIdCounter::new(0); let rev_id_counter = RevIdCounter::new(0);
let rev_compress = Arc::new(rev_compress); let rev_compress = Arc::new(rev_compress);
let rev_persistence = Arc::new(rev_persistence); let rev_persistence = Arc::new(rev_persistence);
let rev_snapshot = Arc::new(RevisionSnapshotManager::new(user_id, object_id, snapshot_persistence)); let rev_snapshot = Arc::new(RevisionSnapshotManager::new(user_id, object_id, snapshot_persistence));
#[cfg(feature = "flowy_unit_test")]
let (revision_ack_notifier, _) = tokio::sync::broadcast::channel(1);
Self { Self {
object_id: object_id.to_string(), object_id: object_id.to_string(),
@ -108,12 +101,12 @@ impl<Connection: 'static> RevisionManager<Connection> {
rev_snapshot, rev_snapshot,
rev_compress, rev_compress,
#[cfg(feature = "flowy_unit_test")] #[cfg(feature = "flowy_unit_test")]
rev_ack_notifier: revision_ack_notifier, rev_ack_notifier: tokio::sync::broadcast::channel(1).0,
} }
} }
#[tracing::instrument(level = "debug", skip_all, fields(object_id) err)] #[tracing::instrument(level = "debug", skip_all, fields(object_id) err)]
pub async fn load<B>(&mut self, cloud: Option<Arc<dyn RevisionCloudService>>) -> FlowyResult<B::Output> pub async fn initialize<B>(&mut self, cloud: Option<Arc<dyn RevisionCloudService>>) -> FlowyResult<B::Output>
where where
B: RevisionObjectDeserializer, B: RevisionObjectDeserializer,
{ {
@ -130,6 +123,10 @@ impl<Connection: 'static> RevisionManager<Connection> {
B::deserialize_revisions(&self.object_id, revisions) B::deserialize_revisions(&self.object_id, revisions)
} }
pub async fn close(&self) {
let _ = self.rev_persistence.compact_lagging_revisions(&self.rev_compress).await;
}
pub async fn load_revisions(&self) -> FlowyResult<Vec<Revision>> { pub async fn load_revisions(&self) -> FlowyResult<Vec<Revision>> {
let revisions = RevisionLoader { let revisions = RevisionLoader {
object_id: self.object_id.clone(), object_id: self.object_id.clone(),
@ -143,9 +140,9 @@ impl<Connection: 'static> RevisionManager<Connection> {
} }
#[tracing::instrument(level = "debug", skip(self, revisions), err)] #[tracing::instrument(level = "debug", skip(self, revisions), err)]
pub async fn reset_object(&self, revisions: RepeatedRevision) -> FlowyResult<()> { pub async fn reset_object(&self, revisions: Vec<Revision>) -> FlowyResult<()> {
let rev_id = pair_rev_id_from_revisions(&revisions).1; let rev_id = pair_rev_id_from_revisions(&revisions).1;
let _ = self.rev_persistence.reset(revisions.into_inner()).await?; let _ = self.rev_persistence.reset(revisions).await?;
self.rev_id_counter.set(rev_id); self.rev_id_counter.set(rev_id);
Ok(()) Ok(())
} }
@ -185,16 +182,29 @@ impl<Connection: 'static> RevisionManager<Connection> {
Ok(()) Ok(())
} }
/// Returns the current revision id
pub fn rev_id(&self) -> i64 { pub fn rev_id(&self) -> i64 {
self.rev_id_counter.value() self.rev_id_counter.value()
} }
pub async fn next_sync_rev_id(&self) -> Option<i64> {
self.rev_persistence.next_sync_rev_id().await
}
pub fn next_rev_id_pair(&self) -> (i64, i64) { pub fn next_rev_id_pair(&self) -> (i64, i64) {
let cur = self.rev_id_counter.value(); let cur = self.rev_id_counter.value();
let next = self.rev_id_counter.next(); let next = self.rev_id_counter.next_id();
(cur, next) (cur, next)
} }
pub fn number_of_sync_revisions(&self) -> usize {
self.rev_persistence.number_of_sync_records()
}
pub fn number_of_revisions_in_disk(&self) -> usize {
self.rev_persistence.number_of_records_in_disk()
}
pub async fn get_revisions_in_range(&self, range: RevisionRange) -> Result<Vec<Revision>, FlowyError> { pub async fn get_revisions_in_range(&self, range: RevisionRange) -> Result<Vec<Revision>, FlowyError> {
let revisions = self.rev_persistence.revisions_in_range(&range).await?; let revisions = self.rev_persistence.revisions_in_range(&range).await?;
Ok(revisions) Ok(revisions)
@ -226,13 +236,16 @@ impl<Connection: 'static> WSDataProviderDataSource for Arc<RevisionManager<Conne
} }
#[cfg(feature = "flowy_unit_test")] #[cfg(feature = "flowy_unit_test")]
impl<Connection> RevisionManager<Connection> { impl<Connection: 'static> RevisionManager<Connection> {
pub async fn revision_cache(&self) -> Arc<RevisionPersistence<Connection>> { pub async fn revision_cache(&self) -> Arc<RevisionPersistence<Connection>> {
self.rev_persistence.clone() self.rev_persistence.clone()
} }
pub fn ack_notify(&self) -> tokio::sync::broadcast::Receiver<i64> { pub fn ack_notify(&self) -> tokio::sync::broadcast::Receiver<i64> {
self.rev_ack_notifier.subscribe() self.rev_ack_notifier.subscribe()
} }
pub fn get_all_revision_records(&self) -> FlowyResult<Vec<crate::disk::SyncRecord>> {
self.rev_persistence.load_all_records(&self.object_id)
}
} }
pub struct RevisionLoader<Connection> { pub struct RevisionLoader<Connection> {
@ -244,7 +257,7 @@ pub struct RevisionLoader<Connection> {
impl<Connection: 'static> RevisionLoader<Connection> { impl<Connection: 'static> RevisionLoader<Connection> {
pub async fn load(&self) -> Result<(Vec<Revision>, i64), FlowyError> { pub async fn load(&self) -> Result<(Vec<Revision>, i64), FlowyError> {
let records = self.rev_persistence.batch_get(&self.object_id)?; let records = self.rev_persistence.load_all_records(&self.object_id)?;
let revisions: Vec<Revision>; let revisions: Vec<Revision>;
let mut rev_id = 0; let mut rev_id = 0;
if records.is_empty() && self.cloud.is_some() { if records.is_empty() && self.cloud.is_some() {
@ -278,8 +291,61 @@ impl<Connection: 'static> RevisionLoader<Connection> {
} }
pub async fn load_revisions(&self) -> Result<Vec<Revision>, FlowyError> { pub async fn load_revisions(&self) -> Result<Vec<Revision>, FlowyError> {
let records = self.rev_persistence.batch_get(&self.object_id)?; let records = self.rev_persistence.load_all_records(&self.object_id)?;
let revisions = records.into_iter().map(|record| record.revision).collect::<_>(); let revisions = records.into_iter().map(|record| record.revision).collect::<_>();
Ok(revisions) Ok(revisions)
} }
} }
/// Represents the md5 of the revision object after the revision has been
/// applied. For example, for a document it is the md5 of the document
/// content.
#[derive(Debug, Clone)]
pub struct RevisionMD5(String);
impl RevisionMD5 {
pub fn from_bytes<T: AsRef<[u8]>>(bytes: T) -> Result<Self, FlowyError> {
Ok(RevisionMD5(md5(bytes)))
}
pub fn into_inner(self) -> String {
self.0
}
pub fn is_equal(&self, s: &str) -> bool {
self.0 == s
}
}
impl std::convert::From<RevisionMD5> for String {
fn from(md5: RevisionMD5) -> Self {
md5.0
}
}
impl std::convert::From<&str> for RevisionMD5 {
fn from(s: &str) -> Self {
Self(s.to_owned())
}
}
impl std::convert::From<String> for RevisionMD5 {
fn from(s: String) -> Self {
Self(s)
}
}
impl std::ops::Deref for RevisionMD5 {
type Target = String;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl PartialEq<Self> for RevisionMD5 {
fn eq(&self, other: &Self) -> bool {
self.0 == other.0
}
}
impl std::cmp::Eq for RevisionMD5 {}
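A short usage sketch of the new type: hash the object content after applying a revision, then compare it with the md5 recorded on the revision (the crate-root path for `RevisionMD5` is assumed from the `crate::{RevisionMD5, RevisionManager}` import above):

use flowy_revision::RevisionMD5;

fn content_matches(content: &[u8], recorded_md5: &str) -> bool {
    // from_bytes hashes the post-revision content; is_equal compares against
    // the md5 string stored on the revision.
    match RevisionMD5::from_bytes(content) {
        Ok(md5) => md5.is_equal(recorded_md5),
        Err(_) => false,
    }
}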

View file

@ -2,10 +2,9 @@ use crate::cache::{
disk::{RevisionChangeset, RevisionDiskCache}, disk::{RevisionChangeset, RevisionDiskCache},
memory::RevisionMemoryCacheDelegate, memory::RevisionMemoryCacheDelegate,
}; };
use crate::disk::{RevisionRecord, RevisionState}; use crate::disk::{RevisionState, SyncRecord};
use crate::memory::RevisionMemoryCache; use crate::memory::RevisionMemoryCache;
use crate::RevisionCompress; use crate::RevisionMergeable;
use flowy_error::{internal_error, FlowyError, FlowyResult}; use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_sync::entities::revision::{Revision, RevisionRange}; use flowy_sync::entities::revision::{Revision, RevisionRange};
use std::collections::VecDeque; use std::collections::VecDeque;
@ -15,31 +14,73 @@ use tokio::task::spawn_blocking;
pub const REVISION_WRITE_INTERVAL_IN_MILLIS: u64 = 600; pub const REVISION_WRITE_INTERVAL_IN_MILLIS: u64 = 600;
#[derive(Clone)]
pub struct RevisionPersistenceConfiguration {
merge_threshold: usize,
merge_lagging: bool,
}
impl RevisionPersistenceConfiguration {
pub fn new(merge_threshold: usize, merge_lagging: bool) -> Self {
debug_assert!(merge_threshold > 1);
if merge_threshold > 1 {
Self {
merge_threshold,
merge_lagging,
}
} else {
Self {
merge_threshold: 100,
merge_lagging,
}
}
}
}
impl std::default::Default for RevisionPersistenceConfiguration {
fn default() -> Self {
Self {
merge_threshold: 100,
merge_lagging: false,
}
}
}
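A minimal sketch of choosing a configuration, assuming `RevisionPersistenceConfiguration` is re-exported at the crate root alongside `RevisionPersistence`, as the migration import above suggests:

use flowy_revision::RevisionPersistenceConfiguration;

fn editor_config() -> RevisionPersistenceConfiguration {
    // Merge every 100 local revisions into one; merge_lagging = true also
    // folds whatever is still pending when the editor closes. A threshold
    // of 0 or 1 silently falls back to 100, per the constructor guard.
    RevisionPersistenceConfiguration::new(100, true)
}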
pub struct RevisionPersistence<Connection> { pub struct RevisionPersistence<Connection> {
user_id: String, user_id: String,
object_id: String, object_id: String,
disk_cache: Arc<dyn RevisionDiskCache<Connection, Error = FlowyError>>, disk_cache: Arc<dyn RevisionDiskCache<Connection, Error = FlowyError>>,
memory_cache: Arc<RevisionMemoryCache>, memory_cache: Arc<RevisionMemoryCache>,
sync_seq: RwLock<RevisionSyncSequence>, sync_seq: RwLock<DeferSyncSequence>,
configuration: RevisionPersistenceConfiguration,
} }
impl<Connection: 'static> RevisionPersistence<Connection> { impl<Connection> RevisionPersistence<Connection>
pub fn new<C>(user_id: &str, object_id: &str, disk_cache: C) -> RevisionPersistence<Connection> where
Connection: 'static,
{
pub fn new<C>(
user_id: &str,
object_id: &str,
disk_cache: C,
configuration: RevisionPersistenceConfiguration,
) -> RevisionPersistence<Connection>
where where
C: 'static + RevisionDiskCache<Connection, Error = FlowyError>, C: 'static + RevisionDiskCache<Connection, Error = FlowyError>,
{ {
let disk_cache = Arc::new(disk_cache) as Arc<dyn RevisionDiskCache<Connection, Error = FlowyError>>; let disk_cache = Arc::new(disk_cache) as Arc<dyn RevisionDiskCache<Connection, Error = FlowyError>>;
Self::from_disk_cache(user_id, object_id, disk_cache) Self::from_disk_cache(user_id, object_id, disk_cache, configuration)
} }
pub fn from_disk_cache( pub fn from_disk_cache(
user_id: &str, user_id: &str,
object_id: &str, object_id: &str,
disk_cache: Arc<dyn RevisionDiskCache<Connection, Error = FlowyError>>, disk_cache: Arc<dyn RevisionDiskCache<Connection, Error = FlowyError>>,
configuration: RevisionPersistenceConfiguration,
) -> RevisionPersistence<Connection> { ) -> RevisionPersistence<Connection> {
let object_id = object_id.to_owned(); let object_id = object_id.to_owned();
let user_id = user_id.to_owned(); let user_id = user_id.to_owned();
let sync_seq = RwLock::new(RevisionSyncSequence::new()); let sync_seq = RwLock::new(DeferSyncSequence::new());
let memory_cache = Arc::new(RevisionMemoryCache::new(&object_id, Arc::new(disk_cache.clone()))); let memory_cache = Arc::new(RevisionMemoryCache::new(&object_id, Arc::new(disk_cache.clone())));
Self { Self {
user_id, user_id,
@ -47,6 +88,7 @@ impl<Connection: 'static> RevisionPersistence<Connection> {
disk_cache, disk_cache,
memory_cache, memory_cache,
sync_seq, sync_seq,
configuration,
} }
} }
@ -62,7 +104,37 @@ impl<Connection: 'static> RevisionPersistence<Connection> {
pub(crate) async fn sync_revision(&self, revision: &Revision) -> FlowyResult<()> { pub(crate) async fn sync_revision(&self, revision: &Revision) -> FlowyResult<()> {
tracing::Span::current().record("rev_id", &revision.rev_id); tracing::Span::current().record("rev_id", &revision.rev_id);
self.add(revision.clone(), RevisionState::Sync, false).await?; self.add(revision.clone(), RevisionState::Sync, false).await?;
self.sync_seq.write().await.add(revision.rev_id)?; self.sync_seq.write().await.recv(revision.rev_id)?;
Ok(())
}
#[tracing::instrument(level = "trace", skip_all, err)]
pub async fn compact_lagging_revisions<'a>(
&'a self,
rev_compress: &Arc<dyn RevisionMergeable + 'a>,
) -> FlowyResult<()> {
if !self.configuration.merge_lagging {
return Ok(());
}
let mut sync_seq = self.sync_seq.write().await;
let compact_seq = sync_seq.compact();
if !compact_seq.is_empty() {
let range = RevisionRange {
start: *compact_seq.front().unwrap(),
end: *compact_seq.back().unwrap(),
};
let revisions = self.revisions_in_range(&range).await?;
debug_assert_eq!(range.len() as usize, revisions.len());
// compact multiple revisions into one
let merged_revision = rev_compress.merge_revisions(&self.user_id, &self.object_id, revisions)?;
tracing::Span::current().record("rev_id", &merged_revision.rev_id);
let _ = sync_seq.recv(merged_revision.rev_id)?;
// replace the revisions in range with compact revision
self.compact(&range, merged_revision).await?;
}
Ok(()) Ok(())
} }
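The lagging ids come back oldest-first, so the front and back of the compacted sequence form the bounds that `revisions_in_range` is queried with. A small sketch of that step; `range_of` is a hypothetical helper:

use flowy_sync::entities::revision::RevisionRange;
use std::collections::VecDeque;

fn range_of(compact_seq: &VecDeque<i64>) -> Option<RevisionRange> {
    // Mirrors the front/back access above, without the unwraps.
    Some(RevisionRange {
        start: *compact_seq.front()?,
        end: *compact_seq.back()?,
    })
}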
@ -70,44 +142,46 @@ impl<Connection: 'static> RevisionPersistence<Connection> {
#[tracing::instrument(level = "trace", skip_all, fields(rev_id, compact_range, object_id=%self.object_id), err)] #[tracing::instrument(level = "trace", skip_all, fields(rev_id, compact_range, object_id=%self.object_id), err)]
pub(crate) async fn add_sync_revision<'a>( pub(crate) async fn add_sync_revision<'a>(
&'a self, &'a self,
revision: &'a Revision, new_revision: &'a Revision,
rev_compress: &Arc<dyn RevisionCompress + 'a>, rev_compress: &Arc<dyn RevisionMergeable + 'a>,
) -> FlowyResult<i64> { ) -> FlowyResult<i64> {
let mut sync_seq_write_guard = self.sync_seq.write().await; let mut sync_seq = self.sync_seq.write().await;
let result = sync_seq_write_guard.compact(); let compact_length = sync_seq.compact_length;
match result {
None => {
tracing::Span::current().record("rev_id", &revision.rev_id);
self.add(revision.clone(), RevisionState::Sync, true).await?;
sync_seq_write_guard.add(revision.rev_id)?;
Ok(revision.rev_id)
}
Some((range, mut compact_seq)) => {
tracing::Span::current().record("compact_range", &format!("{}", range).as_str());
let mut revisions = self.revisions_in_range(&range).await?;
if range.to_rev_ids().len() != revisions.len() {
debug_assert_eq!(range.to_rev_ids().len(), revisions.len());
}
// append the new revision // Before the new_revision is pushed into the sync_seq, we check whether the current `compact_length` of the
revisions.push(revision.clone()); // sync_seq has reached the merge threshold. If it has, the pending revisions need to be merged
// with the new_revision into one revision.
let mut compact_seq = VecDeque::default();
// tracing::info!("{}", compact_seq)
if compact_length >= self.configuration.merge_threshold - 1 {
compact_seq.extend(sync_seq.compact());
}
if !compact_seq.is_empty() {
let range = RevisionRange {
start: *compact_seq.front().unwrap(),
end: *compact_seq.back().unwrap(),
};
// compact multiple revisions into one tracing::Span::current().record("compact_range", &format!("{}", range).as_str());
let compact_revision = rev_compress.compress_revisions(&self.user_id, &self.object_id, revisions)?; let mut revisions = self.revisions_in_range(&range).await?;
let rev_id = compact_revision.rev_id; debug_assert_eq!(range.len() as usize, revisions.len());
tracing::Span::current().record("rev_id", &rev_id); // append the new revision
revisions.push(new_revision.clone());
// insert new revision // compact multiple revisions into one
compact_seq.push_back(rev_id); let merged_revision = rev_compress.merge_revisions(&self.user_id, &self.object_id, revisions)?;
let rev_id = merged_revision.rev_id;
tracing::Span::current().record("rev_id", &merged_revision.rev_id);
let _ = sync_seq.recv(merged_revision.rev_id)?;
// replace the revisions in range with compact revision // replace the revisions in range with compact revision
self.compact(&range, compact_revision).await?; self.compact(&range, merged_revision).await?;
// Ok(rev_id)
debug_assert_eq!(compact_seq.len(), 2); } else {
debug_assert_eq!(sync_seq_write_guard.len(), compact_seq.len()); tracing::Span::current().record("rev_id", &new_revision.rev_id);
sync_seq_write_guard.reset(compact_seq); self.add(new_revision.clone(), RevisionState::Sync, true).await?;
Ok(rev_id) sync_seq.merge_recv(new_revision.rev_id)?;
} Ok(new_revision.rev_id)
} }
} }
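Concretely, with merge_threshold = 4 the first three local revisions are queued individually; the fourth trips the `compact_length >= merge_threshold - 1` guard (3 >= 3), so all four merge into a single record, which is what the eight-revision test below asserts. A counter-only sketch of that decision:

// Mirrors the guard in add_sync_revision: the incoming revision itself
// supplies the final member of the merge set, hence the `- 1`.
fn should_merge(compact_length: usize, merge_threshold: usize) -> bool {
    compact_length >= merge_threshold - 1
}

#[test]
fn threshold_of_four_merges_on_the_fourth_revision() {
    let threshold = 4;
    assert!(!should_merge(0, threshold)); // 1st revision: queued
    assert!(!should_merge(1, threshold)); // 2nd: queued
    assert!(!should_merge(2, threshold)); // 3rd: queued
    assert!(should_merge(3, threshold)); // 4th: merge "1234" into one
}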
@ -126,12 +200,30 @@ impl<Connection: 'static> RevisionPersistence<Connection> {
} }
} }
pub(crate) async fn next_sync_rev_id(&self) -> Option<i64> {
self.sync_seq.read().await.next_rev_id()
}
pub(crate) fn number_of_sync_records(&self) -> usize {
self.memory_cache.number_of_sync_records()
}
pub(crate) fn number_of_records_in_disk(&self) -> usize {
match self.disk_cache.read_revision_records(&self.object_id, None) {
Ok(records) => records.len(),
Err(e) => {
tracing::error!("Read revision records failed: {:?}", e);
0
}
}
}
/// The cache gets reset when it conflicts with the remote revisions. /// The cache gets reset when it conflicts with the remote revisions.
#[tracing::instrument(level = "trace", skip(self, revisions), err)] #[tracing::instrument(level = "trace", skip(self, revisions), err)]
pub(crate) async fn reset(&self, revisions: Vec<Revision>) -> FlowyResult<()> { pub(crate) async fn reset(&self, revisions: Vec<Revision>) -> FlowyResult<()> {
let records = revisions let records = revisions
.into_iter() .into_iter()
.map(|revision| RevisionRecord { .map(|revision| SyncRecord {
revision, revision,
state: RevisionState::Sync, state: RevisionState::Sync,
write_to_disk: false, write_to_disk: false,
@ -151,7 +243,7 @@ impl<Connection: 'static> RevisionPersistence<Connection> {
tracing::warn!("Duplicate revision: {}:{}-{:?}", self.object_id, revision.rev_id, state); tracing::warn!("Duplicate revision: {}:{}-{:?}", self.object_id, revision.rev_id, state);
return Ok(()); return Ok(());
} }
let record = RevisionRecord { let record = SyncRecord {
revision, revision,
state, state,
write_to_disk, write_to_disk,
@ -167,12 +259,11 @@ impl<Connection: 'static> RevisionPersistence<Connection> {
let _ = self let _ = self
.disk_cache .disk_cache
.delete_revision_records(&self.object_id, Some(rev_ids))?; .delete_revision_records(&self.object_id, Some(rev_ids))?;
self.add(new_revision, RevisionState::Sync, true).await?; self.add(new_revision, RevisionState::Sync, true).await?;
Ok(()) Ok(())
} }
pub async fn get(&self, rev_id: i64) -> Option<RevisionRecord> { pub async fn get(&self, rev_id: i64) -> Option<SyncRecord> {
match self.memory_cache.get(&rev_id).await { match self.memory_cache.get(&rev_id).await {
None => match self None => match self
.disk_cache .disk_cache
@ -192,8 +283,8 @@ impl<Connection: 'static> RevisionPersistence<Connection> {
} }
} }
pub fn batch_get(&self, doc_id: &str) -> FlowyResult<Vec<RevisionRecord>> { pub fn load_all_records(&self, object_id: &str) -> FlowyResult<Vec<SyncRecord>> {
self.disk_cache.read_revision_records(doc_id, None) self.disk_cache.read_revision_records(object_id, None)
} }
// Read the revision which rev_id >= range.start && rev_id <= range.end // Read the revision which rev_id >= range.start && rev_id <= range.end
@ -225,7 +316,7 @@ impl<Connection: 'static> RevisionPersistence<Connection> {
} }
impl<C> RevisionMemoryCacheDelegate for Arc<dyn RevisionDiskCache<C, Error = FlowyError>> { impl<C> RevisionMemoryCacheDelegate for Arc<dyn RevisionDiskCache<C, Error = FlowyError>> {
fn checkpoint_tick(&self, mut records: Vec<RevisionRecord>) -> FlowyResult<()> { fn send_sync(&self, mut records: Vec<SyncRecord>) -> FlowyResult<()> {
records.retain(|record| record.write_to_disk); records.retain(|record| record.write_to_disk);
if !records.is_empty() { if !records.is_empty() {
tracing::Span::current().record( tracing::Span::current().record(
@ -251,27 +342,48 @@ impl<C> RevisionMemoryCacheDelegate for Arc<dyn RevisionDiskCache<C, Error = Flo
} }
#[derive(Default)] #[derive(Default)]
struct RevisionSyncSequence(VecDeque<i64>); struct DeferSyncSequence {
impl RevisionSyncSequence { rev_ids: VecDeque<i64>,
compact_index: Option<usize>,
compact_length: usize,
}
impl DeferSyncSequence {
fn new() -> Self { fn new() -> Self {
RevisionSyncSequence::default() DeferSyncSequence::default()
} }
fn add(&mut self, new_rev_id: i64) -> FlowyResult<()> { /// Pushes the new_rev_id to the end of the list and marks this new_rev_id is mergeable.
///
/// When calling `compact` method, it will return a list of revision ids started from
/// the `compact_start_pos`, and ends with the `compact_length`.
fn merge_recv(&mut self, new_rev_id: i64) -> FlowyResult<()> {
let _ = self.recv(new_rev_id)?;
self.compact_length += 1;
if self.compact_index.is_none() && !self.rev_ids.is_empty() {
self.compact_index = Some(self.rev_ids.len() - 1);
}
Ok(())
}
/// Pushes the new_rev_id to the end of the list.
fn recv(&mut self, new_rev_id: i64) -> FlowyResult<()> {
// The last revision's rev_id must be greater than the new one. // The last revision's rev_id must be greater than the new one.
if let Some(rev_id) = self.0.back() { if let Some(rev_id) = self.rev_ids.back() {
if *rev_id >= new_rev_id { if *rev_id >= new_rev_id {
return Err( return Err(
FlowyError::internal().context(format!("The new revision's id must be greater than {}", rev_id)) FlowyError::internal().context(format!("The new revision's id must be greater than {}", rev_id))
); );
} }
} }
self.0.push_back(new_rev_id); self.rev_ids.push_back(new_rev_id);
Ok(()) Ok(())
} }
/// Removes the rev_id from the list
fn ack(&mut self, rev_id: &i64) -> FlowyResult<()> { fn ack(&mut self, rev_id: &i64) -> FlowyResult<()> {
let cur_rev_id = self.0.front().cloned(); let cur_rev_id = self.rev_ids.front().cloned();
if let Some(pop_rev_id) = cur_rev_id { if let Some(pop_rev_id) = cur_rev_id {
if &pop_rev_id != rev_id { if &pop_rev_id != rev_id {
let desc = format!( let desc = format!(
@ -280,38 +392,43 @@ impl RevisionSyncSequence {
); );
return Err(FlowyError::internal().context(desc)); return Err(FlowyError::internal().context(desc));
} }
let _ = self.0.pop_front();
let mut compact_rev_id = None;
if let Some(compact_index) = self.compact_index {
compact_rev_id = self.rev_ids.get(compact_index).cloned();
}
let pop_rev_id = self.rev_ids.pop_front();
if let (Some(compact_rev_id), Some(pop_rev_id)) = (compact_rev_id, pop_rev_id) {
if compact_rev_id <= pop_rev_id && self.compact_length > 0 {
self.compact_length -= 1;
}
}
} }
Ok(()) Ok(())
} }
fn next_rev_id(&self) -> Option<i64> { fn next_rev_id(&self) -> Option<i64> {
self.0.front().cloned() self.rev_ids.front().cloned()
}
fn reset(&mut self, new_seq: VecDeque<i64>) {
self.0 = new_seq;
} }
fn clear(&mut self) { fn clear(&mut self) {
self.0.clear(); self.compact_index = None;
} self.compact_length = 0;
self.rev_ids.clear();
fn len(&self) -> usize {
self.0.len()
} }
// Compact the rev_ids into one, excluding the rev_id that is currently being synchronized. // Compact the rev_ids into one, excluding the rev_id that is currently being synchronized.
fn compact(&self) -> Option<(RevisionRange, VecDeque<i64>)> { fn compact(&mut self) -> VecDeque<i64> {
// Make sure there are two rev_id going to sync. No need to compact if there is only let mut compact_seq = VecDeque::with_capacity(self.rev_ids.len());
// one rev_id in queue. if let Some(start) = self.compact_index {
self.next_rev_id()?; if start < self.rev_ids.len() {
let seq = self.rev_ids.split_off(start);
let mut new_seq = self.0.clone(); compact_seq.extend(seq);
let mut drained = new_seq.drain(1..).collect::<VecDeque<_>>(); }
}
let start = drained.pop_front()?; self.compact_index = None;
let end = drained.pop_back().unwrap_or(start); self.compact_length = 0;
Some((RevisionRange { start, end }, new_seq)) compact_seq
} }
} }
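A module-local walkthrough of the sequence (a sketch: `DeferSyncSequence` is private, so this only compiles next to it; the expected values follow from the code above):

use flowy_error::FlowyResult;

fn defer_sequence_walkthrough() -> FlowyResult<()> {
    let mut seq = DeferSyncSequence::new();
    seq.merge_recv(1)?; // compact_index = Some(0), compact_length = 1
    seq.merge_recv(2)?; // compact_length = 2
    let mergeable = seq.compact(); // drains [1, 2]; both counters reset
    assert_eq!(mergeable, vec![1, 2]);
    seq.recv(3)?; // the merged revision's id re-enters the queue, unmergeable
    Ok(())
}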

View file

@ -28,7 +28,7 @@ pub trait RevisionWSDataStream: Send + Sync {
} }
// The sink provides the data that will be sent through the web socket to the // The sink provides the data that will be sent through the web socket to the
// backend. // server.
pub trait RevisionWebSocketSink: Send + Sync { pub trait RevisionWebSocketSink: Send + Sync {
fn next(&self) -> FutureResult<Option<ClientRevisionWSData>, FlowyError>; fn next(&self) -> FutureResult<Option<ClientRevisionWSData>, FlowyError>;
} }

View file

@ -0,0 +1 @@
mod revision_test;

View file

@ -0,0 +1,318 @@
use crate::revision_test::script::{RevisionScript::*, RevisionTest};
#[tokio::test]
async fn revision_sync_test() {
let test = RevisionTest::new().await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "123".to_string(),
base_rev_id,
rev_id,
})
.await;
test.run_script(AssertNextSyncRevisionId { rev_id: Some(rev_id) }).await;
test.run_script(AckRevision { rev_id }).await;
test.run_script(AssertNextSyncRevisionId { rev_id: None }).await;
}
#[tokio::test]
async fn revision_compress_2_revisions_with_2_threshold_test() {
let test = RevisionTest::new_with_configuration(2).await;
test.run_script(AddLocalRevision2 {
content: "123".to_string(),
pair_rev_id: test.next_rev_id_pair(),
})
.await;
test.run_script(AddLocalRevision2 {
content: "456".to_string(),
pair_rev_id: test.next_rev_id_pair(),
})
.await;
test.run_scripts(vec![
AssertNextSyncRevisionId { rev_id: Some(1) },
AckRevision { rev_id: 1 },
AssertNextSyncRevisionId { rev_id: None },
])
.await;
}
#[tokio::test]
async fn revision_compress_4_revisions_with_threshold_2_test() {
let test = RevisionTest::new_with_configuration(2).await;
let (base_rev_id, rev_id_1) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "1".to_string(),
base_rev_id,
rev_id: rev_id_1,
})
.await;
let (base_rev_id, rev_id_2) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "2".to_string(),
base_rev_id,
rev_id: rev_id_2,
})
.await;
let (base_rev_id, rev_id_3) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "3".to_string(),
base_rev_id,
rev_id: rev_id_3,
})
.await;
let (base_rev_id, rev_id_4) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "4".to_string(),
base_rev_id,
rev_id: rev_id_4,
})
.await;
// rev_id_2, rev_id_3, rev_id_4 will be merged into rev_id_1
test.run_scripts(vec![
AssertNumberOfSyncRevisions { num: 2 },
AssertNextSyncRevisionId { rev_id: Some(rev_id_1) },
AssertNextSyncRevisionContent {
expected: "12".to_string(),
},
AckRevision { rev_id: rev_id_1 },
AssertNextSyncRevisionId { rev_id: Some(rev_id_2) },
AssertNextSyncRevisionContent {
expected: "34".to_string(),
},
])
.await;
}
#[tokio::test]
async fn revision_compress_8_revisions_with_threshold_4_test() {
let test = RevisionTest::new_with_configuration(4).await;
let (base_rev_id, rev_id_1) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "1".to_string(),
base_rev_id,
rev_id: rev_id_1,
})
.await;
let (base_rev_id, rev_id_2) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "2".to_string(),
base_rev_id,
rev_id: rev_id_2,
})
.await;
let (base_rev_id, rev_id_3) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "3".to_string(),
base_rev_id,
rev_id: rev_id_3,
})
.await;
let (base_rev_id, rev_id_4) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "4".to_string(),
base_rev_id,
rev_id: rev_id_4,
})
.await;
let (base_rev_id, rev_id_a) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "a".to_string(),
base_rev_id,
rev_id: rev_id_a,
})
.await;
let (base_rev_id, rev_id_b) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "b".to_string(),
base_rev_id,
rev_id: rev_id_b,
})
.await;
let (base_rev_id, rev_id_c) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "c".to_string(),
base_rev_id,
rev_id: rev_id_c,
})
.await;
let (base_rev_id, rev_id_d) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "d".to_string(),
base_rev_id,
rev_id: rev_id_d,
})
.await;
test.run_scripts(vec![
AssertNumberOfSyncRevisions { num: 2 },
AssertNextSyncRevisionId { rev_id: Some(rev_id_1) },
AssertNextSyncRevisionContent {
expected: "1234".to_string(),
},
AckRevision { rev_id: rev_id_1 },
AssertNextSyncRevisionId { rev_id: Some(rev_id_a) },
AssertNextSyncRevisionContent {
expected: "abcd".to_string(),
},
AckRevision { rev_id: rev_id_a },
AssertNextSyncRevisionId { rev_id: None },
])
.await;
}
#[tokio::test]
async fn revision_merge_per_5_revision_test() {
let test = RevisionTest::new_with_configuration(5).await;
for i in 0..20 {
let content = format!("{}", i);
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content,
base_rev_id,
rev_id,
})
.await;
}
test.run_scripts(vec![
AssertNumberOfSyncRevisions { num: 4 },
AssertNextSyncRevisionContent {
expected: "01234".to_string(),
},
AckRevision { rev_id: 1 },
AssertNextSyncRevisionContent {
expected: "56789".to_string(),
},
AckRevision { rev_id: 2 },
AssertNextSyncRevisionContent {
expected: "1011121314".to_string(),
},
AckRevision { rev_id: 3 },
AssertNextSyncRevisionContent {
expected: "1516171819".to_string(),
},
AckRevision { rev_id: 4 },
AssertNextSyncRevisionId { rev_id: None },
])
.await;
}
#[tokio::test]
async fn revision_merge_per_100_revision_test() {
let test = RevisionTest::new_with_configuration(100).await;
for i in 0..1000 {
let content = format!("{}", i);
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content,
base_rev_id,
rev_id,
})
.await;
}
test.run_scripts(vec![AssertNumberOfSyncRevisions { num: 10 }]).await;
}
#[tokio::test]
async fn revision_merge_per_100_revision_test2() {
let test = RevisionTest::new_with_configuration(100).await;
for i in 0..50 {
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: format!("{}", i),
base_rev_id,
rev_id,
})
.await;
}
test.run_scripts(vec![AssertNumberOfSyncRevisions { num: 50 }]).await;
}
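// Note (inferred from the assertion above): compaction only starts once the
// number of pending revisions reaches the merge threshold, so 50 revisions
// under a threshold of 100 remain 50 separate sync revisions.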
#[tokio::test]
async fn revision_merge_per_1000_revision_test() {
let test = RevisionTest::new_with_configuration(1000).await;
for i in 0..100000 {
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: format!("{}", i),
base_rev_id,
rev_id,
})
.await;
}
test.run_scripts(vec![AssertNumberOfSyncRevisions { num: 100 }]).await;
}
#[tokio::test]
async fn revision_compress_revision_test() {
let test = RevisionTest::new_with_configuration(2).await;
test.run_scripts(vec![
AddLocalRevision2 {
content: "1".to_string(),
pair_rev_id: test.next_rev_id_pair(),
},
AddLocalRevision2 {
content: "2".to_string(),
pair_rev_id: test.next_rev_id_pair(),
},
AddLocalRevision2 {
content: "3".to_string(),
pair_rev_id: test.next_rev_id_pair(),
},
AddLocalRevision2 {
content: "4".to_string(),
pair_rev_id: test.next_rev_id_pair(),
},
AssertNumberOfSyncRevisions { num: 2 },
])
.await;
}
#[tokio::test]
async fn revision_compress_revision_while_recv_ack_test() {
let test = RevisionTest::new_with_configuration(2).await;
test.run_scripts(vec![
AddLocalRevision2 {
content: "1".to_string(),
pair_rev_id: test.next_rev_id_pair(),
},
AckRevision { rev_id: 1 },
AddLocalRevision2 {
content: "2".to_string(),
pair_rev_id: test.next_rev_id_pair(),
},
AckRevision { rev_id: 2 },
AddLocalRevision2 {
content: "3".to_string(),
pair_rev_id: test.next_rev_id_pair(),
},
AckRevision { rev_id: 3 },
AddLocalRevision2 {
content: "4".to_string(),
pair_rev_id: test.next_rev_id_pair(),
},
AssertNumberOfSyncRevisions { num: 4 },
])
.await;
}
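// Note (a reading of the assertions above, not part of the change): each
// revision is acked before the next one is added, so the un-acked pending
// count never reaches the threshold of 2; no compaction happens and four
// separate revision records remain.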

View file

@ -0,0 +1,3 @@
mod local_revision_test;
mod revision_disk_test;
mod script;

View file

@ -0,0 +1,104 @@
use crate::revision_test::script::RevisionScript::*;
use crate::revision_test::script::{InvalidRevisionObject, RevisionTest};
#[tokio::test]
async fn revision_write_to_disk_test() {
let test = RevisionTest::new_with_configuration(2).await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: "123".to_string(),
base_rev_id,
rev_id,
})
.await;
test.run_scripts(vec![
AssertNumberOfRevisionsInDisk { num: 0 },
WaitWhenWriteToDisk,
AssertNumberOfRevisionsInDisk { num: 1 },
])
.await;
}
#[tokio::test]
async fn revision_write_to_disk_with_merge_test() {
let test = RevisionTest::new_with_configuration(100).await;
for i in 0..1000 {
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_script(AddLocalRevision {
content: format!("{}", i),
base_rev_id,
rev_id,
})
.await;
}
test.run_scripts(vec![
AssertNumberOfRevisionsInDisk { num: 0 },
AssertNumberOfSyncRevisions { num: 10 },
WaitWhenWriteToDisk,
AssertNumberOfRevisionsInDisk { num: 10 },
])
.await;
}
#[tokio::test]
async fn revision_read_from_disk_test() {
let test = RevisionTest::new_with_configuration(2).await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_scripts(vec![
AddLocalRevision {
content: "123".to_string(),
base_rev_id,
rev_id,
},
AssertNumberOfRevisionsInDisk { num: 0 },
WaitWhenWriteToDisk,
AssertNumberOfRevisionsInDisk { num: 1 },
])
.await;
let test = RevisionTest::new_with_other(test).await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_scripts(vec![
AssertNextSyncRevisionId { rev_id: Some(1) },
AddLocalRevision {
content: "456".to_string(),
base_rev_id,
rev_id,
},
AckRevision { rev_id: 1 },
AssertNextSyncRevisionId { rev_id: Some(rev_id) },
])
.await;
}
#[tokio::test]
async fn revision_read_from_disk_with_invalid_record_test() {
let test = RevisionTest::new_with_configuration(2).await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_scripts(vec![AddLocalRevision {
content: "123".to_string(),
base_rev_id,
rev_id,
}])
.await;
let (base_rev_id, rev_id) = test.next_rev_id_pair();
test.run_scripts(vec![
AddInvalidLocalRevision {
bytes: InvalidRevisionObject::new().to_bytes(),
base_rev_id,
rev_id,
},
WaitWhenWriteToDisk,
])
.await;
let test = RevisionTest::new_with_other(test).await;
test.run_scripts(vec![AssertNextSyncRevisionContent {
expected: "123".to_string(),
}])
.await;
}
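// Note: InvalidRevisionObject serializes to JSON without a `content` field,
// so RevisionObjectMock::from_bytes cannot parse it; the reload path is
// expected to tolerate the bad record, and the next sync revision content is
// still the valid "123" after the restart simulated by new_with_other.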

View file

@ -0,0 +1,377 @@
use bytes::Bytes;
use flowy_error::{internal_error, FlowyError, FlowyResult};
use flowy_revision::disk::{RevisionChangeset, RevisionDiskCache, SyncRecord};
use flowy_revision::{
RevisionManager, RevisionMergeable, RevisionObjectDeserializer, RevisionPersistence,
RevisionPersistenceConfiguration, RevisionSnapshotDiskCache, RevisionSnapshotInfo,
REVISION_WRITE_INTERVAL_IN_MILLIS,
};
use flowy_sync::entities::revision::{Revision, RevisionRange};
use flowy_sync::util::md5;
use nanoid::nanoid;
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use std::time::Duration;
pub enum RevisionScript {
AddLocalRevision {
content: String,
base_rev_id: i64,
rev_id: i64,
},
AddLocalRevision2 {
content: String,
pair_rev_id: (i64, i64),
},
AddInvalidLocalRevision {
bytes: Vec<u8>,
base_rev_id: i64,
rev_id: i64,
},
AckRevision {
rev_id: i64,
},
AssertNextSyncRevisionId {
rev_id: Option<i64>,
},
AssertNumberOfSyncRevisions {
num: usize,
},
AssertNumberOfRevisionsInDisk {
num: usize,
},
AssertNextSyncRevisionContent {
expected: String,
},
WaitWhenWriteToDisk,
}
pub struct RevisionTest {
user_id: String,
object_id: String,
configuration: RevisionPersistenceConfiguration,
rev_manager: Arc<RevisionManager<RevisionConnectionMock>>,
}
impl RevisionTest {
pub async fn new() -> Self {
Self::new_with_configuration(2).await
}
pub async fn new_with_configuration(merge_threshold: i64) -> Self {
let user_id = nanoid!(10);
let object_id = nanoid!(6);
let configuration = RevisionPersistenceConfiguration::new(merge_threshold as usize, false);
let disk_cache = RevisionDiskCacheMock::new(vec![]);
let persistence = RevisionPersistence::new(&user_id, &object_id, disk_cache, configuration.clone());
let compress = RevisionCompressMock {};
let snapshot = RevisionSnapshotMock {};
let mut rev_manager = RevisionManager::new(&user_id, &object_id, persistence, compress, snapshot);
rev_manager.initialize::<RevisionObjectMockSerde>(None).await.unwrap();
Self {
user_id,
object_id,
configuration,
rev_manager: Arc::new(rev_manager),
}
}
pub async fn new_with_other(old_test: RevisionTest) -> Self {
let records = old_test.rev_manager.get_all_revision_records().unwrap();
let disk_cache = RevisionDiskCacheMock::new(records);
let configuration = old_test.configuration;
let persistence = RevisionPersistence::new(
&old_test.user_id,
&old_test.object_id,
disk_cache,
configuration.clone(),
);
let compress = RevisionCompressMock {};
let snapshot = RevisionSnapshotMock {};
let mut rev_manager =
RevisionManager::new(&old_test.user_id, &old_test.object_id, persistence, compress, snapshot);
rev_manager.initialize::<RevisionObjectMockSerde>(None).await.unwrap();
Self {
user_id: old_test.user_id,
object_id: old_test.object_id,
configuration,
rev_manager: Arc::new(rev_manager),
}
}
pub async fn run_scripts(&self, scripts: Vec<RevisionScript>) {
for script in scripts {
self.run_script(script).await;
}
}
pub fn next_rev_id_pair(&self) -> (i64, i64) {
self.rev_manager.next_rev_id_pair()
}
pub async fn run_script(&self, script: RevisionScript) {
match script {
RevisionScript::AddLocalRevision {
content,
base_rev_id,
rev_id,
} => {
let object = RevisionObjectMock::new(&content);
let bytes = object.to_bytes();
let md5 = md5(&bytes);
let revision = Revision::new(
&self.rev_manager.object_id,
base_rev_id,
rev_id,
Bytes::from(bytes),
md5,
);
self.rev_manager.add_local_revision(&revision).await.unwrap();
}
RevisionScript::AddLocalRevision2 { content, pair_rev_id } => {
let object = RevisionObjectMock::new(&content);
let bytes = object.to_bytes();
let md5 = md5(&bytes);
let revision = Revision::new(
&self.rev_manager.object_id,
pair_rev_id.0,
pair_rev_id.1,
Bytes::from(bytes),
md5,
);
self.rev_manager.add_local_revision(&revision).await.unwrap();
}
RevisionScript::AddInvalidLocalRevision {
bytes,
base_rev_id,
rev_id,
} => {
let md5 = md5(&bytes);
let revision = Revision::new(
&self.rev_manager.object_id,
base_rev_id,
rev_id,
Bytes::from(bytes),
md5,
);
self.rev_manager.add_local_revision(&revision).await.unwrap();
}
RevisionScript::AckRevision { rev_id } => {
//
self.rev_manager.ack_revision(rev_id).await.unwrap()
}
RevisionScript::AssertNextSyncRevisionId { rev_id } => {
assert_eq!(self.rev_manager.next_sync_rev_id().await, rev_id)
}
RevisionScript::AssertNumberOfSyncRevisions { num } => {
assert_eq!(self.rev_manager.number_of_sync_revisions(), num)
}
RevisionScript::AssertNumberOfRevisionsInDisk { num } => {
assert_eq!(self.rev_manager.number_of_revisions_in_disk(), num)
}
RevisionScript::AssertNextSyncRevisionContent { expected } => {
//
let rev_id = self.rev_manager.next_sync_rev_id().await.unwrap();
let revision = self.rev_manager.get_revision(rev_id).await.unwrap();
let object = RevisionObjectMock::from_bytes(&revision.bytes).unwrap();
assert_eq!(object.content, expected);
}
RevisionScript::WaitWhenWriteToDisk => {
let milliseconds = 2 * REVISION_WRITE_INTERVAL_IN_MILLIS;
tokio::time::sleep(Duration::from_millis(milliseconds)).await;
}
}
}
}
pub struct RevisionDiskCacheMock {
records: RwLock<Vec<SyncRecord>>,
}
impl RevisionDiskCacheMock {
pub fn new(records: Vec<SyncRecord>) -> Self {
Self {
records: RwLock::new(records),
}
}
}
impl RevisionDiskCache<RevisionConnectionMock> for RevisionDiskCacheMock {
type Error = FlowyError;
fn create_revision_records(&self, revision_records: Vec<SyncRecord>) -> Result<(), Self::Error> {
self.records.write().extend(revision_records);
Ok(())
}
fn get_connection(&self) -> Result<RevisionConnectionMock, Self::Error> {
todo!()
}
fn read_revision_records(
&self,
_object_id: &str,
rev_ids: Option<Vec<i64>>,
) -> Result<Vec<SyncRecord>, Self::Error> {
match rev_ids {
None => Ok(self.records.read().clone()),
Some(rev_ids) => Ok(self
.records
.read()
.iter()
.filter(|record| rev_ids.contains(&record.revision.rev_id))
.cloned()
.collect::<Vec<SyncRecord>>()),
}
}
fn read_revision_records_with_range(
&self,
_object_id: &str,
range: &RevisionRange,
) -> Result<Vec<SyncRecord>, Self::Error> {
let read_guard = self.records.read();
let records = range
.iter()
.flat_map(|rev_id| {
read_guard
.iter()
.find(|record| record.revision.rev_id == rev_id)
.cloned()
})
.collect::<Vec<SyncRecord>>();
Ok(records)
}
fn update_revision_record(&self, changesets: Vec<RevisionChangeset>) -> FlowyResult<()> {
for changeset in changesets {
if let Some(record) = self
.records
.write()
.iter_mut()
.find(|record| record.revision.rev_id == *changeset.rev_id.as_ref())
{
record.state = changeset.state;
}
}
Ok(())
}
fn delete_revision_records(&self, _object_id: &str, rev_ids: Option<Vec<i64>>) -> Result<(), Self::Error> {
// Hold the write lock once for the whole pass; interleaving a read lock (to
// find an index) with a write lock (to remove it) on the same thread would
// deadlock parking_lot's non-reentrant RwLock.
if let Some(rev_ids) = rev_ids {
self.records
.write()
.retain(|record| !rev_ids.contains(&record.revision.rev_id));
}
Ok(())
}
fn delete_and_insert_records(
&self,
_object_id: &str,
_deleted_rev_ids: Option<Vec<i64>>,
_inserted_records: Vec<SyncRecord>,
) -> Result<(), Self::Error> {
todo!()
}
}
pub struct RevisionConnectionMock {}
pub struct RevisionSnapshotMock {}
impl RevisionSnapshotDiskCache for RevisionSnapshotMock {
fn write_snapshot(&self, _object_id: &str, _rev_id: i64, _data: Vec<u8>) -> FlowyResult<()> {
todo!()
}
fn read_snapshot(&self, _object_id: &str, _rev_id: i64) -> FlowyResult<RevisionSnapshotInfo> {
todo!()
}
}
pub struct RevisionCompressMock {}
impl RevisionMergeable for RevisionCompressMock {
fn combine_revisions(&self, revisions: Vec<Revision>) -> FlowyResult<Bytes> {
let mut object = RevisionObjectMock::new("");
for revision in revisions {
if let Ok(other) = RevisionObjectMock::from_bytes(&revision.bytes) {
let _ = object.compose(other)?;
}
}
Ok(Bytes::from(object.to_bytes()))
}
}
#[derive(Serialize, Deserialize)]
pub struct InvalidRevisionObject {
data: String,
}
impl InvalidRevisionObject {
pub fn new() -> Self {
InvalidRevisionObject { data: "".to_string() }
}
pub(crate) fn to_bytes(&self) -> Vec<u8> {
serde_json::to_vec(self).unwrap()
}
// fn from_bytes(bytes: &[u8]) -> Self {
// serde_json::from_slice(bytes).unwrap()
// }
}
#[derive(Serialize, Deserialize)]
pub struct RevisionObjectMock {
content: String,
}
impl RevisionObjectMock {
pub fn new(s: &str) -> Self {
Self { content: s.to_owned() }
}
pub fn compose(&mut self, other: RevisionObjectMock) -> FlowyResult<()> {
self.content.push_str(other.content.as_str());
Ok(())
}
pub fn to_bytes(&self) -> Vec<u8> {
serde_json::to_vec(self).unwrap()
}
pub fn from_bytes(bytes: &[u8]) -> FlowyResult<Self> {
serde_json::from_slice(bytes).map_err(internal_error)
}
}
pub struct RevisionObjectMockSerde();
impl RevisionObjectDeserializer for RevisionObjectMockSerde {
type Output = RevisionObjectMock;
fn deserialize_revisions(_object_id: &str, revisions: Vec<Revision>) -> FlowyResult<Self::Output> {
let mut object = RevisionObjectMock::new("");
if revisions.is_empty() {
return Ok(object);
}
for revision in revisions {
if let Ok(revision_object) = RevisionObjectMock::from_bytes(&revision.bytes) {
let _ = object.compose(revision_object)?;
}
}
Ok(object)
}
}

View file

@ -18,7 +18,7 @@ use flowy_net::{
http_server::folder::FolderHttpCloudService, local_server::LocalServer, ws::connection::FlowyWebSocketConnect, http_server::folder::FolderHttpCloudService, local_server::LocalServer, ws::connection::FlowyWebSocketConnect,
}; };
use flowy_revision::{RevisionWebSocket, WSStateReceiver}; use flowy_revision::{RevisionWebSocket, WSStateReceiver};
use flowy_sync::entities::revision::{RepeatedRevision, Revision}; use flowy_sync::entities::revision::Revision;
use flowy_sync::entities::ws_data::ClientRevisionWSData; use flowy_sync::entities::ws_data::ClientRevisionWSData;
use flowy_user::services::UserSession; use flowy_user::services::UserSession;
use futures_core::future::BoxFuture; use futures_core::future::BoxFuture;
@ -144,19 +144,19 @@ struct DocumentViewDataProcessor(Arc<DocumentManager>);
impl ViewDataProcessor for DocumentViewDataProcessor { impl ViewDataProcessor for DocumentViewDataProcessor {
fn create_view( fn create_view(
&self, &self,
user_id: &str, _user_id: &str,
view_id: &str, view_id: &str,
layout: ViewLayoutTypePB, layout: ViewLayoutTypePB,
view_data: Bytes, view_data: Bytes,
) -> FutureResult<(), FlowyError> { ) -> FutureResult<(), FlowyError> {
// Only accept Document type // Only accept Document type
debug_assert_eq!(layout, ViewLayoutTypePB::Document); debug_assert_eq!(layout, ViewLayoutTypePB::Document);
let repeated_revision: RepeatedRevision = Revision::initial_revision(user_id, view_id, view_data).into(); let revision = Revision::initial_revision(view_id, view_data);
let view_id = view_id.to_string(); let view_id = view_id.to_string();
let manager = self.0.clone(); let manager = self.0.clone();
FutureResult::new(async move { FutureResult::new(async move {
let _ = manager.create_document(view_id, repeated_revision).await?; let _ = manager.create_document(view_id, vec![revision]).await?;
Ok(()) Ok(())
}) })
} }
@ -165,7 +165,7 @@ impl ViewDataProcessor for DocumentViewDataProcessor {
let manager = self.0.clone(); let manager = self.0.clone();
let view_id = view_id.to_string(); let view_id = view_id.to_string();
FutureResult::new(async move { FutureResult::new(async move {
let _ = manager.close_document_editor(view_id)?; let _ = manager.close_document_editor(view_id).await?;
Ok(()) Ok(())
}) })
} }
@ -188,15 +188,14 @@ impl ViewDataProcessor for DocumentViewDataProcessor {
_data_format: ViewDataFormatPB, _data_format: ViewDataFormatPB,
) -> FutureResult<Bytes, FlowyError> { ) -> FutureResult<Bytes, FlowyError> {
debug_assert_eq!(layout, ViewLayoutTypePB::Document); debug_assert_eq!(layout, ViewLayoutTypePB::Document);
let user_id = user_id.to_string(); let _user_id = user_id.to_string();
let view_id = view_id.to_string(); let view_id = view_id.to_string();
let manager = self.0.clone(); let manager = self.0.clone();
let document_content = self.0.initial_document_content(); let document_content = self.0.initial_document_content();
FutureResult::new(async move { FutureResult::new(async move {
let delta_data = Bytes::from(document_content); let delta_data = Bytes::from(document_content);
let repeated_revision: RepeatedRevision = let revision = Revision::initial_revision(&view_id, delta_data.clone());
Revision::initial_revision(&user_id, &view_id, delta_data.clone()).into(); let _ = manager.create_document(view_id, vec![revision]).await?;
let _ = manager.create_document(view_id, repeated_revision).await?;
Ok(delta_data) Ok(delta_data)
}) })
} }
@ -221,16 +220,16 @@ struct GridViewDataProcessor(Arc<GridManager>);
impl ViewDataProcessor for GridViewDataProcessor { impl ViewDataProcessor for GridViewDataProcessor {
fn create_view( fn create_view(
&self, &self,
user_id: &str, _user_id: &str,
view_id: &str, view_id: &str,
_layout: ViewLayoutTypePB, _layout: ViewLayoutTypePB,
delta_data: Bytes, delta_data: Bytes,
) -> FutureResult<(), FlowyError> { ) -> FutureResult<(), FlowyError> {
let repeated_revision: RepeatedRevision = Revision::initial_revision(user_id, view_id, delta_data).into(); let revision = Revision::initial_revision(view_id, delta_data);
let view_id = view_id.to_string(); let view_id = view_id.to_string();
let grid_manager = self.0.clone(); let grid_manager = self.0.clone();
FutureResult::new(async move { FutureResult::new(async move {
let _ = grid_manager.create_grid(view_id, repeated_revision).await?; let _ = grid_manager.create_grid(view_id, vec![revision]).await?;
Ok(()) Ok(())
}) })
} }

View file

@ -86,7 +86,7 @@ fn crate_log_filter(level: String) -> String {
filters.push(format!("lib_ws={}", level)); filters.push(format!("lib_ws={}", level));
filters.push(format!("lib_infra={}", level)); filters.push(format!("lib_infra={}", level));
filters.push(format!("flowy_sync={}", level)); filters.push(format!("flowy_sync={}", level));
// filters.push(format!("flowy_revision={}", level)); filters.push(format!("flowy_revision={}", level));
// filters.push(format!("lib_dispatch={}", level)); // filters.push(format!("lib_dispatch={}", level));
filters.push(format!("dart_ffi={}", "info")); filters.push(format!("dart_ffi={}", "info"));

View file

@ -1,3 +1,4 @@
use crate::util::md5;
use crate::{ use crate::{
client_document::{ client_document::{
history::{History, UndoResult}, history::{History, UndoResult},
@ -77,9 +78,9 @@ impl ClientDocument {
&self.operations &self.operations
} }
pub fn md5(&self) -> String { pub fn document_md5(&self) -> String {
let bytes = self.to_bytes(); let bytes = self.to_bytes();
format!("{:x}", md5::compute(bytes)) md5(&bytes)
} }
pub fn set_notify(&mut self, notify: mpsc::UnboundedSender<()>) { pub fn set_notify(&mut self, notify: mpsc::UnboundedSender<()>) {

View file

@ -1,9 +1,9 @@
use crate::errors::internal_error; use crate::errors::internal_error;
use crate::server_folder::{FolderOperations, FolderOperationsBuilder}; use crate::server_folder::{FolderOperations, FolderOperationsBuilder};
use crate::util::cal_diff; use crate::util::{cal_diff, md5};
use crate::{ use crate::{
client_folder::builder::FolderPadBuilder, client_folder::builder::FolderPadBuilder,
entities::revision::{md5, Revision}, entities::revision::Revision,
errors::{CollaborateError, CollaborateResult}, errors::{CollaborateError, CollaborateResult},
}; };
use flowy_folder_data_model::revision::{AppRevision, FolderRevision, TrashRevision, ViewRevision, WorkspaceRevision}; use flowy_folder_data_model::revision::{AppRevision, FolderRevision, TrashRevision, ViewRevision, WorkspaceRevision};
@ -61,7 +61,7 @@ impl FolderPad {
self.folder_rev = folder.folder_rev; self.folder_rev = folder.folder_rev;
self.operations = folder.operations; self.operations = folder.operations;
Ok(self.md5()) Ok(self.folder_md5())
} }
pub fn compose_remote_operations(&mut self, operations: FolderOperations) -> CollaborateResult<String> { pub fn compose_remote_operations(&mut self, operations: FolderOperations) -> CollaborateResult<String> {
@ -313,7 +313,7 @@ impl FolderPad {
} }
} }
pub fn md5(&self) -> String { pub fn folder_md5(&self) -> String {
md5(&self.operations.json_bytes()) md5(&self.operations.json_bytes())
} }
@ -345,7 +345,7 @@ impl FolderPad {
self.operations = self.operations.compose(&operations)?; self.operations = self.operations.compose(&operations)?;
Ok(Some(FolderChangeset { Ok(Some(FolderChangeset {
operations, operations,
md5: self.md5(), md5: self.folder_md5(),
})) }))
} }
} }
@ -383,7 +383,7 @@ impl FolderPad {
self.operations = self.operations.compose(&operations)?; self.operations = self.operations.compose(&operations)?;
Ok(Some(FolderChangeset { Ok(Some(FolderChangeset {
operations, operations,
md5: self.md5(), md5: self.folder_md5(),
})) }))
} }
} }

View file

@ -1,6 +1,6 @@
use crate::entities::revision::{md5, RepeatedRevision, Revision}; use crate::entities::revision::{RepeatedRevision, Revision};
use crate::errors::{CollaborateError, CollaborateResult}; use crate::errors::{CollaborateError, CollaborateResult};
use crate::util::{cal_diff, make_operations_from_revisions}; use crate::util::{cal_diff, make_operations_from_revisions, md5};
use flowy_grid_data_model::revision::{ use flowy_grid_data_model::revision::{
gen_block_id, gen_row_id, CellRevision, GridBlockRevision, RowChangeset, RowRevision, gen_block_id, gen_row_id, CellRevision, GridBlockRevision, RowChangeset, RowRevision,
}; };
@ -256,10 +256,10 @@ pub fn make_grid_block_operations(block_rev: &GridBlockRevision) -> GridBlockOpe
GridBlockOperationsBuilder::new().insert(&json).build() GridBlockOperationsBuilder::new().insert(&json).build()
} }
pub fn make_grid_block_revisions(user_id: &str, grid_block_meta_data: &GridBlockRevision) -> RepeatedRevision { pub fn make_grid_block_revisions(_user_id: &str, grid_block_meta_data: &GridBlockRevision) -> RepeatedRevision {
let operations = make_grid_block_operations(grid_block_meta_data); let operations = make_grid_block_operations(grid_block_meta_data);
let bytes = operations.json_bytes(); let bytes = operations.json_bytes();
let revision = Revision::initial_revision(user_id, &grid_block_meta_data.block_id, bytes); let revision = Revision::initial_revision(&grid_block_meta_data.block_id, bytes);
revision.into() revision.into()
} }

View file

@ -1,6 +1,6 @@
use crate::entities::revision::{md5, RepeatedRevision, Revision}; use crate::entities::revision::{RepeatedRevision, Revision};
use crate::errors::{internal_error, CollaborateError, CollaborateResult}; use crate::errors::{internal_error, CollaborateError, CollaborateResult};
use crate::util::{cal_diff, make_operations_from_revisions}; use crate::util::{cal_diff, make_operations_from_revisions, md5};
use flowy_grid_data_model::revision::{ use flowy_grid_data_model::revision::{
gen_block_id, gen_grid_id, FieldRevision, FieldTypeRevision, GridBlockMetaRevision, GridBlockMetaRevisionChangeset, gen_block_id, gen_grid_id, FieldRevision, FieldTypeRevision, GridBlockMetaRevision, GridBlockMetaRevisionChangeset,
@ -315,7 +315,7 @@ impl GridRevisionPad {
}) })
} }
pub fn md5(&self) -> String { pub fn grid_md5(&self) -> String {
md5(&self.operations.json_bytes()) md5(&self.operations.json_bytes())
} }
@ -343,7 +343,7 @@ impl GridRevisionPad {
self.operations = self.operations.compose(&operations)?; self.operations = self.operations.compose(&operations)?;
Ok(Some(GridRevisionChangeset { Ok(Some(GridRevisionChangeset {
operations, operations,
md5: self.md5(), md5: self.grid_md5(),
})) }))
} }
} }
@ -409,10 +409,10 @@ pub fn make_grid_operations(grid_rev: &GridRevision) -> GridOperations {
GridOperationsBuilder::new().insert(&json).build() GridOperationsBuilder::new().insert(&json).build()
} }
pub fn make_grid_revisions(user_id: &str, grid_rev: &GridRevision) -> RepeatedRevision { pub fn make_grid_revisions(_user_id: &str, grid_rev: &GridRevision) -> RepeatedRevision {
let operations = make_grid_operations(grid_rev); let operations = make_grid_operations(grid_rev);
let bytes = operations.json_bytes(); let bytes = operations.json_bytes();
let revision = Revision::initial_revision(user_id, &grid_rev.grid_id, bytes); let revision = Revision::initial_revision(&grid_rev.grid_id, bytes);
revision.into() revision.into()
} }

View file

@ -1,6 +1,6 @@
use crate::entities::revision::{md5, Revision}; use crate::entities::revision::Revision;
use crate::errors::{internal_error, CollaborateError, CollaborateResult}; use crate::errors::{internal_error, CollaborateError, CollaborateResult};
use crate::util::{cal_diff, make_operations_from_revisions}; use crate::util::{cal_diff, make_operations_from_revisions, md5};
use flowy_grid_data_model::revision::{ use flowy_grid_data_model::revision::{
FieldRevision, FieldTypeRevision, FilterConfigurationRevision, FilterConfigurationsByFieldId, GridViewRevision, FieldRevision, FieldTypeRevision, FilterConfigurationRevision, FilterConfigurationsByFieldId, GridViewRevision,
GroupConfigurationRevision, GroupConfigurationsByFieldId, LayoutRevision, GroupConfigurationRevision, GroupConfigurationsByFieldId, LayoutRevision,

View file

@ -1,5 +1,6 @@
use crate::util::md5;
use bytes::Bytes; use bytes::Bytes;
use flowy_derive::{ProtoBuf, ProtoBuf_Enum}; use flowy_derive::ProtoBuf;
use std::{convert::TryFrom, fmt::Formatter, ops::RangeInclusive}; use std::{convert::TryFrom, fmt::Formatter, ops::RangeInclusive};
pub type RevisionObject = lib_ot::text_delta::DeltaTextOperations; pub type RevisionObject = lib_ot::text_delta::DeltaTextOperations;
@ -20,12 +21,6 @@ pub struct Revision {
#[pb(index = 5)] #[pb(index = 5)]
pub object_id: String, pub object_id: String,
#[pb(index = 6)]
ty: RevType, // Deprecated
#[pb(index = 7)]
pub user_id: String,
} }
impl std::convert::From<Vec<u8>> for Revision { impl std::convert::From<Vec<u8>> for Revision {
@ -36,25 +31,7 @@ impl std::convert::From<Vec<u8>> for Revision {
} }
impl Revision { impl Revision {
pub fn is_empty(&self) -> bool { pub fn new<T: Into<String>>(object_id: &str, base_rev_id: i64, rev_id: i64, bytes: Bytes, md5: T) -> Revision {
self.base_rev_id == self.rev_id
}
pub fn pair_rev_id(&self) -> (i64, i64) {
(self.base_rev_id, self.rev_id)
}
pub fn is_initial(&self) -> bool {
self.rev_id == 0
}
pub fn initial_revision(user_id: &str, object_id: &str, bytes: Bytes) -> Self {
let md5 = md5(&bytes);
Self::new(object_id, 0, 0, bytes, user_id, md5)
}
pub fn new(object_id: &str, base_rev_id: i64, rev_id: i64, bytes: Bytes, user_id: &str, md5: String) -> Revision {
let user_id = user_id.to_owned();
let object_id = object_id.to_owned(); let object_id = object_id.to_owned();
let bytes = bytes.to_vec(); let bytes = bytes.to_vec();
let base_rev_id = base_rev_id; let base_rev_id = base_rev_id;
@ -68,12 +45,27 @@ impl Revision {
base_rev_id, base_rev_id,
rev_id, rev_id,
bytes, bytes,
md5, md5: md5.into(),
object_id, object_id,
ty: RevType::DeprecatedLocal,
user_id,
} }
} }
pub fn is_empty(&self) -> bool {
self.base_rev_id == self.rev_id
}
pub fn pair_rev_id(&self) -> (i64, i64) {
(self.base_rev_id, self.rev_id)
}
pub fn is_initial(&self) -> bool {
self.rev_id == 0
}
pub fn initial_revision(object_id: &str, bytes: Bytes) -> Self {
let md5 = md5(&bytes);
Self::new(object_id, 0, 0, bytes, md5)
}
} }
impl std::fmt::Debug for Revision { impl std::fmt::Debug for Revision {
@ -186,10 +178,10 @@ impl std::fmt::Display for RevisionRange {
} }
impl RevisionRange { impl RevisionRange {
pub fn len(&self) -> i64 { pub fn len(&self) -> u64 {
debug_assert!(self.end >= self.start); debug_assert!(self.end >= self.start);
if self.end >= self.start { if self.end >= self.start {
self.end - self.start + 1 (self.end - self.start + 1) as u64
} else { } else {
0 0
} }
@ -208,21 +200,3 @@ impl RevisionRange {
self.iter().collect::<Vec<_>>() self.iter().collect::<Vec<_>>()
} }
} }
#[inline]
pub fn md5<T: AsRef<[u8]>>(data: T) -> String {
let md5 = format!("{:x}", md5::compute(data));
md5
}
#[derive(Debug, ProtoBuf_Enum, Clone, Eq, PartialEq)]
pub enum RevType {
DeprecatedLocal = 0,
DeprecatedRemote = 1,
}
impl std::default::Default for RevType {
fn default() -> Self {
RevType::DeprecatedLocal
}
}
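// Call-site sketch for the trimmed API above (illustrative values only):
// user_id and the deprecated RevType are gone, and md5 now accepts any
// Into<String>.
//
// let bytes = Bytes::from(vec![1u8, 2, 3]);
// let first = Revision::initial_revision("object-id", bytes.clone());
// let next = Revision::new("object-id", first.rev_id, first.rev_id + 1, bytes, md5(&[1u8, 2, 3]));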

View file

@ -49,7 +49,8 @@ impl RevIdCounter {
pub fn new(n: i64) -> Self { pub fn new(n: i64) -> Self {
Self(AtomicI64::new(n)) Self(AtomicI64::new(n))
} }
pub fn next(&self) -> i64 {
pub fn next_id(&self) -> i64 {
let _ = self.0.fetch_add(1, SeqCst); let _ = self.0.fetch_add(1, SeqCst);
self.value() self.value()
} }

View file

@ -1,4 +1,5 @@
pub mod code_gen; pub mod code_gen;
pub mod future; pub mod future;
pub mod ref_map;
pub mod retry; pub mod retry;
pub mod util; pub mod util;

View file

@ -0,0 +1,80 @@
use std::collections::HashMap;
use std::sync::Arc;
/// A value held by a RefCountHashMap. did_remove is called once the last
/// reference to the value has been removed from the map.
pub trait RefCountValue {
fn did_remove(&self);
}
struct RefCountHandler<T> {
ref_count: usize,
inner: T,
}
impl<T> RefCountHandler<T> {
pub fn new(inner: T) -> Self {
Self { ref_count: 1, inner }
}
pub fn increase_ref_count(&mut self) {
self.ref_count += 1;
}
}
pub struct RefCountHashMap<T>(HashMap<String, RefCountHandler<T>>);
impl<T> std::default::Default for RefCountHashMap<T> {
fn default() -> Self {
Self(HashMap::new())
}
}
impl<T> RefCountHashMap<T>
where
T: Clone + Send + Sync + RefCountValue,
{
pub fn new() -> Self {
Self::default()
}
pub fn get(&self, key: &str) -> Option<T> {
self.0.get(key).map(|handler| handler.inner.clone())
}
pub fn values(&self) -> Vec<T> {
self.0.values().map(|value| value.inner.clone()).collect::<Vec<T>>()
}
pub fn insert(&mut self, key: String, value: T) {
if let Some(handler) = self.0.get_mut(&key) {
handler.increase_ref_count();
} else {
let handler = RefCountHandler::new(value);
self.0.insert(key, handler);
}
}
pub fn remove(&mut self, key: &str) {
// Decrement the ref count; the value is only dropped (and notified via
// did_remove) once the count reaches zero.
let mut should_remove = false;
if let Some(value) = self.0.get_mut(key) {
if value.ref_count > 0 {
value.ref_count -= 1;
}
should_remove = value.ref_count == 0;
}
if should_remove {
if let Some(handler) = self.0.remove(key) {
handler.inner.did_remove();
}
}
}
}
impl<T> RefCountValue for Arc<T>
where
T: RefCountValue,
{
fn did_remove(&self) {
(**self).did_remove()
}
}
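// Usage sketch added for illustration; MockEditor is a made-up type. A second
// insert of an existing key bumps the count instead of replacing the value,
// and did_remove only fires when the count drops back to zero.
#[cfg(test)]
mod ref_count_usage_example {
use super::*;

#[derive(Clone)]
struct MockEditor;

impl RefCountValue for MockEditor {
fn did_remove(&self) {
// release the underlying resource (e.g. close an editor) here
}
}

#[test]
fn insert_twice_then_remove_twice() {
let mut map = RefCountHashMap::new();
map.insert("doc-1".to_string(), MockEditor); // ref_count = 1
map.insert("doc-1".to_string(), MockEditor); // same key: ref_count = 2
map.remove("doc-1"); // ref_count = 1, value kept
assert!(map.get("doc-1").is_some());
map.remove("doc-1"); // ref_count = 0, removed, did_remove() runs
assert!(map.get("doc-1").is_none());
}
}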

View file

@ -35,9 +35,19 @@ impl NodeTree {
Ok(tree) Ok(tree)
} }
pub fn from_bytes(bytes: Vec<u8>, context: NodeTreeContext) -> Result<Self, OTError> { pub fn from_bytes(bytes: &[u8]) -> Result<Self, OTError> {
let operations = NodeOperations::from_bytes(bytes)?; let tree: NodeTree = serde_json::from_slice(bytes).map_err(|e| OTError::serde().context(e))?;
Self::from_operations(operations, context) Ok(tree)
}
pub fn to_bytes(&self) -> Vec<u8> {
match serde_json::to_vec(self) {
Ok(bytes) => bytes,
Err(e) => {
tracing::error!("{}", e);
vec![]
}
}
} }
pub fn from_operations<T: Into<NodeOperations>>(operations: T, context: NodeTreeContext) -> Result<Self, OTError> { pub fn from_operations<T: Into<NodeOperations>>(operations: T, context: NodeTreeContext) -> Result<Self, OTError> {

View file

@ -26,6 +26,7 @@ fn operation_insert_node_with_children_serde_test() {
r#"{"op":"insert","path":[0,1],"nodes":[{"type":"text","children":[{"type":"sub_text"}]}]}"# r#"{"op":"insert","path":[0,1],"nodes":[{"type":"text","children":[{"type":"sub_text"}]}]}"#
); );
} }
#[test] #[test]
fn operation_update_node_attributes_serde_test() { fn operation_update_node_attributes_serde_test() {
let operation = NodeOperation::Update { let operation = NodeOperation::Update {
@ -102,6 +103,14 @@ fn node_tree_serialize_test() {
assert_eq!(json, TREE_JSON); assert_eq!(json, TREE_JSON);
} }
#[test]
fn node_tree_serde_test() {
let tree: NodeTree = serde_json::from_str(TREE_JSON).unwrap();
let bytes = tree.to_bytes();
let tree = NodeTree::from_bytes(&bytes).unwrap();
assert_eq!(bytes, tree.to_bytes());
}
#[allow(dead_code)] #[allow(dead_code)]
const TREE_JSON: &str = r#"{ const TREE_JSON: &str = r#"{
"type": "editor", "type": "editor",