use crate::{
    errors::{internal_error, DocError, DocResult},
    services::doc::revision::{
        cache::{disk::RevisionDiskCache, memory::RevisionMemoryCache},
        RevisionRecord,
        RevisionServer,
    },
    sql_tables::RevTableSql,
};
use flowy_collaboration::entities::doc::Doc;
use flowy_database::ConnectionPool;
use lib_infra::future::FutureResult;
use lib_ot::{
    core::{Operation, OperationTransformable},
    revision::{RevState, RevType, Revision, RevisionRange},
    rich_text::RichTextDelta,
};
use std::{sync::Arc, time::Duration};
use tokio::{
    sync::{mpsc, RwLock},
    task::{spawn_blocking, JoinHandle},
};
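
/// Pull-style access to pending revision records. `next` returns the next
/// revision that has not yet been acknowledged, or `None` when there is
/// nothing left to send.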
pub trait RevisionIterator: Send + Sync {
    fn next(&self) -> FutureResult<Option<RevisionRecord>, DocError>;
}

type DocRevisionDeskCache = dyn RevisionDiskCache<Error = DocError>;
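
/// Two-tier revision cache for a single document: new revisions are kept in
/// an in-memory cache and flushed to the disk cache shortly afterwards, while
/// the `RevisionServer` is used to fetch the document when it is not on disk.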
pub struct RevisionCache {
    user_id: String,
    doc_id: String,
    dish_cache: Arc<DocRevisionDeskCache>,
    memory_cache: Arc<RevisionMemoryCache>,
    defer_save: RwLock<Option<JoinHandle<()>>>,
    server: Arc<dyn RevisionServer>,
}

impl RevisionCache {
    pub fn new(
        user_id: &str,
        doc_id: &str,
        pool: Arc<ConnectionPool>,
        server: Arc<dyn RevisionServer>,
    ) -> RevisionCache {
        let doc_id = doc_id.to_owned();
        let dish_cache = Arc::new(Persistence::new(user_id, pool));
        let memory_cache = Arc::new(RevisionMemoryCache::new());
        Self {
            user_id: user_id.to_owned(),
            doc_id,
            dish_cache,
            memory_cache,
            defer_save: RwLock::new(None),
            server,
        }
    }
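
    /// Caches a locally produced revision. Returns a `duplicate_rev` error if a
    /// revision with the same `rev_id` is already cached; otherwise the record
    /// is stored in memory and a deferred flush to disk is scheduled.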
    #[tracing::instrument(level = "debug", skip(self, revision))]
    pub async fn add_local_revision(&self, revision: Revision) -> DocResult<()> {
        if self.memory_cache.contains(&revision.rev_id) {
            return Err(DocError::duplicate_rev().context(format!("Duplicate revision id: {}", revision.rev_id)));
        }
        let record = RevisionRecord {
            revision,
            state: RevState::StateLocal,
        };
        self.memory_cache.add_revision(record).await?;
        self.save_revisions().await;
        Ok(())
    }
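
    /// Caches a revision received from the server. The deduplication and
    /// persistence flow mirrors `add_local_revision`.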
    #[tracing::instrument(level = "debug", skip(self, revision))]
    pub async fn add_remote_revision(&self, revision: Revision) -> DocResult<()> {
        if self.memory_cache.contains(&revision.rev_id) {
            return Err(DocError::duplicate_rev().context(format!("Duplicate revision id: {}", revision.rev_id)));
        }
        let record = RevisionRecord {
            revision,
            state: RevState::StateLocal,
        };
        self.memory_cache.add_revision(record).await?;
        self.save_revisions().await;
        Ok(())
    }
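
    /// Marks the revision identified by `rev_id` as acknowledged and schedules
    /// a flush of the in-memory cache to disk.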
    #[tracing::instrument(level = "debug", skip(self, rev_id), fields(rev_id = %rev_id))]
    pub async fn ack_revision(&self, rev_id: i64) {
        self.memory_cache.ack_revision(&rev_id).await;
        self.save_revisions().await;
    }
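
    /// Looks up a single revision record, first in memory and then on disk.
    /// Disk-cache errors are logged and reported as `None`.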
    pub async fn query_revision(&self, doc_id: &str, rev_id: i64) -> Option<RevisionRecord> {
        match self.memory_cache.query_revision(&rev_id).await {
            None => match self.dish_cache.read_revision(doc_id, rev_id) {
                Ok(revision) => revision,
                Err(e) => {
                    log::error!("query_revision error: {:?}", e);
                    None
                },
            },
            Some(record) => Some(record),
        }
    }
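
    /// Debounces writes to the disk cache: any previously scheduled flush is
    /// aborted, then a new task waits 300 ms before moving the in-memory
    /// revisions into the disk cache.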
    async fn save_revisions(&self) {
        if let Some(handler) = self.defer_save.write().await.take() {
            handler.abort();
        }

        if self.memory_cache.is_empty() {
            return;
        }

        let memory_cache = self.memory_cache.clone();
        let disk_cache = self.dish_cache.clone();
        *self.defer_save.write().await = Some(tokio::spawn(async move {
            tokio::time::sleep(Duration::from_millis(300)).await;
            let (ids, records) = memory_cache.revisions();
            match disk_cache.create_revisions(records) {
                Ok(_) => {
                    memory_cache.remove_revisions(ids);
                },
                Err(e) => log::error!("Save revision failed: {:?}", e),
            }
        }));
    }
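
    /// Returns the revisions inside `range`. If the in-memory cache cannot
    /// cover the whole range, the records are read from the disk cache instead.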
    pub async fn revisions_in_range(&self, range: RevisionRange) -> DocResult<Vec<Revision>> {
        let revs = self.memory_cache.revisions_in_range(&range).await?;
        if revs.len() == range.len() as usize {
            Ok(revs)
        } else {
            let doc_id = self.doc_id.clone();
            let disk_cache = self.dish_cache.clone();
            let records = spawn_blocking(move || disk_cache.revisions_in_range(&doc_id, &range))
                .await
                .map_err(internal_error)??;

            let revisions = records
                .into_iter()
                .map(|record| record.revision)
                .collect::<Vec<Revision>>();
            Ok(revisions)
        }
    }
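
    /// Loads the document by composing its revisions from the disk cache. If
    /// the document doesn't exist locally, it is fetched from the server and
    /// the fetched revision is cached.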
    pub async fn load_document(&self) -> DocResult<Doc> {
        // Load the document from the local disk cache first; it will be synced with the server later.
        let result = load_from_disk(&self.doc_id, self.memory_cache.clone(), self.dish_cache.clone()).await;
        if result.is_ok() {
            return result;
        }

        // The document doesn't exist locally. Try to fetch it from the server.
        let doc = self.server.fetch_document(&self.doc_id).await?;
        let delta_data = doc.data.as_bytes();
        let revision = Revision::new(
            doc.base_rev_id,
            doc.rev_id,
            delta_data.to_owned(),
            &doc.id,
            RevType::Remote,
            self.user_id.clone(),
        );

        self.add_remote_revision(revision).await?;
        Ok(doc)
    }
}

impl RevisionIterator for RevisionCache {
    fn next(&self) -> FutureResult<Option<RevisionRecord>, DocError> {
        let memory_cache = self.memory_cache.clone();
        let disk_cache = self.dish_cache.clone();
        let doc_id = self.doc_id.clone();
        FutureResult::new(async move {
            match memory_cache.front_local_revision().await {
                None => {
                    // The record is not in memory; look up the front local rev_id and read it from disk.
                    match memory_cache.front_local_rev_id().await {
                        None => Ok(None),
                        Some(rev_id) => match disk_cache.read_revision(&doc_id, rev_id)? {
                            None => Ok(None),
                            Some(record) => Ok(Some(record)),
                        },
                    }
                },
                Some((_, record)) => Ok(Some(record)),
            }
        })
    }
}
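
/// Rebuilds the document from the revisions stored in the disk cache: the
/// revision deltas are composed in order on a blocking task, and each record is
/// streamed back over a channel so it can also be added to the memory cache.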
async fn load_from_disk(
    doc_id: &str,
    memory_cache: Arc<RevisionMemoryCache>,
    disk_cache: Arc<DocRevisionDeskCache>,
) -> DocResult<Doc> {
    let doc_id = doc_id.to_owned();
    // Use an unbounded channel: the records are only drained after the blocking task
    // finishes, so a bounded channel could fill up and stall the blocking thread.
    let (tx, mut rx) = mpsc::unbounded_channel();
    let doc = spawn_blocking(move || {
        let records = disk_cache.read_revisions(&doc_id)?;
        if records.is_empty() {
            return Err(DocError::doc_not_found().context("The document is not found on the local disk"));
        }

        let (base_rev_id, rev_id) = records.last().unwrap().revision.pair_rev_id();
        let mut delta = RichTextDelta::new();
        for record in records.into_iter() {
            // Opti: cloning the revision here may cause memory pressure for large documents.
            match RichTextDelta::from_bytes(record.revision.clone().delta_data) {
                Ok(local_delta) => {
                    delta = delta.compose(&local_delta)?;
                    match tx.send(record) {
                        Ok(_) => {},
                        Err(e) => tracing::error!("❌Load document from disk error: {}", e),
                    }
                },
                Err(e) => {
                    tracing::error!("Deserialize delta from revision failed: {}", e);
                },
            }
        }

        correct_delta_if_need(&mut delta);
        Result::<Doc, DocError>::Ok(Doc {
            id: doc_id,
            data: delta.to_json(),
            rev_id,
            base_rev_id,
        })
    })
    .await
    .map_err(internal_error)?;

    while let Some(record) = rx.recv().await {
        match memory_cache.add_revision(record).await {
            Ok(_) => {},
            Err(e) => log::error!("{:?}", e),
        }
    }
    doc
}
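
/// A document delta is expected to end with a newline. If the last op doesn't,
/// the issue is logged and a newline insert op is appended to restore the invariant.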
fn correct_delta_if_need(delta: &mut RichTextDelta) {
    if delta.ops.last().is_none() {
        return;
    }

    let data = delta.ops.last().as_ref().unwrap().get_data();
    if !data.ends_with('\n') {
        log::error!("❌The op must end with newline. Correcting it by inserting newline op");
        delta.ops.push(Operation::Insert("\n".into()));
    }
}
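
/// Disk cache backed by the local database: revisions are stored per user and
/// per document through `RevTableSql`.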
pub(crate) struct Persistence {
    user_id: String,
    pub(crate) pool: Arc<ConnectionPool>,
}

impl RevisionDiskCache for Persistence {
    type Error = DocError;

    fn create_revisions(&self, revisions: Vec<RevisionRecord>) -> Result<(), Self::Error> {
        let conn = &*self.pool.get().map_err(internal_error)?;
        conn.immediate_transaction::<_, DocError, _>(|| {
            let _ = RevTableSql::create_rev_table(revisions, conn)?;
            Ok(())
        })
    }

    fn revisions_in_range(&self, doc_id: &str, range: &RevisionRange) -> Result<Vec<RevisionRecord>, Self::Error> {
        let conn = &*self.pool.get().map_err(internal_error)?;
        let revisions = RevTableSql::read_rev_tables_with_range(&self.user_id, doc_id, range.clone(), conn)?;
        Ok(revisions)
    }

    fn read_revision(&self, doc_id: &str, rev_id: i64) -> Result<Option<RevisionRecord>, Self::Error> {
        let conn = self.pool.get().map_err(internal_error)?;
        let some = RevTableSql::read_rev_table(&self.user_id, doc_id, &rev_id, &*conn)?;
        Ok(some)
    }

    fn read_revisions(&self, doc_id: &str) -> Result<Vec<RevisionRecord>, Self::Error> {
        let conn = self.pool.get().map_err(internal_error)?;
        let some = RevTableSql::read_rev_tables(&self.user_id, doc_id, &*conn)?;
        Ok(some)
    }
}

impl Persistence {
    pub(crate) fn new(user_id: &str, pool: Arc<ConnectionPool>) -> Self {
        Self {
            user_id: user_id.to_owned(),
            pool,
        }
    }
}
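
// Test-only accessors, compiled when the `flowy_unit_test` feature is enabled.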
#[cfg(feature = "flowy_unit_test")]
impl RevisionCache {
    pub fn dish_cache(&self) -> Arc<DocRevisionDeskCache> { self.dish_cache.clone() }

    pub fn memory_cache(&self) -> Arc<RevisionMemoryCache> { self.memory_cache.clone() }
}