2021-10-02 17:19:54 +08:00
|
|
|
use crate::{
|
2021-10-06 15:23:38 +08:00
|
|
|
entities::doc::{revision_from_doc, Doc, RevId, RevType, Revision, RevisionRange},
|
2021-10-02 17:19:54 +08:00
|
|
|
errors::{internal_error, DocError, DocResult},
|
2021-11-02 22:18:13 +08:00
|
|
|
services::doc::revision::{model::*, RevisionServer},
|
2021-10-08 13:46:23 +08:00
|
|
|
sql_tables::RevState,
|
2021-10-02 17:19:54 +08:00
|
|
|
};
|
2021-11-02 22:18:13 +08:00
|
|
|
use async_stream::stream;
|
2021-10-08 13:46:23 +08:00
|
|
|
use dashmap::DashMap;
|
2021-10-02 21:35:06 +08:00
|
|
|
use flowy_database::ConnectionPool;
|
2021-10-08 13:46:23 +08:00
|
|
|
use flowy_infra::future::ResultFuture;
|
2021-10-07 20:46:29 +08:00
|
|
|
use flowy_ot::core::{Delta, OperationTransformable};
|
2021-11-02 22:18:13 +08:00
|
|
|
use futures::stream::StreamExt;
|
2021-10-08 13:46:23 +08:00
|
|
|
use std::{collections::VecDeque, sync::Arc, time::Duration};
|
2021-10-02 17:19:54 +08:00
|
|
|
use tokio::{
|
2021-10-09 16:43:56 +08:00
|
|
|
sync::{broadcast, mpsc, RwLock},
|
2021-10-02 17:19:54 +08:00
|
|
|
task::{spawn_blocking, JoinHandle},
|
|
|
|
};
|
|
|
|
|
2021-10-08 13:46:23 +08:00
|
|
|
/// Stores a document's local revisions: caches them in memory, tracks which
/// ones are still waiting for server acknowledgement, and batches writes to
/// persistent storage via a deferred save task.
pub struct RevisionStore {
    // Identifier of the document whose revisions this store manages.
    doc_id: String,
    // Database-backed persistence layer for revisions.
    persistence: Arc<Persistence>,
    // In-memory revision cache keyed by rev_id; entries are removed once the
    // deferred save successfully writes them to the database.
    revs_map: Arc<DashMap<i64, RevisionRecord>>,
    // Channel feeding pending-revision messages to the background RevisionStream.
    pending_tx: PendingSender,
    // FIFO of revisions awaiting server acknowledgement.
    pending_revs: Arc<RwLock<VecDeque<PendingRevId>>>,
    // Handle of the currently scheduled (debounced) save task, if any;
    // replaced/aborted each time save_revisions is called.
    defer_save_oper: RwLock<Option<JoinHandle<()>>>,
    // Remote source used to fetch the document when it is absent locally.
    server: Arc<dyn RevisionServer>,
}
|
|
|
|
|
2021-10-08 13:46:23 +08:00
|
|
|
impl RevisionStore {
|
2021-10-02 17:19:54 +08:00
|
|
|
pub fn new(
|
|
|
|
doc_id: &str,
|
|
|
|
pool: Arc<ConnectionPool>,
|
|
|
|
server: Arc<dyn RevisionServer>,
|
2021-11-02 22:18:13 +08:00
|
|
|
ws_revision_sender: mpsc::UnboundedSender<Revision>,
|
2021-10-08 13:46:23 +08:00
|
|
|
) -> Arc<RevisionStore> {
|
2021-10-02 17:19:54 +08:00
|
|
|
let doc_id = doc_id.to_owned();
|
2021-10-07 20:46:29 +08:00
|
|
|
let persistence = Arc::new(Persistence::new(pool));
|
|
|
|
let revs_map = Arc::new(DashMap::new());
|
2021-10-08 13:46:23 +08:00
|
|
|
let (pending_tx, pending_rx) = mpsc::unbounded_channel();
|
2021-10-07 20:46:29 +08:00
|
|
|
let pending_revs = Arc::new(RwLock::new(VecDeque::new()));
|
2021-10-02 17:19:54 +08:00
|
|
|
|
2021-10-08 13:46:23 +08:00
|
|
|
let store = Arc::new(Self {
|
2021-10-02 17:19:54 +08:00
|
|
|
doc_id,
|
|
|
|
persistence,
|
2021-10-07 20:46:29 +08:00
|
|
|
revs_map,
|
|
|
|
pending_revs,
|
2021-10-08 13:46:23 +08:00
|
|
|
pending_tx,
|
2021-11-03 13:52:33 +08:00
|
|
|
defer_save_oper: RwLock::new(None),
|
2021-10-02 17:19:54 +08:00
|
|
|
server,
|
2021-10-08 13:46:23 +08:00
|
|
|
});
|
|
|
|
|
2021-11-02 22:18:13 +08:00
|
|
|
tokio::spawn(RevisionStream::new(store.clone(), pending_rx, ws_revision_sender).run());
|
2021-10-08 13:46:23 +08:00
|
|
|
|
|
|
|
store
|
2021-10-02 17:19:54 +08:00
|
|
|
}
|
|
|
|
|
2021-10-05 19:32:58 +08:00
|
|
|
#[tracing::instrument(level = "debug", skip(self, revision))]
|
2021-10-07 20:46:29 +08:00
|
|
|
pub async fn handle_new_revision(&self, revision: Revision) -> DocResult<()> {
|
|
|
|
if self.revs_map.contains_key(&revision.rev_id) {
|
2021-10-04 14:24:35 +08:00
|
|
|
return Err(DocError::duplicate_rev().context(format!("Duplicate revision id: {}", revision.rev_id)));
|
|
|
|
}
|
|
|
|
|
2021-10-08 15:08:56 +08:00
|
|
|
let (sender, receiver) = broadcast::channel(2);
|
2021-10-08 13:46:23 +08:00
|
|
|
let revs_map = self.revs_map.clone();
|
|
|
|
let mut rx = sender.subscribe();
|
|
|
|
tokio::spawn(async move {
|
|
|
|
match rx.recv().await {
|
|
|
|
Ok(rev_id) => match revs_map.get_mut(&rev_id) {
|
|
|
|
None => {},
|
|
|
|
Some(mut rev) => rev.value_mut().state = RevState::Acked,
|
|
|
|
},
|
|
|
|
Err(_) => {},
|
|
|
|
}
|
2021-10-07 20:46:29 +08:00
|
|
|
});
|
2021-10-08 13:46:23 +08:00
|
|
|
|
|
|
|
let pending_rev = PendingRevId::new(revision.rev_id, sender);
|
|
|
|
self.pending_revs.write().await.push_back(pending_rev);
|
2021-11-03 13:52:33 +08:00
|
|
|
self.revs_map.insert(revision.rev_id, RevisionRecord::new(revision));
|
2021-10-08 13:46:23 +08:00
|
|
|
|
|
|
|
let _ = self.pending_tx.send(PendingMsg::Revision { ret: receiver });
|
2021-10-02 17:19:54 +08:00
|
|
|
self.save_revisions().await;
|
2021-10-04 14:24:35 +08:00
|
|
|
Ok(())
|
2021-10-02 17:19:54 +08:00
|
|
|
}
|
|
|
|
|
2021-10-08 13:46:23 +08:00
|
|
|
#[tracing::instrument(level = "debug", skip(self))]
|
2021-10-07 20:46:29 +08:00
|
|
|
pub async fn handle_revision_acked(&self, rev_id: RevId) {
|
|
|
|
let rev_id = rev_id.value;
|
2021-10-08 13:46:23 +08:00
|
|
|
self.pending_revs
|
|
|
|
.write()
|
|
|
|
.await
|
|
|
|
.retain(|pending| !pending.finish(rev_id));
|
|
|
|
|
2021-10-02 17:19:54 +08:00
|
|
|
self.save_revisions().await;
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn save_revisions(&self) {
|
2021-11-03 13:52:33 +08:00
|
|
|
if let Some(handler) = self.defer_save_oper.write().await.take() {
|
2021-10-02 17:19:54 +08:00
|
|
|
handler.abort();
|
|
|
|
}
|
|
|
|
|
2021-10-07 20:46:29 +08:00
|
|
|
if self.revs_map.is_empty() {
|
2021-10-02 17:19:54 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2021-10-07 20:46:29 +08:00
|
|
|
let revs_map = self.revs_map.clone();
|
2021-10-02 17:19:54 +08:00
|
|
|
let persistence = self.persistence.clone();
|
|
|
|
|
2021-11-03 13:52:33 +08:00
|
|
|
*self.defer_save_oper.write().await = Some(tokio::spawn(async move {
|
2021-10-02 17:19:54 +08:00
|
|
|
tokio::time::sleep(Duration::from_millis(300)).await;
|
2021-10-07 20:46:29 +08:00
|
|
|
let ids = revs_map.iter().map(|kv| kv.key().clone()).collect::<Vec<i64>>();
|
|
|
|
let revisions_state = revs_map
|
2021-10-02 17:19:54 +08:00
|
|
|
.iter()
|
2021-10-07 20:46:29 +08:00
|
|
|
.map(|kv| (kv.revision.clone(), kv.state))
|
2021-10-02 17:19:54 +08:00
|
|
|
.collect::<Vec<(Revision, RevState)>>();
|
|
|
|
|
2021-10-08 13:46:23 +08:00
|
|
|
match persistence.create_revs(revisions_state) {
|
2021-10-07 20:46:29 +08:00
|
|
|
Ok(_) => revs_map.retain(|k, _| !ids.contains(k)),
|
2021-10-02 17:19:54 +08:00
|
|
|
Err(e) => log::error!("Save revision failed: {:?}", e),
|
|
|
|
}
|
|
|
|
}));
|
|
|
|
}
|
2021-10-05 10:19:43 +08:00
|
|
|
|
2021-10-07 20:46:29 +08:00
|
|
|
pub async fn revs_in_range(&self, range: RevisionRange) -> DocResult<Vec<Revision>> {
|
2021-10-06 23:21:57 +08:00
|
|
|
let revs = range
|
|
|
|
.iter()
|
2021-10-07 20:46:29 +08:00
|
|
|
.flat_map(|rev_id| match self.revs_map.get(&rev_id) {
|
2021-10-06 23:21:57 +08:00
|
|
|
None => None,
|
2021-10-07 20:46:29 +08:00
|
|
|
Some(rev) => Some(rev.revision.clone()),
|
2021-10-05 10:19:43 +08:00
|
|
|
})
|
|
|
|
.collect::<Vec<Revision>>();
|
|
|
|
|
|
|
|
if revs.len() == range.len() as usize {
|
|
|
|
Ok(revs)
|
|
|
|
} else {
|
|
|
|
let doc_id = self.doc_id.clone();
|
|
|
|
let persistence = self.persistence.clone();
|
2021-10-08 13:46:23 +08:00
|
|
|
let result = spawn_blocking(move || persistence.read_rev_with_range(&doc_id, range))
|
|
|
|
.await
|
|
|
|
.map_err(internal_error)?;
|
2021-10-05 10:19:43 +08:00
|
|
|
result
|
|
|
|
}
|
|
|
|
}
|
2021-10-02 17:19:54 +08:00
|
|
|
|
2021-10-07 20:46:29 +08:00
|
|
|
pub async fn fetch_document(&self) -> DocResult<Doc> {
|
2021-10-05 23:18:19 +08:00
|
|
|
let result = fetch_from_local(&self.doc_id, self.persistence.clone()).await;
|
|
|
|
if result.is_ok() {
|
|
|
|
return result;
|
|
|
|
}
|
2021-10-02 17:19:54 +08:00
|
|
|
|
2021-10-06 15:23:38 +08:00
|
|
|
let doc = self.server.fetch_document_from_remote(&self.doc_id).await?;
|
|
|
|
let revision = revision_from_doc(doc.clone(), RevType::Remote);
|
2021-10-08 13:46:23 +08:00
|
|
|
let _ = self.persistence.create_revs(vec![(revision, RevState::Acked)])?;
|
2021-10-06 15:23:38 +08:00
|
|
|
Ok(doc)
|
2021-10-02 17:19:54 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-08 13:46:23 +08:00
|
|
|
impl RevisionIterator for RevisionStore {
|
|
|
|
fn next(&self) -> ResultFuture<Option<Revision>, DocError> {
|
|
|
|
let pending_revs = self.pending_revs.clone();
|
|
|
|
let revs_map = self.revs_map.clone();
|
|
|
|
let persistence = self.persistence.clone();
|
|
|
|
let doc_id = self.doc_id.clone();
|
|
|
|
ResultFuture::new(async move {
|
|
|
|
match pending_revs.read().await.front() {
|
|
|
|
None => Ok(None),
|
|
|
|
Some(pending) => match revs_map.get(&pending.rev_id) {
|
|
|
|
None => persistence.read_rev(&doc_id, &pending.rev_id),
|
|
|
|
Some(context) => Ok(Some(context.revision.clone())),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-10-06 15:23:38 +08:00
|
|
|
async fn fetch_from_local(doc_id: &str, persistence: Arc<Persistence>) -> DocResult<Doc> {
|
2021-10-02 17:19:54 +08:00
|
|
|
let doc_id = doc_id.to_owned();
|
|
|
|
spawn_blocking(move || {
|
|
|
|
let conn = &*persistence.pool.get().map_err(internal_error)?;
|
|
|
|
let revisions = persistence.rev_sql.read_rev_tables(&doc_id, None, conn)?;
|
|
|
|
if revisions.is_empty() {
|
2021-10-18 18:40:15 +08:00
|
|
|
return Err(DocError::record_not_found().context("Local doesn't have this document"));
|
2021-10-02 17:19:54 +08:00
|
|
|
}
|
|
|
|
|
2021-10-05 23:18:19 +08:00
|
|
|
let base_rev_id: RevId = revisions.last().unwrap().base_rev_id.into();
|
2021-10-02 21:35:06 +08:00
|
|
|
let rev_id: RevId = revisions.last().unwrap().rev_id.into();
|
2021-10-02 17:19:54 +08:00
|
|
|
let mut delta = Delta::new();
|
|
|
|
for revision in revisions {
|
|
|
|
match Delta::from_bytes(revision.delta_data) {
|
|
|
|
Ok(local_delta) => {
|
|
|
|
delta = delta.compose(&local_delta)?;
|
|
|
|
},
|
|
|
|
Err(e) => {
|
|
|
|
log::error!("Deserialize delta from revision failed: {}", e);
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
2021-10-06 15:23:38 +08:00
|
|
|
Result::<Doc, DocError>::Ok(Doc {
|
|
|
|
id: doc_id,
|
|
|
|
data: delta.to_json(),
|
|
|
|
rev_id: rev_id.into(),
|
|
|
|
base_rev_id: base_rev_id.into(),
|
2021-10-05 23:18:19 +08:00
|
|
|
})
|
2021-10-02 17:19:54 +08:00
|
|
|
})
|
|
|
|
.await
|
|
|
|
.map_err(internal_error)?
|
|
|
|
}
|
|
|
|
|
|
|
|
// fn update_revisions(&self) {
|
|
|
|
// let rev_ids = self
|
|
|
|
// .revs
|
|
|
|
// .iter()
|
|
|
|
// .flat_map(|kv| match kv.state == RevState::Acked {
|
|
|
|
// true => None,
|
|
|
|
// false => Some(kv.key().clone()),
|
|
|
|
// })
|
|
|
|
// .collect::<Vec<i64>>();
|
|
|
|
//
|
|
|
|
// if rev_ids.is_empty() {
|
|
|
|
// return;
|
|
|
|
// }
|
|
|
|
//
|
2021-11-03 15:37:38 +08:00
|
|
|
// tracing::debug!("Try to update {:?} state", rev_ids);
|
2021-10-02 17:19:54 +08:00
|
|
|
// match self.update(&rev_ids) {
|
|
|
|
// Ok(_) => {
|
|
|
|
// self.revs.retain(|k, _| !rev_ids.contains(k));
|
|
|
|
// },
|
|
|
|
// Err(e) => log::error!("Save revision failed: {:?}", e),
|
|
|
|
// }
|
|
|
|
// }
|
|
|
|
//
|
|
|
|
// fn update(&self, rev_ids: &Vec<i64>) -> Result<(), DocError> {
|
|
|
|
// let conn = &*self.pool.get().map_err(internal_error).unwrap();
|
|
|
|
// let result = conn.immediate_transaction::<_, DocError, _>(|| {
|
|
|
|
// for rev_id in rev_ids {
|
|
|
|
// let changeset = RevChangeset {
|
|
|
|
// doc_id: self.doc_id.clone(),
|
|
|
|
// rev_id: rev_id.clone(),
|
|
|
|
// state: RevState::Acked,
|
|
|
|
// };
|
|
|
|
// let _ = self.op_sql.update_rev_table(changeset, conn)?;
|
|
|
|
// }
|
|
|
|
// Ok(())
|
|
|
|
// });
|
|
|
|
//
|
|
|
|
// result
|
|
|
|
// }
|
|
|
|
|
2021-10-02 21:35:06 +08:00
|
|
|
// fn delete_revision(&self, rev_id: RevId) {
|
2021-10-02 17:19:54 +08:00
|
|
|
// let op_sql = self.op_sql.clone();
|
|
|
|
// let pool = self.pool.clone();
|
|
|
|
// let doc_id = self.doc_id.clone();
|
|
|
|
// tokio::spawn(async move {
|
|
|
|
// let conn = &*pool.get().map_err(internal_error).unwrap();
|
|
|
|
// let result = conn.immediate_transaction::<_, DocError, _>(|| {
|
|
|
|
// let _ = op_sql.delete_rev_table(&doc_id, rev_id, conn)?;
|
|
|
|
// Ok(())
|
|
|
|
// });
|
|
|
|
//
|
|
|
|
// match result {
|
|
|
|
// Ok(_) => {},
|
|
|
|
// Err(e) => log::error!("Delete revision failed: {:?}", e),
|
|
|
|
// }
|
|
|
|
// });
|
|
|
|
// }
|
2021-11-02 22:18:13 +08:00
|
|
|
|
|
|
|
/// Messages consumed by `RevisionStream` from the pending channel.
pub(crate) enum PendingMsg {
    // `ret` receives the rev id once the corresponding revision has been
    // acknowledged (its sender half lives in the matching `PendingRevId`).
    Revision { ret: RevIdReceiver },
}
|
|
|
|
|
|
|
|
// Sender/receiver halves of the unbounded channel carrying `PendingMsg`s from
// `RevisionStore` to its background `RevisionStream`.
pub(crate) type PendingSender = mpsc::UnboundedSender<PendingMsg>;
pub(crate) type PendingReceiver = mpsc::UnboundedReceiver<PendingMsg>;
|
|
|
|
|
|
|
|
/// Background task that drains pending revision messages one at a time,
/// forwarding each revision to the websocket sender and waiting for its ack
/// (or a timeout) before handling the next message.
pub(crate) struct RevisionStream {
    // Source of the revisions to send (implemented by `RevisionStore`).
    revisions: Arc<dyn RevisionIterator>,
    // Taken (replaced with `None`) by `run`, which may only be called once.
    receiver: Option<PendingReceiver>,
    // Outbound channel toward the websocket layer.
    ws_revision_sender: mpsc::UnboundedSender<Revision>,
}
|
|
|
|
|
|
|
|
impl RevisionStream {
|
|
|
|
pub(crate) fn new(
|
|
|
|
revisions: Arc<dyn RevisionIterator>,
|
|
|
|
pending_rx: PendingReceiver,
|
|
|
|
ws_revision_sender: mpsc::UnboundedSender<Revision>,
|
|
|
|
) -> Self {
|
|
|
|
Self {
|
|
|
|
revisions,
|
|
|
|
receiver: Some(pending_rx),
|
|
|
|
ws_revision_sender,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub async fn run(mut self) {
|
|
|
|
let mut receiver = self.receiver.take().expect("Should only call once");
|
|
|
|
let stream = stream! {
|
|
|
|
loop {
|
|
|
|
match receiver.recv().await {
|
|
|
|
Some(msg) => yield msg,
|
|
|
|
None => break,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
stream
|
|
|
|
.for_each(|msg| async {
|
|
|
|
match self.handle_msg(msg).await {
|
|
|
|
Ok(_) => {},
|
|
|
|
Err(e) => log::error!("{:?}", e),
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.await;
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn handle_msg(&self, msg: PendingMsg) -> DocResult<()> {
|
|
|
|
match msg {
|
|
|
|
PendingMsg::Revision { ret } => self.prepare_next_pending_rev(ret).await,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
async fn prepare_next_pending_rev(&self, mut ret: RevIdReceiver) -> DocResult<()> {
|
|
|
|
match self.revisions.next().await? {
|
|
|
|
None => Ok(()),
|
|
|
|
Some(revision) => {
|
|
|
|
let _ = self.ws_revision_sender.send(revision).map_err(internal_error);
|
|
|
|
let _ = tokio::time::timeout(Duration::from_millis(2000), ret.recv()).await;
|
|
|
|
Ok(())
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|