use crate::entities::FileStatePB;
use crate::file_cache::FileTempStorage;
use crate::notification::{make_notification, StorageNotification};
use crate::sqlite_sql::{
  batch_select_upload_file, delete_all_upload_parts, delete_upload_file,
  delete_upload_file_by_file_id, insert_upload_file, insert_upload_part, is_upload_completed,
  is_upload_exist, select_upload_file, select_upload_parts, update_upload_file_completed,
  update_upload_file_upload_id, UploadFilePartTable, UploadFileTable,
};
use crate::uploader::{FileUploader, FileUploaderRunner, Signal, UploadTask, UploadTaskQueue};
use allo_isolate::Isolate;
use async_trait::async_trait;
use collab_importer::util::FileId;
use dashmap::DashMap;
use flowy_error::{ErrorCode, FlowyError, FlowyResult};
use flowy_sqlite::DBConnection;
use flowy_storage_pub::chunked_byte::{calculate_offsets, ChunkedBytes, MIN_CHUNK_SIZE};
use flowy_storage_pub::cloud::StorageCloudService;
use flowy_storage_pub::storage::{
  CompletedPartRequest, CreatedUpload, FileProgress, FileProgressReceiver, FileUploadState,
  ProgressNotifier, StorageService, UploadPartResponse,
};
use lib_infra::box_any::BoxAny;
use lib_infra::isolate_stream::{IsolateSink, SinkExt};
use lib_infra::util::timestamp;
use std::path::{Path, PathBuf};
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use tokio::io::AsyncWriteExt;
use tokio::sync::{broadcast, watch};
use tracing::{debug, error, info, instrument, trace};
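
/// Host-side services the storage layer needs from the running user session:
/// identity, the active workspace, sqlite access, and the application root
/// directory used for temp-file storage.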
pub trait StorageUserService: Send + Sync + 'static {
  fn user_id(&self) -> Result<i64, FlowyError>;
  fn workspace_id(&self) -> Result<String, FlowyError>;
  fn sqlite_connection(&self, uid: i64) -> Result<DBConnection, FlowyError>;
  fn get_application_root_dir(&self) -> &str;
}
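
// A single broadcast channel carries progress events for every in-flight
// upload; subscribers filter by file_id.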
type GlobalNotifier = broadcast::Sender<FileProgress>;
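
/// Coordinates background file uploads: owns the uploader and its task queue,
/// publishes progress through a global broadcast channel, and keeps a per-file
/// `ProgressNotifier` map for subscribers.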
pub struct StorageManager {
  pub storage_service: Arc<dyn StorageService>,
  cloud_service: Arc<dyn StorageCloudService>,
  user_service: Arc<dyn StorageUserService>,
  uploader: Arc<FileUploader>,
  progress_notifiers: Arc<DashMap<String, ProgressNotifier>>,
  global_notifier: GlobalNotifier,
}

impl Drop for StorageManager {
  fn drop(&mut self) {
    info!("[File] StorageManager is dropped");
  }
}

impl StorageManager {
  pub fn new(
    cloud_service: Arc<dyn StorageCloudService>,
    user_service: Arc<dyn StorageUserService>,
  ) -> Self {
    let is_exceed_storage_limit = Arc::new(AtomicBool::new(false));
    let temp_storage_path = PathBuf::from(format!(
      "{}/cache_files",
      user_service.get_application_root_dir()
    ));
    let (global_notifier, _) = broadcast::channel(2000);
    let temp_storage = Arc::new(FileTempStorage::new(temp_storage_path));
    let (notifier, notifier_rx) = watch::channel(Signal::Proceed);
    let task_queue = Arc::new(UploadTaskQueue::new(notifier));
    let progress_notifiers = Arc::new(DashMap::new());
    let storage_service = Arc::new(StorageServiceImpl {
      cloud_service: cloud_service.clone(),
      user_service: user_service.clone(),
      temp_storage,
      task_queue: task_queue.clone(),
      is_exceed_storage_limit: is_exceed_storage_limit.clone(),
      progress_notifiers: progress_notifiers.clone(),
      global_notifier: global_notifier.clone(),
    });

    let uploader = Arc::new(FileUploader::new(
      storage_service.clone(),
      task_queue,
      is_exceed_storage_limit,
    ));
    tokio::spawn(FileUploaderRunner::run(
      Arc::downgrade(&uploader),
      notifier_rx,
    ));

    let weak_uploader = Arc::downgrade(&uploader);
    let cloned_user_service = user_service.clone();
    tokio::spawn(async move {
      if let Some(uploader) = weak_uploader.upgrade() {
        if let Err(err) = prepare_upload_task(uploader, cloned_user_service).await {
          error!("prepare upload task failed: {}", err);
        }
      }
    });
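
    // Fan upload progress out of the global broadcast channel into each
    // per-file ProgressNotifier. The task holds only a Weak reference to the
    // notifier map, so it exits once the StorageManager is dropped.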
    let mut rx = global_notifier.subscribe();
    let weak_notifier = Arc::downgrade(&progress_notifiers);
    tokio::spawn(async move {
      while let Ok(progress) = rx.recv().await {
        if let Some(notifiers) = weak_notifier.upgrade() {
          if let Some(mut notifier) = notifiers.get_mut(&progress.file_id) {
            if progress.progress >= 1.0 {
              let finish = FileUploadState::Finished {
                file_id: progress.file_id,
              };
              notifier.notify(finish).await;
            } else {
              let progress = FileUploadState::Uploading {
                progress: progress.progress,
              };
              notifier.notify(progress).await;
            }
          }
        } else {
          info!("progress notifiers is dropped");
          break;
        }
      }
    });

    Self {
      storage_service,
      cloud_service,
      user_service,
      uploader,
      progress_notifiers,
      global_notifier,
    }
  }
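
  /// Streams every FileProgress event to the Dart side as JSON over the
  /// isolate identified by `port`.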
  pub async fn register_file_progress_stream(&self, port: i64) {
    info!("register file progress stream: {}", port);
    let mut sink = IsolateSink::new(Isolate::new(port));
    let mut rx = self.global_notifier.subscribe();
    tokio::spawn(async move {
      while let Ok(progress) = rx.recv().await {
        if let Ok(s) = serde_json::to_string(&progress) {
          if let Err(err) = sink.send(s).await {
            error!("[File]: send file progress failed: {}", err);
          }
        }
      }
    });
  }
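
  /// Reports the upload state of the file behind `url`. Returns None when the
  /// URL cannot be parsed, belongs to another workspace, or the sqlite lookup
  /// fails; as a side effect, the current progress (0.0 or 1.0) is re-broadcast.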
  pub async fn query_file_state(&self, url: &str) -> Option<FileStatePB> {
    let (workspace_id, parent_dir, file_id) = self.cloud_service.parse_object_url_v1(url).await?;
    let current_workspace_id = self.user_service.workspace_id().ok()?;
    if workspace_id != current_workspace_id {
      return None;
    }

    let uid = self.user_service.user_id().ok()?;
    let mut conn = self.user_service.sqlite_connection(uid).ok()?;
    let is_finish = is_upload_completed(&mut conn, &workspace_id, &parent_dir, &file_id).ok()?;

    if let Err(err) = self.global_notifier.send(FileProgress::new_progress(
      url.to_string(),
      file_id.clone(),
      if is_finish { 1.0 } else { 0.0 },
    )) {
      error!("[File] send global notifier failed: {}", err);
    }

    Some(FileStatePB { file_id, is_finish })
  }

  pub async fn initialize(&self, workspace_id: &str) {
    self.enable_storage_write_access();

    if let Err(err) = prepare_upload_task(self.uploader.clone(), self.user_service.clone()).await {
      error!("prepare {} upload task failed: {}", workspace_id, err);
    }
  }

  pub fn update_network_reachable(&self, reachable: bool) {
    if reachable {
      self.uploader.resume();
    } else {
      self.uploader.pause();
    }
  }

  pub fn disable_storage_write_access(&self) {
    // when the storage limit is exceeded, stop the uploader from taking new writes
    self.uploader.disable_storage_write();
  }

  pub fn enable_storage_write_access(&self) {
    // when storage is purchased, resume the uploader
    self.uploader.enable_storage_write();
  }
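
  /// Subscribes to upload progress for a single file. A minimal usage sketch,
  /// assuming `FileProgressReceiver` exposes an async `recv`-style method (the
  /// exact receiver API lives in flowy_storage_pub):
  ///
  /// ```rust,ignore
  /// if let Some(mut receiver) = manager.subscribe_file_state(parent_dir, file_id).await? {
  ///   while let Some(state) = receiver.recv().await {
  ///     // react to FileUploadState::Uploading { progress } or Finished { file_id }
  ///   }
  /// }
  /// ```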
  pub async fn subscribe_file_state(
    &self,
    parent_dir: &str,
    file_id: &str,
  ) -> Result<Option<FileProgressReceiver>, FlowyError> {
    self
      .storage_service
      .subscribe_file_progress(parent_dir, file_id)
      .await
  }

  /// Returns None if no file with the given file_id exists.
  /// When a file is deleted, the progress notifier for that file_id is removed as well.
  pub async fn get_file_state(&self, file_id: &str) -> Option<FileUploadState> {
    self
      .progress_notifiers
      .get(file_id)
      .and_then(|notifier| notifier.value().current_value.clone())
  }

  pub async fn get_all_tasks(&self) -> FlowyResult<Vec<UploadTask>> {
    let tasks = self.uploader.all_tasks().await;
    Ok(tasks)
  }
}
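
/// Loads up to 100 unfinished upload records from sqlite and re-queues them as
/// background tasks, so uploads interrupted by a restart are resumed.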
async fn prepare_upload_task(
  uploader: Arc<FileUploader>,
  user_service: Arc<dyn StorageUserService>,
) -> FlowyResult<()> {
  if let Ok(uid) = user_service.user_id() {
    let workspace_id = user_service.workspace_id()?;
    let conn = user_service.sqlite_connection(uid)?;
    let upload_files = batch_select_upload_file(conn, &workspace_id, 100, false)?;
    let tasks = upload_files
      .into_iter()
      .map(|upload_file| UploadTask::BackgroundTask {
        workspace_id: upload_file.workspace_id,
        file_id: upload_file.file_id,
        parent_dir: upload_file.parent_dir,
        created_at: upload_file.created_at,
        retry_count: 0,
      })
      .collect::<Vec<_>>();
    info!("[File] prepare upload task: {}", tasks.len());
    uploader.queue_tasks(tasks).await;
  }
  Ok(())
}
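
/// [StorageService] implementation backed by the cloud storage API, with a
/// sqlite-tracked upload queue and a local temp-file cache.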
pub struct StorageServiceImpl {
  cloud_service: Arc<dyn StorageCloudService>,
  user_service: Arc<dyn StorageUserService>,
  temp_storage: Arc<FileTempStorage>,
  task_queue: Arc<UploadTaskQueue>,
  is_exceed_storage_limit: Arc<AtomicBool>,
  progress_notifiers: Arc<DashMap<String, ProgressNotifier>>,
  global_notifier: GlobalNotifier,
}

#[async_trait]
impl StorageService for StorageServiceImpl {
  async fn delete_object(&self, url: String) -> FlowyResult<()> {
    if let Some((workspace_id, parent_dir, file_id)) =
      self.cloud_service.parse_object_url_v1(&url).await
    {
      info!(
        "[File] delete object: workspace: {}, parent_dir: {}, file_id: {}",
        workspace_id, parent_dir, file_id
      );

      self
        .task_queue
        .remove_task(&workspace_id, &parent_dir, &file_id)
        .await;

      trace!("[File] delete progress notifier: {}", file_id);
      self.progress_notifiers.remove(&file_id);
      match delete_upload_file_by_file_id(
        self
          .user_service
          .sqlite_connection(self.user_service.user_id()?)?,
        &workspace_id,
        &parent_dir,
        &file_id,
      ) {
        Ok(Some(file)) => {
          let file_path = file.local_file_path;
          match tokio::fs::remove_file(&file_path).await {
            Ok(_) => debug!("[File] deleted file from local disk: {}", file_path),
            Err(err) => {
              error!("[File] delete file at {} failed: {}", file_path, err);
            },
          }
        },
        Ok(None) => {
          info!(
            "[File]: cannot find file record for url: {} when deleting",
            url
          );
        },
        Err(err) => {
          error!("[File] delete upload file failed: {}", err);
        },
      }
    }

    let _ = self.cloud_service.delete_object(&url).await;
    Ok(())
  }

  fn download_object(&self, url: String, local_file_path: String) -> FlowyResult<()> {
    let cloud_service = self.cloud_service.clone();
    tokio::spawn(async move {
      if tokio::fs::metadata(&local_file_path).await.is_ok() {
        tracing::warn!("file already exists on user local disk: {}", local_file_path);
        return Ok(());
      }
      let object_value = cloud_service.get_object(url).await?;
      let mut file = tokio::fs::OpenOptions::new()
        .create(true)
        .truncate(true)
        .write(true)
        .open(&local_file_path)
        .await?;

      // write_all guarantees the whole payload is written; a bare write may
      // return after a partial write
      match file.write_all(&object_value.raw).await {
        Ok(_) => {
          info!(
            "downloaded {} bytes to file: {}",
            object_value.raw.len(),
            local_file_path
          );
        },
        Err(err) => {
          error!("write file failed: {}", err);
        },
      }
      Ok::<_, FlowyError>(())
    });
    Ok(())
  }
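
  /// Validates the input, copies the file into temp storage, persists an
  /// upload record, and queues the upload. The object URL is returned right
  /// away; the progress receiver is None when a record for the file already
  /// exists.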
  async fn create_upload(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    file_path: &str,
  ) -> Result<(CreatedUpload, Option<FileProgressReceiver>), FlowyError> {
    if workspace_id.is_empty() {
      return Err(FlowyError::internal().with_context("workspace id is empty"));
    }

    if parent_dir.is_empty() {
      return Err(FlowyError::internal().with_context("parent dir is empty"));
    }

    if file_path.is_empty() {
      return Err(FlowyError::internal().with_context("local file path is empty"));
    }

    let workspace_id = workspace_id.to_string();
    let parent_dir = parent_dir.to_string();
    let file_path = file_path.to_string();

    let is_exceed_limit = self
      .is_exceed_storage_limit
      .load(std::sync::atomic::Ordering::Relaxed);
    if is_exceed_limit {
      make_notification(StorageNotification::FileStorageLimitExceeded)
        .payload(FlowyError::file_storage_limit())
        .send();

      return Err(FlowyError::file_storage_limit());
    }

    let local_file_path = self
      .temp_storage
      .create_temp_file_from_existing(Path::new(&file_path))
      .await
      .map_err(|err| {
        error!("[File] create temp file failed: {}", err);
        FlowyError::internal()
          .with_context(format!("create temp file for upload file failed: {}", err))
      })?;

    // 1. create a file record and chunk the file
    let record = create_upload_record(workspace_id, parent_dir, local_file_path.clone()).await?;
    // 2. save the record to sqlite and resolve the object url
    let conn = self
      .user_service
      .sqlite_connection(self.user_service.user_id()?)?;
    let url = self
      .cloud_service
      .get_object_url_v1(&record.workspace_id, &record.parent_dir, &record.file_id)
      .await?;
    let file_id = record.file_id.clone();
    match insert_upload_file(conn, &record) {
      Ok(_) => {
        // 3. queue the upload task; the uploader picks it up in the background
        self
          .task_queue
          .queue_task(UploadTask::Task {
            local_file_path,
            record,
            retry_count: 3,
          })
          .await;

        let notifier = ProgressNotifier::new(file_id.to_string());
        let receiver = notifier.subscribe();
        trace!("[File] create upload progress notifier: {}", file_id);
        self
          .progress_notifiers
          .insert(file_id.to_string(), notifier);
        Ok::<_, FlowyError>((CreatedUpload { url, file_id }, Some(receiver)))
      },
      Err(err) => {
        if matches!(err.code, ErrorCode::DuplicateSqliteRecord) {
          info!("[File] upload record already exists, skip creating new upload task");
          Ok::<_, FlowyError>((CreatedUpload { url, file_id }, None))
        } else {
          Err(err)
        }
      },
    }
  }

  async fn start_upload(&self, record: &BoxAny) -> Result<(), FlowyError> {
    let file_record = record.downcast_ref::<UploadFileTable>().ok_or_else(|| {
      FlowyError::internal().with_context("failed to downcast record to UploadFileTable")
    })?;

    // If the upload record no longer exists (e.g. the file was deleted), skip the upload
    if !is_upload_exist(
      self
        .user_service
        .sqlite_connection(self.user_service.user_id()?)?,
      &file_record.upload_id,
    )? {
      info!(
        "[File] skip upload, {} was deleted",
        file_record.local_file_path
      );
      return Ok(());
    }

    start_upload(self, file_record).await?;

    Ok(())
  }

  async fn resume_upload(
    &self,
    workspace_id: &str,
    parent_dir: &str,
    file_id: &str,
  ) -> Result<(), FlowyError> {
    // Gather the upload record and parts from the sqlite database.
    let mut conn = self
      .user_service
      .sqlite_connection(self.user_service.user_id()?)?;

    if let Some(upload_file) = select_upload_file(&mut conn, workspace_id, parent_dir, file_id)? {
      resume_upload(self, upload_file).await?;
    } else {
      error!(
        "[File] resume upload failed: cannot find {}:{}",
        parent_dir, file_id
      );
    }
    Ok(())
  }

  async fn subscribe_file_progress(
    &self,
    parent_dir: &str,
    file_id: &str,
  ) -> Result<Option<FileProgressReceiver>, FlowyError> {
    trace!("[File]: subscribe file progress: {}", file_id);

    let is_completed = {
      let mut conn = self
        .user_service
        .sqlite_connection(self.user_service.user_id()?)?;
      let workspace_id = self.user_service.workspace_id()?;
      is_upload_completed(&mut conn, &workspace_id, parent_dir, file_id).unwrap_or(false)
    };

    if is_completed {
      return Ok(None);
    }

    let notifier = self
      .progress_notifiers
      .entry(file_id.to_string())
      .or_insert_with(|| ProgressNotifier::new(file_id.to_string()));
    Ok(Some(notifier.subscribe()))
  }
}
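
/// Builds the sqlite record for a new upload: the chunk count is derived from
/// the file size and MIN_CHUNK_SIZE, the content type is guessed from the
/// path, and the file_id is derived from the file content via [FileId::from_path].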
async fn create_upload_record(
  workspace_id: String,
  parent_dir: String,
  local_file_path: String,
) -> FlowyResult<UploadFileTable> {
  let file_path = Path::new(&local_file_path);
  let file = tokio::fs::File::open(&file_path).await?;
  let metadata = file.metadata().await?;
  let file_size = metadata.len() as usize;

  // Calculate the total number of chunks
  let num_chunk = calculate_offsets(file_size, MIN_CHUNK_SIZE).len();
  let content_type = mime_guess::from_path(file_path)
    .first_or_octet_stream()
    .to_string();
  let file_id = FileId::from_path(&file_path.to_path_buf()).await?;
  let record = UploadFileTable {
    workspace_id,
    file_id,
    // An empty upload_id marks a fresh record; the real id is assigned when the
    // remote upload is created in [Self::start_upload]
    upload_id: "".to_string(),
    parent_dir,
    local_file_path,
    content_type,
    chunk_size: MIN_CHUNK_SIZE as i32,
    num_chunk: num_chunk as i32,
    created_at: timestamp(),
    is_finish: false,
  };
  Ok(record)
}
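
/// Drives a multipart upload end to end: gather already-uploaded parts from
/// sqlite, create (or re-create) the remote upload, stream the remaining
/// chunks, then complete the upload. Progress is published on the global
/// notifier after every part.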
#[instrument(level = "debug", skip_all, err)]
async fn start_upload(
  storage_service: &StorageServiceImpl,
  upload_file: &UploadFileTable,
) -> FlowyResult<()> {
  let temp_storage = &storage_service.temp_storage;
  let user_service = &storage_service.user_service;
  let global_notifier = storage_service.global_notifier.clone();
  let cloud_service = &storage_service.cloud_service;

  // Gather the parts that were already uploaded so a resumed upload can skip them
  let mut conn = user_service.sqlite_connection(user_service.user_id()?)?;
  let mut completed_parts = select_upload_parts(&mut conn, &upload_file.upload_id)
    .unwrap_or_default()
    .into_iter()
    .map(|part| CompletedPartRequest {
      e_tag: part.e_tag,
      part_number: part.part_num,
    })
    .collect::<Vec<_>>();
  let upload_offset = completed_parts.len() as u64;

  let file_path = Path::new(&upload_file.local_file_path);
  if !file_path.exists() {
    error!("[File] file not found: {}", upload_file.local_file_path);
    if let Ok(uid) = user_service.user_id() {
      if let Ok(conn) = user_service.sqlite_connection(uid) {
        delete_upload_file(conn, &upload_file.upload_id)?;
      }
    }
  }
  let file_size = file_path
    .metadata()
    .map(|metadata| metadata.len())
    .unwrap_or(0);

  let mut chunked_bytes =
    ChunkedBytes::from_file(&upload_file.local_file_path, MIN_CHUNK_SIZE).await?;
  let total_parts = chunked_bytes.total_chunks();
  if let Err(err) = chunked_bytes.set_offset(upload_offset).await {
    error!(
      "[File] set offset failed: {} for file: {}",
      err, upload_file.local_file_path
    );
    if let Ok(uid) = user_service.user_id() {
      if let Ok(conn) = user_service.sqlite_connection(uid) {
        delete_upload_file(conn, &upload_file.upload_id)?;
      }
    }
  }

  info!(
    "[File] start upload: workspace: {}, parent_dir: {}, file_id: {}, chunk: {}",
    upload_file.workspace_id, upload_file.parent_dir, upload_file.file_id, chunked_bytes,
  );

  let mut upload_file = upload_file.clone();
  // 1. create upload
  trace!(
    "[File] create upload for workspace: {}, parent_dir: {}, file_id: {}",
    upload_file.workspace_id,
    upload_file.parent_dir,
    upload_file.file_id
  );

  let create_upload_resp_result = cloud_service
    .create_upload(
      &upload_file.workspace_id,
      &upload_file.parent_dir,
      &upload_file.file_id,
      &upload_file.content_type,
      file_size,
    )
    .await;

  let file_url = cloud_service
    .get_object_url_v1(
      &upload_file.workspace_id,
      &upload_file.parent_dir,
      &upload_file.file_id,
    )
    .await?;

  if let Err(err) = create_upload_resp_result.as_ref() {
    handle_upload_error(storage_service, err, &file_url).await;
  }
  let create_upload_resp = create_upload_resp_result?;

  // 2. update upload_id
  let conn = user_service.sqlite_connection(user_service.user_id()?)?;
  update_upload_file_upload_id(
    conn,
    &upload_file.workspace_id,
    &upload_file.parent_dir,
    &upload_file.file_id,
    &create_upload_resp.upload_id,
  )?;

  trace!(
    "[File] {} update upload_id: {}",
    upload_file.file_id,
    create_upload_resp.upload_id
  );
  upload_file.upload_id = create_upload_resp.upload_id;

  // 3. start uploading parts
  info!(
    "[File] {} start uploading parts:{}, offset:{}",
    upload_file.file_id,
    chunked_bytes.total_chunks(),
    upload_offset,
  );

  let mut part_number = upload_offset + 1;
  while let Some(chunk_result) = chunked_bytes.next_chunk().await {
    match chunk_result {
      Ok(chunk_bytes) => {
        info!(
          "[File] {} uploading part {}, size:{}KB",
          upload_file.file_id,
          part_number,
          chunk_bytes.len() / 1000,
        );

        match upload_part(
          cloud_service,
          user_service,
          &upload_file.workspace_id,
          &upload_file.parent_dir,
          &upload_file.upload_id,
          &upload_file.file_id,
          part_number as i32,
          chunk_bytes.to_vec(),
        )
        .await
        {
          Ok(resp) => {
            trace!(
              "[File] {} part {} uploaded",
              upload_file.file_id,
              part_number
            );
            // Cap progress at 0.9; the final 0.1 is reported when complete_upload succeeds
            let mut progress_value = (part_number as f64 / total_parts as f64).clamp(0.0, 1.0);
            if progress_value >= 0.9 {
              progress_value = 0.9;
            }
            let progress = FileProgress::new_progress(
              file_url.clone(),
              upload_file.file_id.clone(),
              progress_value,
            );
            trace!("[File] upload progress: {}", progress);

            if let Err(err) = global_notifier.send(progress) {
              error!("[File] send global notifier failed: {}", err);
            }

            // gather completed part
            completed_parts.push(CompletedPartRequest {
              e_tag: resp.e_tag,
              part_number: resp.part_num,
            });
          },
          Err(err) => {
            error!(
              "[File] {} failed to upload part: {}",
              upload_file.file_id, err
            );
            handle_upload_error(storage_service, &err, &file_url).await;
            if let Err(send_err) = global_notifier.send(FileProgress::new_error(
              file_url.clone(),
              upload_file.file_id.clone(),
              err.msg.clone(),
            )) {
              error!("[File] send global notifier failed: {}", send_err);
            }
            return Err(err);
          },
        }
        part_number += 1;
      },
      Err(e) => {
        error!(
          "[File] {} failed to read chunk: {:?}",
          upload_file.file_id, e
        );
        break;
      },
    }
  }

  // mark it as completed
  let complete_upload_result = complete_upload(
    cloud_service,
    user_service,
    temp_storage,
    &upload_file,
    completed_parts,
    &global_notifier,
  )
  .await;
  if let Err(err) = complete_upload_result {
    handle_upload_error(storage_service, &err, &file_url).await;
    return Err(err);
  }

  Ok(())
}
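
/// Maps upload failures to user-facing notifications; a single-file size
/// violation also deletes the offending object remotely.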
async fn handle_upload_error(
  storage_service: &StorageServiceImpl,
  err: &FlowyError,
  file_url: &str,
) {
  if err.is_file_limit_exceeded() {
    make_notification(StorageNotification::FileStorageLimitExceeded)
      .payload(err.clone())
      .send();
  }

  if err.is_single_file_limit_exceeded() {
    info!("[File] file exceeds limit:{}", file_url);
    if let Err(err) = storage_service.delete_object(file_url.to_string()).await {
      error!("[File] delete upload file:{} error:{}", file_url, err);
    }

    make_notification(StorageNotification::SingleFileLimitExceeded)
      .payload(err.clone())
      .send();
  }
}

#[instrument(level = "debug", skip_all, err)]
async fn resume_upload(
  storage_service: &StorageServiceImpl,
  upload_file: UploadFileTable,
) -> FlowyResult<()> {
  trace!(
    "[File] resume upload for workspace: {}, parent_dir: {}, file_id: {}, local_file_path:{}",
    upload_file.workspace_id,
    upload_file.parent_dir,
    upload_file.file_id,
    upload_file.local_file_path
  );

  start_upload(storage_service, &upload_file).await?;

  Ok(())
}
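
/// Uploads a single part and records its e-tag in sqlite so the part is not
/// re-uploaded when the upload resumes.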
#[allow(clippy::too_many_arguments)]
#[instrument(level = "debug", skip_all)]
async fn upload_part(
  cloud_service: &Arc<dyn StorageCloudService>,
  user_service: &Arc<dyn StorageUserService>,
  workspace_id: &str,
  parent_dir: &str,
  upload_id: &str,
  file_id: &str,
  part_number: i32,
  body: Vec<u8>,
) -> Result<UploadPartResponse, FlowyError> {
  let resp = cloud_service
    .upload_part(
      workspace_id,
      parent_dir,
      upload_id,
      file_id,
      part_number,
      body,
    )
    .await?;

  // save uploaded part to sqlite
  let conn = user_service.sqlite_connection(user_service.user_id()?)?;
  insert_upload_part(
    conn,
    &UploadFilePartTable {
      upload_id: upload_id.to_string(),
      e_tag: resp.e_tag.clone(),
      part_num: resp.part_num,
    },
  )?;

  Ok(resp)
}
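
/// Finalizes the multipart upload. On success the record is marked complete
/// and the temp file is removed; on failure the cached parts are dropped so
/// the next attempt starts again from offset zero.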
async fn complete_upload(
  cloud_service: &Arc<dyn StorageCloudService>,
  user_service: &Arc<dyn StorageUserService>,
  temp_storage: &Arc<FileTempStorage>,
  upload_file: &UploadFileTable,
  parts: Vec<CompletedPartRequest>,
  global_notifier: &GlobalNotifier,
) -> Result<(), FlowyError> {
  let file_url = cloud_service
    .get_object_url_v1(
      &upload_file.workspace_id,
      &upload_file.parent_dir,
      &upload_file.file_id,
    )
    .await?;

  info!(
    "[File]: completing file upload: {}, num parts: {}, url:{}",
    upload_file.file_id,
    parts.len(),
    file_url
  );
  match cloud_service
    .complete_upload(
      &upload_file.workspace_id,
      &upload_file.parent_dir,
      &upload_file.upload_id,
      &upload_file.file_id,
      parts,
    )
    .await
  {
    Ok(_) => {
      info!("[File] completed upload file: {}", upload_file.file_id);
      let progress = FileProgress::new_progress(file_url, upload_file.file_id.clone(), 1.0);
      info!(
        "[File]: notify upload progress:{}, {}",
        upload_file.file_id, progress
      );

      if let Err(err) = global_notifier.send(progress) {
        error!("[File] send global notifier failed: {}", err);
      }

      let conn = user_service.sqlite_connection(user_service.user_id()?)?;
      update_upload_file_completed(conn, &upload_file.upload_id)?;

      if let Err(err) = temp_storage
        .delete_temp_file(&upload_file.local_file_path)
        .await
      {
        trace!("[File] delete temp file failed: {}", err);
      }
    },
    Err(err) => {
      error!("[File] complete upload failed: {}", err);

      let progress =
        FileProgress::new_error(file_url, upload_file.file_id.clone(), err.msg.clone());
      if let Err(send_err) = global_notifier.send(progress) {
        error!("[File] send global notifier failed: {}", send_err);
      }

      let mut conn = user_service.sqlite_connection(user_service.user_id()?)?;
      if let Err(err) = delete_all_upload_parts(&mut conn, &upload_file.upload_id) {
        error!("[File] delete all upload parts failed: {}", err);
      }
      return Err(err);
    },
  }
  Ok(())
}