diff --git a/src/fs_store.rs b/src/fs_store.rs index 4a453d14..5bff5359 100644 --- a/src/fs_store.rs +++ b/src/fs_store.rs @@ -151,20 +151,20 @@ impl FsStore { Ok(()) } - pub fn compact(&self, id: &DocumentId, full_doc: &[u8]) -> Result<(), Error> { + pub fn compact( + &self, + id: &DocumentId, + full_doc: &[u8], + new_heads: Vec<ChangeHash>, + ) -> Result<(), Error> { let paths = DocIdPaths::from(id); // Load all the data we have into a doc match Chunks::load(&self.root, id) { Ok(Some(chunks)) => { - let doc = chunks - .to_doc() - .map_err(|e| Error(ErrorKind::LoadDocToCompact(e)))?; - // Write the snapshot - let output_chunk_name = SavedChunkName::new_snapshot(doc.get_heads()); - let chunk = doc.save(); - write_chunk(&self.root, &paths, &chunk, output_chunk_name.clone())?; + let output_chunk_name = SavedChunkName::new_snapshot(new_heads); + write_chunk(&self.root, &paths, full_doc, output_chunk_name.clone())?; // Remove all the old data for incremental in chunks.incrementals.keys() { @@ -172,15 +172,9 @@ impl FsStore { std::fs::remove_file(&path) .map_err(|e| Error(ErrorKind::DeleteChunk(path, e)))?; } - let just_wrote = paths.chunk_path(&self.root, &output_chunk_name); for snapshot in chunks.snapshots.keys() { let path = paths.chunk_path(&self.root, snapshot); - if path == just_wrote { - tracing::trace!("Somehow trying to delete the same path we just wrote to. Not today Satan"); - continue; - } - std::fs::remove_file(&path) .map_err(|e| Error(ErrorKind::DeleteChunk(path, e)))?; } @@ -441,21 +435,6 @@ impl Chunks { incrementals, })) } - - fn to_doc(&self) -> Result<automerge::Automerge, automerge::AutomergeError> { - let mut bytes = Vec::new(); - for chunk in self.snapshots.values() { - bytes.extend(chunk); - } - for chunk in self.incrementals.values() { - bytes.extend(chunk); - } - - automerge::Automerge::load_with_options( - &bytes, - automerge::LoadOptions::new().on_partial_load(automerge::OnPartialLoad::Ignore), - ) - } } mod error { @@ -499,8 +478,6 @@ mod error { ErrReadingChunkFile(PathBuf, std::io::Error), #[error("error creating level 2 path {0}: {1}")] CreateLevel2Path(PathBuf, std::io::Error), - #[error("error loading doc to compact: {0}")] - LoadDocToCompact(automerge::AutomergeError), #[error("error creating temp file: {0}")] CreateTempFile(std::io::Error), #[error("error writing temp file {0}: {1}")] diff --git a/src/interfaces.rs b/src/interfaces.rs index 8fc7b9a2..3c72d1a9 100644 --- a/src/interfaces.rs +++ b/src/interfaces.rs @@ -1,3 +1,4 @@ +use automerge::ChangeHash; use futures::future::BoxFuture; use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; @@ -113,5 +114,6 @@ pub trait Storage: Send { &self, _id: DocumentId, _full_doc: Vec<u8>, + _new_heads: Vec<ChangeHash>, ) -> BoxFuture<'static, Result<(), StorageError>>; } diff --git a/src/repo.rs b/src/repo.rs index 3b9acae5..52fe588f 100644 --- a/src/repo.rs +++ b/src/repo.rs @@ -743,7 +743,10 @@ impl DocumentInfo { (doc.automerge.save(), doc.automerge.get_heads()) }; self.patches_since_last_compact = 0; - (storage.compact(document_id.clone(), to_save), new_heads) + ( + storage.compact(document_id.clone(), to_save, new_heads.clone()), + new_heads, + ) } else { let (to_save, new_heads) = { let doc = self.document.read(); diff --git a/src/tokio/fs_storage.rs b/src/tokio/fs_storage.rs index 38afd348..27bf3710 100644 --- a/src/tokio/fs_storage.rs +++ b/src/tokio/fs_storage.rs @@ -78,11 +78,17 @@ impl Storage for FsStorage { &self, id: crate::DocumentId, full_doc: Vec<u8>, + new_heads: Vec<ChangeHash>, ) -> BoxFuture<'static, Result<(), StorageError>> { let inner = Arc::clone(&self.inner); let inner_id = id.clone(); self.handle - .spawn_blocking(move || inner.lock().unwrap().compact(&inner_id, &full_doc)) + .spawn_blocking(move || { + inner + .lock() + .unwrap() + .compact(&inner_id, &full_doc, new_heads) + }) .map(handle_joinerror) .map_err(move |e| { tracing::error!(err=?e, doc=?id, "error compacting chunk to filesystem"); diff --git a/test_utils/src/storage_utils.rs b/test_utils/src/storage_utils.rs index fd0365cc..c43707c2 100644 --- a/test_utils/src/storage_utils.rs +++ b/test_utils/src/storage_utils.rs @@ -1,3 +1,4 @@ +use automerge::ChangeHash; use automerge_repo::{DocumentId, Storage, StorageError}; use futures::future::{BoxFuture, TryFutureExt}; use futures::FutureExt; @@ -30,6 +31,7 @@ impl Storage for SimpleStorage { &self, _id: DocumentId, _chunk: Vec<u8>, + _new_heads: Vec<ChangeHash>, ) -> BoxFuture<'static, Result<(), StorageError>> { futures::future::ready(Ok(())).boxed() } @@ -76,6 +78,7 @@ impl Storage for InMemoryStorage { &self, id: DocumentId, full_doc: Vec<u8>, + _new_heads: Vec<ChangeHash>, ) -> BoxFuture<'static, Result<(), StorageError>> { let mut documents = self.documents.lock(); documents.insert(id, full_doc); @@ -202,6 +205,7 @@ impl Storage for AsyncInMemoryStorage { &self, id: DocumentId, full_doc: Vec<u8>, + _new_heads: Vec<ChangeHash>, ) -> BoxFuture<'static, Result<(), StorageError>> { let (tx, rx) = oneshot(); self.chan diff --git a/tests/document_changed.rs b/tests/document_changed.rs index 7eb74a55..eae1f14a 100644 --- a/tests/document_changed.rs +++ b/tests/document_changed.rs @@ -87,7 +87,6 @@ fn test_document_changed_over_sync() { // Spawn a task that awaits the document change. let (done_sync_sender, mut done_sync_receiver) = channel(1); - let repo_id = repo_handle_1.get_repo_id().clone(); rt.spawn(async move { loop { // Await changes until the edit comes through over sync.
diff --git a/tests/fs_storage/main.rs b/tests/fs_storage/main.rs index 74a6d78e..e39ad4b3 100644 --- a/tests/fs_storage/main.rs +++ b/tests/fs_storage/main.rs @@ -1,7 +1,6 @@ use automerge::transaction::Transactable; use automerge_repo::fs_store; use itertools::Itertools; -use uuid::Uuid; /// Asserts that the &[u8] in `data` is some permutation of the chunks of Vec<&[u8]> in `expected` macro_rules! assert_permutation_of { @@ -45,7 +44,9 @@ fn fs_store_crud() { assert_permutation_of!(result, vec![change1.bytes(), change2.bytes()]); // now compact - store.compact(&doc_id, &[]).unwrap(); + store + .compact(&doc_id, &doc.save(), doc.get_heads()) + .unwrap(); let result = store.get(&doc_id).unwrap().unwrap(); let expected = doc.save(); assert_eq!(result, expected);