/*
 * Syncany, www.syncany.org
 * Copyright (C) 2011-2016 Philipp C. Heckel <philipp.heckel@gmail.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package org.syncany.operations.down;

import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
import java.util.AbstractMap;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.syncany.config.Config;
import org.syncany.database.DatabaseVersion;
import org.syncany.database.DatabaseVersionHeader;
import org.syncany.database.MemoryDatabase;
import org.syncany.database.MultiChunkEntry;
import org.syncany.database.PartialFileHistory;
import org.syncany.database.SqlDatabase;
import org.syncany.database.VectorClock;
import org.syncany.database.dao.DatabaseXmlSerializer;
import org.syncany.database.dao.DatabaseXmlSerializer.DatabaseReadType;
import org.syncany.operations.AbstractTransferOperation;
import org.syncany.operations.cleanup.CleanupOperation;
import org.syncany.operations.daemon.messages.DownChangesDetectedSyncExternalEvent;
import org.syncany.operations.daemon.messages.DownDownloadFileSyncExternalEvent;
import org.syncany.operations.daemon.messages.DownEndSyncExternalEvent;
import org.syncany.operations.daemon.messages.DownStartSyncExternalEvent;
import org.syncany.operations.down.DownOperationOptions.DownConflictStrategy;
import org.syncany.operations.down.DownOperationResult.DownResultCode;
import org.syncany.operations.ls_remote.LsRemoteOperation;
import org.syncany.operations.ls_remote.LsRemoteOperationResult;
import org.syncany.operations.up.UpOperation;
import org.syncany.plugins.transfer.StorageException;
import org.syncany.plugins.transfer.TransferManager;
import org.syncany.plugins.transfer.files.CleanupRemoteFile;
import org.syncany.plugins.transfer.files.DatabaseRemoteFile;

import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;

/**
 * The down operation implements a central part of Syncany's business logic. It determines
 * whether other clients have uploaded new changes, downloads and compares these changes to
 * the local database, and applies them locally. The down operation is the complement to the
 * {@link UpOperation}.
 *
 * <p>The general operation flow is as follows:
 * <ol>
 *  <li>List all database versions on the remote storage using the {@link LsRemoteOperation}
 *      (implemented in {@link #listUnknownRemoteDatabases() listUnknownRemoteDatabases()})</li>
 *  <li>Download unknown databases (if any) using a {@link TransferManager}; skip the rest of the
 *      down operation otherwise (implemented in {@link #downloadUnknownRemoteDatabases(List) downloadUnknownRemoteDatabases()})</li>
 *  <li>Load remote database headers (branches) and compare them to the local database to determine a winner
 *      using several methods of the {@link DatabaseReconciliator}</li>
 *  <li>Determine whether the local branch conflicts with the winner branch; if so, prune conflicting
 *      local database versions (using {@link DatabaseReconciliator#findLosersPruneBranch(DatabaseBranch, DatabaseBranch)
 *      findLosersPruneBranch()})</li>
 *  <li>If the apply-changes flag is switched on, changes are applied to the local file system using the
 *      {@link ApplyChangesOperation}.</li>
 *  <li>Save the local database and update the known database list (database files that do not need to be
 *      downloaded again)</li>
 * </ol>
 *
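 * <p>As a rough usage sketch (assuming a fully initialized {@link Config} instance named
 * <code>config</code> here; exception handling omitted), the operation is typically run like this:
 *
 * <pre>{@code
 * DownOperationResult result = new DownOperation(config).execute();
 *
 * if (result.getResultCode() == DownResultCode.OK_WITH_REMOTE_CHANGES) {
 *     // Remote changes were downloaded and, if apply-changes is enabled, applied locally
 * }
 * }</pre>
 *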
 * @see DatabaseReconciliator
 * @author Philipp C. Heckel (philipp.heckel@gmail.com)
 */
public class DownOperation extends AbstractTransferOperation {
	private static final Logger logger = Logger.getLogger(DownOperation.class.getSimpleName());

	public static final String ACTION_ID = "down";

	private DownOperationOptions options;
	private DownOperationResult result;

	private SqlDatabase localDatabase;
	private DatabaseReconciliator databaseReconciliator;
	private DatabaseXmlSerializer databaseSerializer;

	public DownOperation(Config config) {
		this(config, new DownOperationOptions());
	}

	public DownOperation(Config config, DownOperationOptions options) {
		super(config, ACTION_ID);

		this.options = options;
		this.result = new DownOperationResult();

		this.localDatabase = new SqlDatabase(config);
		this.databaseReconciliator = new DatabaseReconciliator();
		this.databaseSerializer = new DatabaseXmlSerializer(config.getTransformer());
	}

	/**
	 * Executes the down operation, roughly following these steps:
	 *
	 * <ul>
	 *  <li>Download the remote databases to the local cache folder
	 *  <li>Read version headers (vector clocks)
	 *  <li>Determine winner branch
	 *  <li>Prune conflicting local database versions (if any)
	 *  <li>Apply winner's branch
	 *  <li>Write names of newly analyzed remote databases (so we don't download them again)
	 * </ul>
	 */
	@Override
	public DownOperationResult execute() throws Exception {
		logger.log(Level.INFO, "");
		logger.log(Level.INFO, "Running 'Sync down' at client " + config.getMachineName() + " ...");
		logger.log(Level.INFO, "--------------------------------------------");

		fireStartEvent();

		if (!checkPreconditions()) {
			fireEndEvent();
			return result;
		}

		fireChangesDetectedEvent();
		startOperation();

		// If we do a down, we are no longer allowed to resume a transaction
		transferManager.clearResumableTransactions();
		transferManager.clearPendingTransactions();

		DatabaseBranch localBranch = localDatabase.getLocalDatabaseBranch();
		List<DatabaseRemoteFile> newRemoteDatabases = result.getLsRemoteResult().getUnknownRemoteDatabases();

		SortedMap<File, DatabaseRemoteFile> unknownRemoteDatabasesInCache = downloadUnknownRemoteDatabases(newRemoteDatabases);
		SortedMap<DatabaseRemoteFile, List<DatabaseVersion>> remoteDatabaseHeaders = readUnknownDatabaseVersionHeaders(unknownRemoteDatabasesInCache);
		Map<DatabaseVersionHeader, File> databaseVersionLocations = findDatabaseVersionLocations(remoteDatabaseHeaders, unknownRemoteDatabasesInCache);

		Map<String, CleanupRemoteFile> remoteCleanupFiles = getRemoteCleanupFiles();
		boolean cleanupOccurred = cleanupOccurred(remoteCleanupFiles);

		List<PartialFileHistory> preDeleteFileHistoriesWithLastVersion = null;

		if (cleanupOccurred) {
			logger.log(Level.INFO, "Cleanup occurred. Capturing local file histories, then deleting entire database ...");

			// Capture file histories
			preDeleteFileHistoriesWithLastVersion = localDatabase.getFileHistoriesWithLastVersion();

			// Get rid of local database
			localDatabase.deleteAll();

			// Normally, we wouldn't want to commit in the middle of an operation, but unfortunately
			// we have to: not committing causes database operations to hang, because UNCOMMITTED_READ
			// does not do enough magic to proceed. The commit itself is not a problem, since we need
			// to redownload all remote data anyway.
			localDatabase.commit();

			// Set last cleanup values
			long lastRemoteCleanupNumber = getLastRemoteCleanupNumber(remoteCleanupFiles);

			localDatabase.writeCleanupNumber(lastRemoteCleanupNumber);
			localDatabase.writeCleanupTime(System.currentTimeMillis() / 1000);

			localBranch = new DatabaseBranch();
		}

		try {
			DatabaseBranches allBranches = populateDatabaseBranches(localBranch, remoteDatabaseHeaders);
			Map.Entry<String, DatabaseBranch> winnersBranch = determineWinnerBranch(allBranches);

			purgeConflictingLocalBranch(localBranch, winnersBranch);
			applyWinnersBranch(localBranch, winnersBranch, databaseVersionLocations, cleanupOccurred,
					preDeleteFileHistoriesWithLastVersion);

			persistMuddyMultiChunks(winnersBranch, allBranches, databaseVersionLocations);
			removeNonMuddyMultiChunks();

			localDatabase.writeKnownRemoteDatabases(newRemoteDatabases);
			localDatabase.commit();
		}
		catch (Exception e) {
			localDatabase.rollback();
			throw e;
		}

		finishOperation();
		fireEndEvent();

		logger.log(Level.INFO, "Sync down done.");
		return result;
	}

	private void fireStartEvent() {
		eventBus.post(new DownStartSyncExternalEvent(config.getLocalDir().getAbsolutePath()));
	}

	private void fireChangesDetectedEvent() {
		eventBus.post(new DownChangesDetectedSyncExternalEvent(config.getLocalDir().getAbsolutePath()));
	}

	private void fireEndEvent() {
		eventBus.post(new DownEndSyncExternalEvent(config.getLocalDir().getAbsolutePath(), result.getResultCode(), result.getChangeSet()));
	}

	/**
	 * Checks whether any new databases are available remotely and whether any other conflicting
	 * actions are running.
	 *
	 * <p>This method sets the result code in <code>result</code> according to the outcome of
	 * these checks and returns <code>true</code> if the rest of the operation can
	 * continue, <code>false</code> otherwise.
	 */
	private boolean checkPreconditions() throws Exception {
		// Check strategies
		if (options.getConflictStrategy() != DownConflictStrategy.RENAME) {
			logger.log(Level.INFO, "Conflict strategy " + options.getConflictStrategy() + " not yet implemented.");
			result.setResultCode(DownResultCode.NOK);

			return false;
		}

		// Check if other operations are running
		// We do this on purpose before LsRemote to prevent discrepancies
		// between the LS result and the actual situation.
		// This condition is so racy that it might not actually occur in
		// practice, but it does in stress tests (#433)
		if (otherRemoteOperationsRunning(CleanupOperation.ACTION_ID)) {
			logger.log(Level.INFO, "* Cleanup running. Skipping down operation.");
			result.setResultCode(DownResultCode.NOK);

			return false;
		}

		// Check which remote databases to download based on the last local vector clock
		LsRemoteOperationResult lsRemoteResult = listUnknownRemoteDatabases();
		result.setLsRemoteResult(lsRemoteResult);

		if (lsRemoteResult.getUnknownRemoteDatabases().isEmpty()) {
			logger.log(Level.INFO, "* Nothing new. Skipping down operation.");
			result.setResultCode(DownResultCode.OK_NO_REMOTE_CHANGES);

			return false;
		}

		return true;
	}

	/**
	 * Lists unknown/new remote databases using the {@link LsRemoteOperation}.
	 */
	private LsRemoteOperationResult listUnknownRemoteDatabases() throws Exception {
		return new LsRemoteOperation(config, transferManager).execute();
	}

	/**
	 * Downloads the previously identified new/unknown remote databases to the local cache
	 * and returns a map with the local cache files mapped to the given remote database
	 * files. The method additionally fires events for every database it downloads.
	 */
	private SortedMap<File, DatabaseRemoteFile> downloadUnknownRemoteDatabases(List<DatabaseRemoteFile> unknownRemoteDatabases)
			throws StorageException {

		logger.log(Level.INFO, "Downloading unknown databases.");

		SortedMap<File, DatabaseRemoteFile> unknownRemoteDatabasesInCache = new TreeMap<File, DatabaseRemoteFile>();
		int downloadFileIndex = 0;

		for (DatabaseRemoteFile remoteFile : unknownRemoteDatabases) {
			File unknownRemoteDatabaseFileInCache = config.getCache().getDatabaseFile(remoteFile.getName());
			DatabaseRemoteFile unknownDatabaseRemoteFile = new DatabaseRemoteFile(remoteFile.getName());

			logger.log(Level.INFO, "- Downloading {0} to local cache at {1}", new Object[] { remoteFile.getName(), unknownRemoteDatabaseFileInCache });
			eventBus.post(new DownDownloadFileSyncExternalEvent(config.getLocalDir().getAbsolutePath(), "database", ++downloadFileIndex,
					unknownRemoteDatabases.size()));

			transferManager.download(unknownDatabaseRemoteFile, unknownRemoteDatabaseFileInCache);

			unknownRemoteDatabasesInCache.put(unknownRemoteDatabaseFileInCache, unknownDatabaseRemoteFile);
			result.getDownloadedUnknownDatabases().add(remoteFile.getName());
		}

		return unknownRemoteDatabasesInCache;
	}

	/**
	 * Reads the given database files into individual per-client {@link DatabaseBranch}es. This method only
	 * reads the headers from the local database files, and does not load the entire databases into memory.
	 *
	 * <p>The returned database branches contain only the per-client {@link DatabaseVersionHeader}s, and not
	 * the entire stitched branches, i.e. A's database branch will only contain database version headers from A.
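	 *
	 * <p>Schematically (illustration only; headers abbreviated by their vector clocks), the returned
	 * map associates each remote database file with the versions it contains, e.g.:
	 *
	 * <pre>
	 *   remote database file uploaded by A  ->  [ (A:1), (A:2) ]
	 *   remote database file uploaded by B  ->  [ (B:4) ]
	 * </pre>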
	 */
	private SortedMap<DatabaseRemoteFile, List<DatabaseVersion>> readUnknownDatabaseVersionHeaders(SortedMap<File, DatabaseRemoteFile> remoteDatabases)
			throws IOException,
			StorageException {
		logger.log(Level.INFO, "Loading database headers, creating branches ...");

		// Read database files
		SortedMap<DatabaseRemoteFile, List<DatabaseVersion>> remoteDatabaseHeaders = new TreeMap<DatabaseRemoteFile, List<DatabaseVersion>>();

		for (Map.Entry<File, DatabaseRemoteFile> remoteDatabaseFileEntry : remoteDatabases.entrySet()) {
			MemoryDatabase remoteDatabase = new MemoryDatabase(); // Database cannot be reused, since these might be different clients

			File remoteDatabaseFileInCache = remoteDatabaseFileEntry.getKey();
			DatabaseRemoteFile remoteDatabaseFile = remoteDatabaseFileEntry.getValue();

			databaseSerializer.load(remoteDatabase, remoteDatabaseFileInCache, null, null, DatabaseReadType.HEADER_ONLY); // only load headers!

			remoteDatabaseHeaders.put(remoteDatabaseFile, remoteDatabase.getDatabaseVersions());
		}

		return remoteDatabaseHeaders;
	}

	/**
	 * This method takes a Map containing DatabaseVersions (headers only) and loads these headers into {@link DatabaseBranches}.
	 * In addition, the local branch is added. The resulting DatabaseBranches contain every header exactly once,
	 * filed under the client that created that version.
	 *
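	 * <p>For illustration only, with headers abbreviated by their vector clocks, the result for two
	 * remote clients A and B plus the local client C might look like this:
	 *
	 * <pre>
	 *   A: [ (A:1), (A:2), (A:3) ]
	 *   B: [ (B:1), (B:2) ]
	 *   C: [ (C:1) ]
	 * </pre>
	 *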
	 * @param localBranch {@link DatabaseBranch} containing the locally known headers.
	 * @param remoteDatabaseHeaders Map from {@link DatabaseRemoteFile}s (important for client names) to the {@link DatabaseVersion}s that are
	 *        contained in these files.
	 *
	 * @return DatabaseBranches filled with all the headers that originated from either of the parameters.
	 */
	private DatabaseBranches populateDatabaseBranches(DatabaseBranch localBranch,
			SortedMap<DatabaseRemoteFile, List<DatabaseVersion>> remoteDatabaseHeaders) {
		DatabaseBranches allBranches = new DatabaseBranches();

		allBranches.put(config.getMachineName(), localBranch.clone());

		for (DatabaseRemoteFile remoteDatabaseFile : remoteDatabaseHeaders.keySet()) {

			// Populate branches
			DatabaseBranch remoteClientBranch = allBranches.getBranch(remoteDatabaseFile.getClientName(), true);

			for (DatabaseVersion remoteDatabaseVersion : remoteDatabaseHeaders.get(remoteDatabaseFile)) {
				DatabaseVersionHeader header = remoteDatabaseVersion.getHeader();
				remoteClientBranch.add(header);
			}
		}

		logger.log(Level.INFO, "Populated unknown branches: " + allBranches);
		return allBranches;
	}

	/**
	 * This method uses the {@link DatabaseReconciliator} to compare the local database with the
	 * downloaded remote databases, in order to determine a winner. The winner's database versions
	 * will be applied locally.
	 *
	 * <p>For the comparison, the {@link DatabaseVersionHeader}s (mainly the {@link VectorClock}) of each
	 * database version are compared. Using these vector clocks, the underlying algorithms determine
	 * potential conflicts between database versions (i.e. simultaneous vector clocks) and resolve these
	 * conflicts by comparing local timestamps.
	 *
	 * <p>The detailed algorithm is described in the {@link DatabaseReconciliator}.
	 *
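	 * <p>As a sketch of the idea (illustrative values only): if A's latest header carries the vector
	 * clock (A:2, B:1) and B's latest header carries (A:1, B:2), neither clock dominates the other, so
	 * the two database versions are in conflict; the conflict is then resolved by comparing the versions'
	 * local timestamps (see {@link DatabaseReconciliator} for the exact tie-breaking rules).
	 *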
	 * @param allStitchedBranches The newly downloaded remote database version headers (= branches)
	 * @return Returns the branch of the winner
	 * @throws Exception If any kind of error occurs (...)
	 */
	private Map.Entry<String, DatabaseBranch> determineWinnerBranch(DatabaseBranches allStitchedBranches)
			throws Exception {

		logger.log(Level.INFO, "Determine winner using database reconciliator ...");
		Entry<String, DatabaseBranch> winnersBranch = databaseReconciliator.findWinnerBranch(allStitchedBranches);

		if (winnersBranch != null) {
			return winnersBranch;
		}
		else {
			return new AbstractMap.SimpleEntry<String, DatabaseBranch>("", new DatabaseBranch());
		}
	}

	/**
	 * Marks locally conflicting database versions as <code>DIRTY</code> and removes remote databases that
	 * correspond to those database versions. This method uses the {@link DatabaseReconciliator}
	 * to determine whether there is a local purge branch.
	 */
	private void purgeConflictingLocalBranch(DatabaseBranch localBranch, Entry<String, DatabaseBranch> winnersBranch) throws Exception {
		DatabaseBranch localPurgeBranch = databaseReconciliator.findLosersPruneBranch(localBranch, winnersBranch.getValue());
		logger.log(Level.INFO, "- Database versions to REMOVE locally: " + localPurgeBranch);

		if (localPurgeBranch.size() == 0) {
			logger.log(Level.INFO, "  + Nothing to purge locally. No conflicts. Only updates. Nice!");
		}
		else {
			// Load dirty database (if existent)
			logger.log(Level.INFO, "  + Marking databases as DIRTY locally ...");

			for (DatabaseVersionHeader databaseVersionHeader : localPurgeBranch.getAll()) {
				logger.log(Level.INFO, "    * MASTER->DIRTY: " + databaseVersionHeader);
				localDatabase.markDatabaseVersionDirty(databaseVersionHeader.getVectorClock());

				boolean isOwnDatabaseVersionHeader = config.getMachineName().equals(databaseVersionHeader.getClient());

				if (isOwnDatabaseVersionHeader) {
					String remoteFileToPruneClientName = config.getMachineName();
					long remoteFileToPruneVersion = databaseVersionHeader.getVectorClock().getClock(config.getMachineName());
					DatabaseRemoteFile remoteFileToPrune = new DatabaseRemoteFile(remoteFileToPruneClientName, remoteFileToPruneVersion);

					logger.log(Level.INFO, "    * Deleting own remote database file " + remoteFileToPrune + " ...");
					transferManager.delete(remoteFileToPrune);
				}
				else {
					logger.log(Level.INFO, "    * NOT deleting any database file remotely (not our database!)");
				}

				result.getDirtyDatabasesCreated().add(databaseVersionHeader);
			}
		}
	}

	/**
	 * Applies the winner's branch locally, both in the local database and on the local file system. To
	 * do so, it reads the winner's database, downloads newly required multichunks, determines file system actions
	 * and applies these actions locally.
	 *
	 * @param cleanupOccurred whether a remote cleanup was detected (in which case the local database has been reset)
	 * @param preDeleteFileHistoriesWithLastVersion file histories captured before the local database was emptied
	 *        due to a cleanup, or <code>null</code> if no cleanup occurred
	 */
	private void applyWinnersBranch(DatabaseBranch localBranch, Entry<String, DatabaseBranch> winnersBranch,
			Map<DatabaseVersionHeader, File> databaseVersionLocations, boolean cleanupOccurred,
			List<PartialFileHistory> preDeleteFileHistoriesWithLastVersion) throws Exception {

		DatabaseBranch winnersApplyBranch = databaseReconciliator.findWinnersApplyBranch(localBranch, winnersBranch.getValue());

		logger.log(Level.INFO, "- Cleanup occurred: " + cleanupOccurred);
		logger.log(Level.INFO, "- Database versions to APPLY locally: " + winnersApplyBranch);

		boolean remoteChangesOccurred = winnersApplyBranch.size() > 0 || cleanupOccurred;

		if (!remoteChangesOccurred) {
			logger.log(Level.WARNING, "  + Nothing to update. Nice!");
			result.setResultCode(DownResultCode.OK_NO_REMOTE_CHANGES);
		}
		else {
			logger.log(Level.INFO, "Loading winners database (DEFAULT) ...");
			DatabaseFileReader databaseFileReader = new DatabaseFileReader(databaseSerializer, winnersApplyBranch, databaseVersionLocations);

			boolean noDatabaseVersions = !databaseFileReader.hasNext();

			if (noDatabaseVersions) {
				applyChangesAndPersistDatabase(new MemoryDatabase(), cleanupOccurred, preDeleteFileHistoriesWithLastVersion);
			}
			else {
				while (databaseFileReader.hasNext()) {
					MemoryDatabase winnersDatabase = databaseFileReader.next();
					applyChangesAndPersistDatabase(winnersDatabase, cleanupOccurred, preDeleteFileHistoriesWithLastVersion);
				}
			}

			result.setResultCode(DownResultCode.OK_WITH_REMOTE_CHANGES);
		}
	}

	private void applyChangesAndPersistDatabase(MemoryDatabase winnersDatabase, boolean cleanupOccurred,
			List<PartialFileHistory> preDeleteFileHistoriesWithLastVersion) throws Exception {

		if (options.isApplyChanges()) {
			new ApplyChangesOperation(config, localDatabase, transferManager, winnersDatabase, result, cleanupOccurred,
					preDeleteFileHistoriesWithLastVersion).execute();
		}
		else {
			logger.log(Level.INFO, "Doing nothing on the file system, because --no-apply is switched on");
		}

		// We only persist the versions that we have already applied.
		DatabaseBranch currentApplyBranch = new DatabaseBranch();
		for (DatabaseVersion databaseVersion : winnersDatabase.getDatabaseVersions()) {
			currentApplyBranch.add(databaseVersion.getHeader());
		}

		persistDatabaseVersions(currentApplyBranch, winnersDatabase);
		localDatabase.commit();
	}

	/**
	 * Persists the given winner's branch to the local database, i.e. for every database version
	 * in the winner's branch, all contained multichunks, chunks, etc. are added to the local SQL
	 * database.
	 *
	 * <p>This method applies both regular and purge database versions.
	 */
	private void persistDatabaseVersions(DatabaseBranch winnersApplyBranch, MemoryDatabase winnersDatabase)
			throws SQLException {

		// Add winners database to local database
		// Note: This must happen AFTER the file system stuff, because we compare the winners database with the local database!
		logger.log(Level.INFO, "- Adding database versions to SQL database ...");

		for (DatabaseVersionHeader currentDatabaseVersionHeader : winnersApplyBranch.getAll()) {
			persistDatabaseVersion(winnersDatabase, currentDatabaseVersionHeader);
		}
	}

	/**
	 * Persists a regular database version to the local database by using
	 * {@link SqlDatabase#writeDatabaseVersion(DatabaseVersion)}.
	 */
	private void persistDatabaseVersion(MemoryDatabase winnersDatabase, DatabaseVersionHeader currentDatabaseVersionHeader) {
		logger.log(Level.INFO, "  + Applying database version " + currentDatabaseVersionHeader.getVectorClock());

		DatabaseVersion applyDatabaseVersion = winnersDatabase.getDatabaseVersion(currentDatabaseVersionHeader.getVectorClock());
		logger.log(Level.FINE, "  + Contents: " + applyDatabaseVersion);
		localDatabase.writeDatabaseVersion(applyDatabaseVersion);
	}

	/**
	 * Identifies and persists 'muddy' multichunks to the local database. Muddy multichunks are multichunks
	 * that have been referenced by DIRTY database versions and might be reused in future database versions when
	 * the other client cleans up its mess (performs another 'up').
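	 *
	 * <p>For example (illustration only): if the winner's branch contains B's versions (B:1) and (B:2),
	 * but B's branch also contains a losing version (B:3), then (B:3) is considered DIRTY here, and the
	 * multichunks it references are recorded as 'muddy' so that they are not lost if B re-uses them later.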
	 */
	private void persistMuddyMultiChunks(Entry<String, DatabaseBranch> winnersBranch, DatabaseBranches allStitchedBranches,
			Map<DatabaseVersionHeader, File> databaseVersionLocations) throws StorageException, IOException, SQLException {
		// Find dirty database versions (from other clients!) and load them from files
		Map<DatabaseVersionHeader, Collection<MultiChunkEntry>> muddyMultiChunksPerDatabaseVersion = new HashMap<>();
		Set<DatabaseVersionHeader> winnersDatabaseVersionHeaders = Sets.newHashSet(winnersBranch.getValue().getAll());

		for (String otherClientName : allStitchedBranches.getClients()) {
			boolean isLocalMachine = config.getMachineName().equals(otherClientName);

			if (!isLocalMachine) {
				DatabaseBranch otherClientBranch = allStitchedBranches.getBranch(otherClientName);
				Set<DatabaseVersionHeader> otherClientDatabaseVersionHeaders = Sets.newHashSet(otherClientBranch.getAll());

				SetView<DatabaseVersionHeader> otherMuddyDatabaseVersionHeaders = Sets.difference(otherClientDatabaseVersionHeaders,
						winnersDatabaseVersionHeaders);
				boolean hasMuddyDatabaseVersionHeaders = otherMuddyDatabaseVersionHeaders.size() > 0;

				if (hasMuddyDatabaseVersionHeaders) {
					logger.log(Level.INFO, "DIRTY database version headers of " + otherClientName + ":  " + otherMuddyDatabaseVersionHeaders);

					for (DatabaseVersionHeader muddyDatabaseVersionHeader : otherMuddyDatabaseVersionHeaders) {
						MemoryDatabase muddyMultiChunksDatabase = new MemoryDatabase();

						File localFileForMuddyDatabaseVersion = databaseVersionLocations.get(muddyDatabaseVersionHeader);
						VectorClock fromVersion = muddyDatabaseVersionHeader.getVectorClock();
						VectorClock toVersion = muddyDatabaseVersionHeader.getVectorClock();

						logger.log(Level.INFO, "  - Loading " + muddyDatabaseVersionHeader + " from file " + localFileForMuddyDatabaseVersion);
						databaseSerializer.load(muddyMultiChunksDatabase, localFileForMuddyDatabaseVersion, fromVersion, toVersion,
								DatabaseReadType.FULL);

						boolean hasMuddyMultiChunks = muddyMultiChunksDatabase.getMultiChunks().size() > 0;

						if (hasMuddyMultiChunks) {
							muddyMultiChunksPerDatabaseVersion.put(muddyDatabaseVersionHeader, muddyMultiChunksDatabase.getMultiChunks());
						}
					}

				}
			}
		}

		// Add muddy multichunks to 'multichunks_muddy' database table
		boolean hasMuddyMultiChunks = muddyMultiChunksPerDatabaseVersion.size() > 0;

		if (hasMuddyMultiChunks) {
			localDatabase.writeMuddyMultiChunks(muddyMultiChunksPerDatabaseVersion);
		}
	}

	/**
	 * Removes multichunks from the 'muddy' table as soon as they become present in the
	 * actual multichunk database table.
	 */
	private void removeNonMuddyMultiChunks() throws SQLException {
		// TODO [medium] This might not get the right multichunks. Rather use the database version information in the multichunk_muddy table.
		localDatabase.removeNonMuddyMultiChunks();
	}

	/**
	 * This method takes a Map from {@link DatabaseRemoteFile}s to Lists of {@link DatabaseVersion}s and produces more or less
	 * the reverse Map, which can be used to find the cached copy of a remote database file, given a {@link DatabaseVersionHeader}.
	 *
	 * @param remoteDatabaseHeaders mapping from remote database files to the versions they contain.
	 * @param databaseRemoteFilesInCache mapping from local cache files to the database remote file cached in them.
	 *
	 * @return databaseVersionLocations a Map from {@link DatabaseVersionHeader}s to the local File in which that version can be found.
	 */
	private Map<DatabaseVersionHeader, File> findDatabaseVersionLocations(Map<DatabaseRemoteFile, List<DatabaseVersion>> remoteDatabaseHeaders,
			Map<File, DatabaseRemoteFile> databaseRemoteFilesInCache) {

		Map<DatabaseVersionHeader, File> databaseVersionLocations = new HashMap<DatabaseVersionHeader, File>();

		for (File databaseFile : databaseRemoteFilesInCache.keySet()) {
			DatabaseRemoteFile databaseRemoteFile = databaseRemoteFilesInCache.get(databaseFile);
			for (DatabaseVersion databaseVersion : remoteDatabaseHeaders.get(databaseRemoteFile)) {
				databaseVersionLocations.put(databaseVersion.getHeader(), databaseFile);
			}
		}

		return databaseVersionLocations;
	}

	private Map<String, CleanupRemoteFile> getRemoteCleanupFiles() throws StorageException {
		return transferManager.list(CleanupRemoteFile.class);
	}

	/**
	 * This method queries the local database and compares the result to the existing remote cleanup files to determine
	 * whether a cleanup has occurred since it was last handled locally. The cleanup number is a simple counter.
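	 *
	 * <p>For example (illustrative numbers): if the local database stores cleanup number 2 and the remote
	 * cleanup files indicate cleanup number 3, a cleanup has occurred; if no local cleanup number is stored
	 * yet, any remote cleanup number greater than zero counts as a cleanup.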
	 */
	private boolean cleanupOccurred(Map<String, CleanupRemoteFile> remoteCleanupFiles) throws Exception {
		Long lastRemoteCleanupNumber = getLastRemoteCleanupNumber(remoteCleanupFiles);
		Long lastLocalCleanupNumber = localDatabase.getCleanupNumber();

		if (lastLocalCleanupNumber != null) {
			return lastRemoteCleanupNumber > lastLocalCleanupNumber;
		}
		else {
			return lastRemoteCleanupNumber > 0;
		}
	}
}