/*
 * Syncany, www.syncany.org
 * Copyright (C) 2011-2016 Philipp C. Heckel <philipp.heckel@gmail.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
package org.syncany.operations.cleanup;

import java.io.File;
import java.io.IOException;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.syncany.chunk.Chunk;
import org.syncany.chunk.MultiChunk;
import org.syncany.config.Config;
import org.syncany.database.DatabaseVersion;
import org.syncany.database.FileContent;
import org.syncany.database.FileVersion;
import org.syncany.database.MultiChunkEntry;
import org.syncany.database.MultiChunkEntry.MultiChunkId;
import org.syncany.database.PartialFileHistory;
import org.syncany.database.PartialFileHistory.FileHistoryId;
import org.syncany.database.SqlDatabase;
import org.syncany.database.dao.DatabaseXmlSerializer;
import org.syncany.database.dao.FileVersionSqlDao;
import org.syncany.operations.AbstractTransferOperation;
import org.syncany.operations.cleanup.CleanupOperationOptions.TimeUnit;
import org.syncany.operations.cleanup.CleanupOperationResult.CleanupResultCode;
import org.syncany.operations.daemon.messages.CleanupEndSyncExternalEvent;
import org.syncany.operations.daemon.messages.CleanupStartCleaningSyncExternalEvent;
import org.syncany.operations.daemon.messages.CleanupStartSyncExternalEvent;
import org.syncany.operations.down.DownOperation;
import org.syncany.operations.ls_remote.LsRemoteOperation;
import org.syncany.operations.ls_remote.LsRemoteOperationResult;
import org.syncany.operations.status.StatusOperation;
import org.syncany.operations.status.StatusOperationResult;
import org.syncany.operations.up.BlockingTransfersException;
import org.syncany.operations.up.UpOperation;
import org.syncany.plugins.transfer.RemoteTransaction;
import org.syncany.plugins.transfer.StorageException;
import org.syncany.plugins.transfer.files.CleanupRemoteFile;
import org.syncany.plugins.transfer.files.DatabaseRemoteFile;
import org.syncany.plugins.transfer.files.MultichunkRemoteFile;
import org.syncany.plugins.transfer.files.RemoteFile;

/**
 * The purpose of the cleanup operation is to keep the local database and the
 * remote repository clean -- thereby allowing them to be used indefinitely without
 * any performance issues or storage shortage.
 *
 * <p>The responsibilities of the cleanup operation include:
 * <ul>
 *   <li>Remove old {@link FileVersion}s and their corresponding database entities.
 *       In particular, this also removes {@link PartialFileHistory}s, {@link FileContent}s,
 *       {@link Chunk}s and {@link MultiChunk}s.</li>
 *   <li>Merge the metadata of a single client and remove old database version files
 *       from the remote storage.</li>
 * </ul>
 *
 * <p>High-level strategy:
 * <ul>
 *    <li>Lock the repo and start a thread that renews the lock every X seconds</li>
 *    <li>Find old versions / contents / ... in the database</li>
 *    <li>Delete these versions and contents locally</li>
 *    <li>Delete all remote metadata</li>
 *    <li>Obtain consistent database files from the local database</li>
 *    <li>Upload the new database files to the repo</li>
 *    <li>Remotely delete unused multichunks</li>
 *    <li>Stop the lock renewal thread and unlock the repo</li>
 * </ul>
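 *
 * <p>A minimal usage sketch (the {@code setForce(...)} setter is an assumption,
 * mirroring the {@code isForce()} getter used in this class):
 * <pre>{@code
 * CleanupOperationOptions options = new CleanupOperationOptions();
 * options.setForce(false); // assumed setter; if true, cleanup runs despite a recent cleanup
 * CleanupOperationResult result = new CleanupOperation(config, options).execute();
 * }</pre>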
 *
 * <p><b>Important:</b> All remote operations MUST check whether the lock has been
 * renewed recently. If it has not, the connection has been lost.
 *
 * @author Philipp C. Heckel (philipp.heckel@gmail.com)
 */
public class CleanupOperation extends AbstractTransferOperation {
	private static final Logger logger = Logger.getLogger(CleanupOperation.class.getSimpleName());

	public static final String ACTION_ID = "cleanup";
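	// Milliseconds to wait before double-checking that no other clients are active (see execute())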
	private static final int BEFORE_DOUBLE_CHECK_TIME = 1200;

	private CleanupOperationOptions options;
	private CleanupOperationResult result;

	private SqlDatabase localDatabase;
	private RemoteTransaction remoteTransaction;

	public CleanupOperation(Config config) {
		this(config, new CleanupOperationOptions());
	}

	public CleanupOperation(Config config, CleanupOperationOptions options) {
		super(config, ACTION_ID);

		this.options = options;
		this.result = new CleanupOperationResult();
		this.localDatabase = new SqlDatabase(config);
	}

	@Override
	public CleanupOperationResult execute() throws Exception {
		logger.log(Level.INFO, "");
		logger.log(Level.INFO, "Running 'Cleanup' at client " + config.getMachineName() + " ...");
		logger.log(Level.INFO, "--------------------------------------------");
		// Do an initial check of the remote repository preconditions
		CleanupResultCode preconditionResult = checkPreconditions();

		fireStartEvent();
		if (preconditionResult != CleanupResultCode.OK) {
			fireEndEvent();
			return new CleanupOperationResult(preconditionResult);
		}

		fireCleanupNeededEvent();

		// At this point, the operation will lock the repository
		startOperation();

		// Roll back existing/old transactions, if there are any.
		// If other clients have unfinished transactions with deletions, do not proceed.
		try {
			transferManager.cleanTransactions();
		}
		catch (BlockingTransfersException ignored) {
			finishOperation();
			fireEndEvent();
			return new CleanupOperationResult(CleanupResultCode.NOK_REPO_BLOCKED);
		}

		// Wait a moment (conservative cleanup, see #104)
		logger.log(Level.INFO, "Cleanup: Waiting a while to be sure that no other actions are running ...");
		Thread.sleep(BEFORE_DOUBLE_CHECK_TIME);

		// Check again. No other clients should be busy, because we waited BEFORE_DOUBLE_CHECK_TIME
		preconditionResult = checkPreconditions();

		if (preconditionResult != CleanupResultCode.OK) {
			finishOperation();
			fireEndEvent();
			return new CleanupOperationResult(preconditionResult);
		}

		// If we do cleanup, we are no longer allowed to resume a transaction
		transferManager.clearResumableTransactions();
		transferManager.clearPendingTransactions();

		// Now do the actual work!
		logger.log(Level.INFO, "Cleanup: Starting transaction.");
		remoteTransaction = new RemoteTransaction(config, transferManager);

		removeOldVersions();

		if (options.isRemoveUnreferencedTemporaryFiles()) {
			transferManager.removeUnreferencedTemporaryFiles();
		}

		mergeRemoteFiles();

		// We went successfully through the entire operation and checked everything. Hence we update the last cleanup time.
		updateLastCleanupTime();

		finishOperation();
		fireEndEvent();

		return updateResultCode(result);
	}

	/**
	 * This method checks if we have changed anything and sets the
	 * {@link CleanupResultCode} of the given result accordingly.
	 *
	 * @param result The result so far in this operation.
	 * @return The original result, with the relevant {@link CleanupResultCode} set.
	 */
	private CleanupOperationResult updateResultCode(CleanupOperationResult result) {
		if (result.getMergedDatabaseFilesCount() > 0 || result.getRemovedMultiChunksCount() > 0 || result.getRemovedOldVersionsCount() > 0) {
			result.setResultCode(CleanupResultCode.OK);
		}
		else {
			result.setResultCode(CleanupResultCode.OK_NOTHING_DONE);
		}

		return result;
	}

	private void fireStartEvent() {
		eventBus.post(new CleanupStartSyncExternalEvent(config.getLocalDir().getAbsolutePath()));
	}

	private void fireCleanupNeededEvent() {
		eventBus.post(new CleanupStartCleaningSyncExternalEvent(config.getLocalDir().getAbsolutePath()));
	}

	private void fireEndEvent() {
		eventBus.post(new CleanupEndSyncExternalEvent(config.getLocalDir().getAbsolutePath(), result));
	}

	/**
	 * This method inspects the local database and the remote repository to
	 * see if cleanup should be performed.
	 *
	 * @return {@link CleanupResultCode#OK} if nothing prevents continuing, another relevant code otherwise.
	 */
	private CleanupResultCode checkPreconditions() throws Exception {
		if (hasDirtyDatabaseVersions()) {
			return CleanupResultCode.NOK_DIRTY_LOCAL;
		}

		if (!options.isForce() && wasCleanedRecently()) {
			return CleanupResultCode.NOK_RECENTLY_CLEANED;
		}

		if (hasLocalChanges()) {
			return CleanupResultCode.NOK_LOCAL_CHANGES;
		}

		if (hasRemoteChanges()) {
			return CleanupResultCode.NOK_REMOTE_CHANGES;
		}

		if (otherRemoteOperationsRunning(CleanupOperation.ACTION_ID, UpOperation.ACTION_ID, DownOperation.ACTION_ID)) {
			return CleanupResultCode.NOK_OTHER_OPERATIONS_RUNNING;
		}

		return CleanupResultCode.OK;
	}

	private boolean hasLocalChanges() throws Exception {
		StatusOperationResult statusOperationResult = new StatusOperation(config, options.getStatusOptions()).execute();
		return statusOperationResult.getChangeSet().hasChanges();
	}

	/**
	 * This method checks if there exist {@link FileVersion}s which are to be deleted because the history they are a part
	 * of is too long. It will collect these, remove them locally and add them to the {@link RemoteTransaction} for deletion.
	 */
	private void removeOldVersions() throws Exception {
		Map<FileHistoryId, List<FileVersion>> purgeFileVersions = new TreeMap<FileHistoryId, List<FileVersion>>();
		Map<FileHistoryId, FileVersion> purgeBeforeFileVersions = new TreeMap<FileHistoryId, FileVersion>();

		if (options.isRemoveVersionsByInterval()) {
			// Get file versions that should be purged according to the given settings. Time-based.
			purgeFileVersions = collectPurgableFileVersions();
		}

		if (options.isRemoveOldVersions()) {
			// Get all non-final file versions and deleted (final) file versions that we want to fully delete.
			// purgeFileVersions is modified here!
			purgeBeforeFileVersions = collectPurgeBeforeFileVersions(purgeFileVersions);
		}

		if (purgeFileVersions.isEmpty() && purgeBeforeFileVersions.isEmpty()) {
			logger.log(Level.INFO, "- Old version removal: Not necessary.");
			return;
		}

		logger.log(Level.INFO, "- Old version removal: Found {0} file histories and {1} file versions that need cleaning.", new Object[] {
				purgeFileVersions.size(),
				purgeBeforeFileVersions.size() });

		// Local: First, remove file versions that are no longer needed
		localDatabase.removeSmallerOrEqualFileVersions(purgeBeforeFileVersions);
		localDatabase.removeFileVersions(purgeFileVersions);

		// Local: Then, determine what must be changed remotely and remove it locally
		Map<MultiChunkId, MultiChunkEntry> unusedMultiChunks = localDatabase.getUnusedMultiChunks();

		localDatabase.removeUnreferencedDatabaseEntities();
		deleteUnusedRemoteMultiChunks(unusedMultiChunks);

		// Update stats
		long unusedMultiChunkSize = 0;

		for (MultiChunkEntry removedMultiChunk : unusedMultiChunks.values()) {
			unusedMultiChunkSize += removedMultiChunk.getSize();
		}

		result.setRemovedOldVersionsCount(purgeBeforeFileVersions.size() + purgeFileVersions.size());
		result.setRemovedMultiChunksCount(unusedMultiChunks.size());
		result.setRemovedMultiChunksSize(unusedMultiChunkSize);
	}

	private Map<FileHistoryId, FileVersion> collectPurgeBeforeFileVersions(Map<FileHistoryId, List<FileVersion>> purgeFileVersions) {
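		// getMinKeepDeletedSeconds() is in seconds; database timestamps are in milliseconds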
		long deleteBeforeTimestamp = System.currentTimeMillis() - options.getMinKeepDeletedSeconds() * 1000;

		Map<FileHistoryId, FileVersion> deletedFileVersionsBeforeTimestamp = localDatabase.getDeletedFileVersionsBefore(deleteBeforeTimestamp);
		Map<FileHistoryId, List<FileVersion>> selectedPurgeFileVersions = localDatabase.getFileHistoriesToPurgeBefore(deleteBeforeTimestamp);

		Map<FileHistoryId, FileVersion> purgeBeforeFileVersions = new HashMap<FileHistoryId, FileVersion>();
		purgeBeforeFileVersions.putAll(deletedFileVersionsBeforeTimestamp);
		putAllFileVersionsInMap(selectedPurgeFileVersions, purgeFileVersions);

		return purgeBeforeFileVersions;
	}

	/**
	 * For all time intervals defined in the purge file settings, determine the eligible file
	 * versions to be purged -- namely all but the newest one.
	 *
	 * @see CleanupOperation
	 * @see CleanupOperationOptions#getPurgeFileVersionSettings()
	 * @see FileVersionSqlDao#getFileHistoriesToPurgeInInterval(long, long, TimeUnit)
	 */
	private Map<FileHistoryId, List<FileVersion>> collectPurgableFileVersions() {
		Map<FileHistoryId, List<FileVersion>> purgeFileVersions = new HashMap<FileHistoryId, List<FileVersion>>();

		long currentTime = System.currentTimeMillis();
		long previousTruncateIntervalTimeMultiplier = 0;

		for (Map.Entry<Long, TimeUnit> purgeFileVersionSetting : options.getPurgeFileVersionSettings().entrySet()) {
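			// Each setting spans the interval [now - key, now - previous key] (in seconds);
			// within that window, the query below selects all but the newest version per
			// TimeUnit for purging (see the method javadoc above)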
			Long truncateIntervalMultiplier = purgeFileVersionSetting.getKey();
			TimeUnit truncateIntervalTimeUnit = purgeFileVersionSetting.getValue();

			long beginIntervalTimestamp = currentTime - truncateIntervalMultiplier * 1000;
			long endIntervalTimestamp = currentTime - previousTruncateIntervalTimeMultiplier * 1000;

			Map<FileHistoryId, List<FileVersion>> newPurgeFileVersions = localDatabase.getFileHistoriesToPurgeInInterval(
					beginIntervalTimestamp, endIntervalTimestamp, truncateIntervalTimeUnit);

			putAllFileVersionsInMap(newPurgeFileVersions, purgeFileVersions);
			previousTruncateIntervalTimeMultiplier = truncateIntervalMultiplier;
		}

		return purgeFileVersions;
	}

	private void putAllFileVersionsInMap(Map<FileHistoryId, List<FileVersion>> newFileVersions,
			Map<FileHistoryId, List<FileVersion>> fileHistoryPurgeFileVersions) {
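		// Merge the new file versions into the target map, appending to a history's list if one exists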
		for (FileHistoryId fileHistoryId : newFileVersions.keySet()) {
			List<FileVersion> purgeFileVersions = fileHistoryPurgeFileVersions.get(fileHistoryId);
			List<FileVersion> newPurgeFileVersions = newFileVersions.get(fileHistoryId);

			if (purgeFileVersions != null) {
				purgeFileVersions.addAll(newPurgeFileVersions);
			}
			else {
				fileHistoryPurgeFileVersions.put(fileHistoryId, newPurgeFileVersions);
			}
		}
	}

	/**
	 * This method adds unusedMultiChunks to the {@link RemoteTransaction} for deletion.
	 *
	 * @param unusedMultiChunks multichunks which are to be deleted because all references to them are gone.
	 */
	private void deleteUnusedRemoteMultiChunks(Map<MultiChunkId, MultiChunkEntry> unusedMultiChunks) throws StorageException {
		logger.log(Level.INFO, "- Deleting remote multichunks ...");

		for (MultiChunkEntry multiChunkEntry : unusedMultiChunks.values()) {
			logger.log(Level.FINE, "  + Deleting remote multichunk " + multiChunkEntry + " ...");
			remoteTransaction.delete(new MultichunkRemoteFile(multiChunkEntry.getId()));
		}
	}

	private boolean hasDirtyDatabaseVersions() {
		Iterator<DatabaseVersion> dirtyDatabaseVersions = localDatabase.getDirtyDatabaseVersions();
		return dirtyDatabaseVersions.hasNext(); // TODO [low] Is this a resource creeper?
	}

	private boolean hasRemoteChanges() throws Exception {
		LsRemoteOperationResult lsRemoteOperationResult = new LsRemoteOperation(config).execute();
		return lsRemoteOperationResult.getUnknownRemoteDatabases().size() > 0;
	}

	/**
	 * Checks if cleanup has been performed less than a configurable time ago.
	 */
	private boolean wasCleanedRecently() throws Exception {
		Long lastCleanupTime = localDatabase.getCleanupTime();

		if (lastCleanupTime == null) {
			return false;
		}
		else {
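			// Cleanup times are stored in seconds, hence the division of the current time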
			return lastCleanupTime + options.getMinSecondsBetweenCleanups() > System.currentTimeMillis() / 1000;
		}
	}

	/**
	 * This method deletes all remote database files and writes new ones for each client using the local database.
	 * To make the state clear and prevent issues with replacing files, the new database files are given a higher number
	 * than all existing database files.
	 * Both the deletions and the new files are added to the current {@link RemoteTransaction}.
	 */
	private void mergeRemoteFiles() throws Exception {
		// Retrieve all database versions
		Map<String, List<DatabaseRemoteFile>> allDatabaseFilesMap = retrieveAllRemoteDatabaseFiles();

		boolean needMerge = needMerge(allDatabaseFilesMap);

		if (!needMerge) {
			logger.log(Level.INFO, "- No purging happened. Number of database files does not exceed threshold. Not merging remote files.");
			return;
		}

		// Now do the merge!
		logger.log(Level.INFO, "- Merge remote files ...");

		List<DatabaseRemoteFile> allToDeleteDatabaseFiles = new ArrayList<DatabaseRemoteFile>();
		Map<File, DatabaseRemoteFile> allMergedDatabaseFiles = new TreeMap<File, DatabaseRemoteFile>();

		for (String client : allDatabaseFilesMap.keySet()) {
			List<DatabaseRemoteFile> clientDatabaseFiles = allDatabaseFilesMap.get(client);
			Collections.sort(clientDatabaseFiles);
			logger.log(Level.INFO, "Databases: " + clientDatabaseFiles);

			// 1. Determine files to delete remotely
			List<DatabaseRemoteFile> toDeleteDatabaseFiles = new ArrayList<DatabaseRemoteFile>(clientDatabaseFiles);
			allToDeleteDatabaseFiles.addAll(toDeleteDatabaseFiles);

			// 2. Write new database file and save it in allMergedDatabaseFiles
			writeMergeFile(client, allMergedDatabaseFiles);
		}

		rememberDatabases(allMergedDatabaseFiles);

		// 3. Prepare transaction

		// Queue old databases for deletion
		for (RemoteFile toDeleteRemoteFile : allToDeleteDatabaseFiles) {
			logger.log(Level.INFO, "   + Deleting remote file " + toDeleteRemoteFile + " ...");
			remoteTransaction.delete(toDeleteRemoteFile);
		}

		// Queue new databases for uploading
		for (File lastLocalMergeDatabaseFile : allMergedDatabaseFiles.keySet()) {
			RemoteFile lastRemoteMergeDatabaseFile = allMergedDatabaseFiles.get(lastLocalMergeDatabaseFile);

			logger.log(Level.INFO, "   + Uploading new file {0} from local file {1} ...", new Object[] { lastRemoteMergeDatabaseFile,
					lastLocalMergeDatabaseFile });

			remoteTransaction.upload(lastLocalMergeDatabaseFile, lastRemoteMergeDatabaseFile);
		}

		finishMerging();

		// Update stats
		result.setMergedDatabaseFilesCount(allToDeleteDatabaseFiles.size());
	}

	/**
	 * This method decides if a merge is needed. Most of the time it will be, since we need to merge every time we remove
	 * any FileVersions, to delete them remotely. Another reason for merging is if the number of files exceeds a certain threshold.
	 * This threshold scales linearly with the number of clients that have database files.
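	 * For example, with {@code getMaxDatabaseFiles() == 10} (an illustrative value, not
	 * necessarily the default) and three known clients, merging is triggered once more
	 * than 30 database files exist remotely.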
	 *
	 * @param allDatabaseFilesMap used to determine if there are too many database files.
	 *
	 * @return true if there are too many database files or we have removed FileVersions, false otherwise.
	 */
	private boolean needMerge(Map<String, List<DatabaseRemoteFile>> allDatabaseFilesMap) {
		int numberOfDatabaseFiles = 0;

		for (String client : allDatabaseFilesMap.keySet()) {
			numberOfDatabaseFiles += allDatabaseFilesMap.get(client).size();
		}

		// A client will merge databases if the number of databases exceeds the maximum number per client times the number of clients
		int maxDatabaseFiles = options.getMaxDatabaseFiles() * allDatabaseFilesMap.keySet().size();
		boolean tooManyDatabaseFiles = numberOfDatabaseFiles > maxDatabaseFiles;
		boolean removedOldVersions = result.getRemovedOldVersionsCount() > 0;

		return removedOldVersions || tooManyDatabaseFiles || options.isForce();
	}

	/**
	 * This method writes the file with the merged databases for a single client and adds it to a Map containing all merged
	 * database files. This is done by querying the local database for all {@link DatabaseVersion}s by this client and
	 * serializing them.
	 *
	 * @param clientName The client for which we want to write the merged database file.
	 * @param allMergedDatabaseFiles Map to which we add the merged file once it is written.
	 */
	private void writeMergeFile(String clientName, Map<File, DatabaseRemoteFile> allMergedDatabaseFiles)
			throws StorageException, IOException {

		// Increment the version by 1, to signal cleanup has occurred

		long lastClientVersion = getNewestDatabaseFileVersion(clientName, localDatabase.getKnownDatabases());
		DatabaseRemoteFile newRemoteMergeDatabaseFile = new DatabaseRemoteFile(clientName, lastClientVersion + 1);

		File newLocalMergeDatabaseFile = config.getCache().getDatabaseFile(newRemoteMergeDatabaseFile.getName());

		logger.log(Level.INFO, "   + Writing new merge file (all files up to {0}) to {1} ...", new Object[] { lastClientVersion,
				newLocalMergeDatabaseFile });

		Iterator<DatabaseVersion> lastNDatabaseVersions = localDatabase.getDatabaseVersionsTo(clientName, lastClientVersion);

		DatabaseXmlSerializer databaseDAO = new DatabaseXmlSerializer(config.getTransformer());
		databaseDAO.save(lastNDatabaseVersions, newLocalMergeDatabaseFile);
		allMergedDatabaseFiles.put(newLocalMergeDatabaseFile, newRemoteMergeDatabaseFile);
	}

	/**
	 * This method locally remembers which databases were newly uploaded, such that they will not be downloaded in
	 * future {@link DownOperation}s.
	 */
	private void rememberDatabases(Map<File, DatabaseRemoteFile> allMergedDatabaseFiles) throws SQLException {
		// Remember newly written files so as not to redownload them later.
		List<DatabaseRemoteFile> newRemoteMergeDatabaseFiles = new ArrayList<DatabaseRemoteFile>();
		newRemoteMergeDatabaseFiles.addAll(allMergedDatabaseFiles.values());

		logger.log(Level.INFO, "Writing new known databases table: " + newRemoteMergeDatabaseFiles);

		localDatabase.removeKnownDatabases();
		localDatabase.writeKnownRemoteDatabases(newRemoteMergeDatabaseFiles);
	}

	/**
	 * This method finishes the merging of remote files by attempting to commit the {@link RemoteTransaction}.
	 * If this fails, it rolls back the local database.
	 */
	private void finishMerging() throws Exception {
		updateCleanupFileInTransaction();

		try {
			logger.log(Level.INFO, "Cleanup: COMMITTING TX ...");

			remoteTransaction.commit();
			localDatabase.commit();
		}
		catch (StorageException e) {
			logger.log(Level.INFO, "Cleanup: FAILED TO COMMIT TX. Rolling back ...");

			localDatabase.rollback();
			throw e;
		}

		logger.log(Level.INFO, "Cleanup: SUCCESS COMMITTING TX.");
	}

	/**
	 * This method obtains a Map with Lists of {@link DatabaseRemoteFile}s as values, by listing them in the remote repo and
	 * collecting the files per client.
	 *
	 * @return a Map with client names as keys and lists of corresponding DatabaseRemoteFiles as values.
	 */
	private Map<String, List<DatabaseRemoteFile>> retrieveAllRemoteDatabaseFiles() throws StorageException {
		SortedMap<String, List<DatabaseRemoteFile>> allDatabaseRemoteFilesMap = new TreeMap<String, List<DatabaseRemoteFile>>();
		Map<String, DatabaseRemoteFile> allDatabaseRemoteFiles = transferManager.list(DatabaseRemoteFile.class);

		for (Map.Entry<String, DatabaseRemoteFile> entry : allDatabaseRemoteFiles.entrySet()) {
			String clientName = entry.getValue().getClientName();

			if (allDatabaseRemoteFilesMap.get(clientName) == null) {
				allDatabaseRemoteFilesMap.put(clientName, new ArrayList<DatabaseRemoteFile>());
			}

			allDatabaseRemoteFilesMap.get(clientName).add(entry.getValue());
		}

		return allDatabaseRemoteFilesMap;
	}

	/**
	 * This method checks what the current cleanup number is, increments it by one and adds
	 * a new cleanup file to the transaction, to signify to other clients that a cleanup has occurred.
	 */
	private void updateCleanupFileInTransaction() throws StorageException, IOException {
		if (remoteTransaction.isEmpty()) {
			// No need to bump numbers
			return;
		}

		// Find all existing cleanup files
		Map<String, CleanupRemoteFile> cleanupFiles = transferManager.list(CleanupRemoteFile.class);

		long lastRemoteCleanupNumber = getLastRemoteCleanupNumber(cleanupFiles);

		// Schedule any existing cleanup files for deletion
		for (CleanupRemoteFile cleanupRemoteFile : cleanupFiles.values()) {
			remoteTransaction.delete(cleanupRemoteFile);
		}

		// Upload a new cleanup file that indicates changes
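		// (the uploaded file itself is empty; only the incremented number in its name carries meaning)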
		File newCleanupFile = config.getCache().createTempFile("cleanup");
		long newCleanupNumber = lastRemoteCleanupNumber + 1;

		remoteTransaction.upload(newCleanupFile, new CleanupRemoteFile(newCleanupNumber));
		localDatabase.writeCleanupNumber(newCleanupNumber);
	}

	/**
	 * The cleanup time is used to check if cleanup has been done recently. If it has, we do not need
	 * to clean again.
	 */
	private void updateLastCleanupTime() throws SQLException {
		// Set the cleanup time locally (in seconds)
		localDatabase.writeCleanupTime(System.currentTimeMillis() / 1000);
		localDatabase.commit();
	}
}