git.mxchange.org Git - simgear.git/commitdiff
Partial update support for HTTP repos.
author    James Turner <zakalawe@mac.com>
          Tue, 29 Mar 2016 17:11:56 +0000 (18:11 +0100)
committer Roland Haeder <roland@mxchange.org>
          Sat, 13 Aug 2016 08:21:16 +0000 (10:21 +0200)
Not used yet; we set full-update mode on repositories for the
moment. A pending TerraSync change will enable partial mode.
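
For reference, a minimal sketch of how a client is intended to drive the new
API (setEntireRepositoryMode(), addSubpath()) once partial mode is enabled.
This is not part of the commit: the helper name syncSelectedSubtrees, the base
URL and the sub-paths are illustrative only, and the polling loop simply pumps
the HTTP client the same way the waitForUpdateComplete() helper does in
test_repository.cxx.

    #include <simgear/io/HTTPClient.hxx>
    #include <simgear/io/HTTPRepository.hxx>
    #include <simgear/misc/sg_path.hxx>

    void syncSelectedSubtrees(simgear::HTTP::Client* client, const SGPath& localDir)
    {
        simgear::HTTPRepository repo(localDir, client);
        repo.setBaseUrl("http://example.com/terrasync");

        // current TerraSync behaviour: sync the whole repository
        //   repo.setEntireRepositoryMode();
        //   repo.update();

        // partial mode: only the named sub-trees (plus files along the
        // ancestor chain) are fetched; each addSubpath() call kicks off
        // any pending downloads for directories that are now enabled.
        repo.addSubpath("Airports/E/G");
        repo.addSubpath("Models");

        while (repo.isDoingSync()) {
            client->update(); // poll the HTTP client until requests finish
        }
    }

If the server content changes while a sync is running, failure() now reports
REPO_ERROR_CHECKSUM and all outstanding requests are cancelled; higher layers
can then restart the update from the root (see failedToUpdateChild below).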

simgear/io/HTTPRepository.cxx
simgear/io/HTTPRepository.hxx
simgear/io/test_repository.cxx
simgear/scene/tsync/terrasync.cxx

index dc4dc6eae999d9cf4fbdd74f04bfa4687fa36bb4..d87fe3f4c98adc817104f9f825cb3592408ff521 100644 (file)
@@ -57,8 +57,6 @@ namespace simgear
         {
         }
 
-        virtual void cancel();
-
         size_t contentSize() const
         {
             return _contentSize;
@@ -104,6 +102,7 @@ public:
         hashCacheDirty(false),
         p(parent),
         isUpdating(false),
+        updateEverything(false),
         status(HTTPRepository::REPO_NO_ERROR),
         totalDownloaded(0)
     { ; }
@@ -115,10 +114,14 @@ public:
     std::string baseUrl;
     SGPath basePath;
     bool isUpdating;
+    bool updateEverything;
+    string_list updatePaths;
     HTTPRepository::ResultCode status;
     HTTPDirectory* rootDir;
     size_t totalDownloaded;
 
+    void updateWaiting();
+
     HTTP::Request_ptr updateFile(HTTPDirectory* dir, const std::string& name,
                                  size_t sz);
     HTTP::Request_ptr updateDir(HTTPDirectory* dir, const std::string& hash,
@@ -192,10 +195,12 @@ class HTTPDirectory
     typedef std::vector<ChildInfo> ChildInfoList;
     ChildInfoList children;
 
+
 public:
     HTTPDirectory(HTTPRepoPrivate* repo, const std::string& path) :
         _repository(repo),
-        _relativePath(path)
+        _relativePath(path),
+        _state(DoNotUpdate)
   {
       assert(repo);
 
@@ -232,6 +237,8 @@ public:
         fpath.append(".dirindex");
         _repository->updatedFileContents(fpath, hash);
 
+        _state = Updated;
+
         children.clear();
         parseDirIndex(children);
         std::sort(children.begin(), children.end());
@@ -239,6 +246,7 @@ public:
 
     void failedToUpdate(HTTPRepository::ResultCode status)
     {
+        _state = UpdateFailed;
         if (_relativePath.isNull()) {
             // root dir failed
             _repository->failedToGetRootIndex(status);
@@ -249,7 +257,11 @@ public:
 
     void updateChildrenBasedOnHash()
     {
-        //SG_LOG(SG_TERRASYNC, SG_DEBUG, "updated children for:" << relativePath());
+        // if we got here for a dir which is still updating or excluded
+        // from updates, just bail out right now.
+        if (_state != Updated) {
+            return;
+        }
 
         string_list indexNames = indexChildren(),
             toBeUpdated, orphans;
@@ -284,6 +296,9 @@ public:
                     SGPath p(relativePath());
                     p.append(it->file());
                     HTTPDirectory* childDir = _repository->getOrCreateDirectory(p.str());
+                    if (childDir->_state == NotUpdated) {
+                        childDir->_state = Updated;
+                    }
                     childDir->updateChildrenBasedOnHash();
                 }
             }
@@ -301,6 +316,101 @@ public:
         scheduleUpdates(toBeUpdated);
     }
 
+    void markAsUpToDate()
+    {
+        _state = Updated;
+    }
+
+    void markAsUpdating()
+    {
+        assert(_state == NotUpdated);
+        _state = HTTPDirectory::UpdateInProgress;
+    }
+
+    void markAsEnabled()
+    {
+        // assert because this should only get invoked on newly created
+        // directory objects which are inside the sub-tree(s) to be updated
+        assert(_state == DoNotUpdate);
+        _state = NotUpdated;
+    }
+
+    void markSubtreeAsNeedingUpdate()
+    {
+        if (_state == Updated) {
+            _state = NotUpdated; // reset back to not-updated
+        }
+
+        ChildInfoList::iterator cit;
+        for (cit = children.begin(); cit != children.end(); ++cit) {
+            if (cit->type == ChildInfo::DirectoryType) {
+                SGPath p(relativePath());
+                p.append(cit->name);
+                HTTPDirectory* childDir = _repository->getOrCreateDirectory(p.str());
+                childDir->markSubtreeAsNeedingUpdate();
+            }
+        } // of child iteration
+    }
+
+    void markSubtreeAsEnabled()
+    {
+        if (_state == DoNotUpdate) {
+            markAsEnabled();
+        }
+
+        ChildInfoList::iterator cit;
+        for (cit = children.begin(); cit != children.end(); ++cit) {
+            if (cit->type == ChildInfo::DirectoryType) {
+                SGPath p(relativePath());
+                p.append(cit->name);
+                HTTPDirectory* childDir = _repository->getOrCreateDirectory(p.str());
+                childDir->markSubtreeAsEnabled();
+            }
+        } // of child iteration
+    }
+
+
+    void markAncestorChainAsEnabled()
+    {
+        if (_state == DoNotUpdate) {
+            markAsEnabled();
+        }
+
+        if (_relativePath.isNull()) {
+            return;
+        }
+
+        std::string prPath = _relativePath.dir();
+        if (prPath.empty()) {
+            _repository->rootDir->markAncestorChainAsEnabled();
+        } else {
+            HTTPDirectory* prDir = _repository->getOrCreateDirectory(prPath);
+            prDir->markAncestorChainAsEnabled();
+        }
+    }
+
+    void updateIfWaiting(const std::string& hash, size_t sz)
+    {
+        if (_state == NotUpdated) {
+            _repository->updateDir(this, hash, sz);
+            return;
+        }
+
+        if ((_state == DoNotUpdate) || (_state == UpdateInProgress)) {
+            return;
+        }
+
+        ChildInfoList::iterator cit;
+        for (cit = children.begin(); cit != children.end(); ++cit) {
+            if (cit->type == ChildInfo::DirectoryType) {
+                SGPath p(relativePath());
+                p.append(cit->name);
+                HTTPDirectory* childDir = _repository->getOrCreateDirectory(p.str());
+                childDir->updateIfWaiting(cit->hash, cit->sizeInBytes);
+            }
+        } // of child iteration
+    }
+
     void removeOrphans(const string_list& orphans)
     {
         string_list::const_iterator it;
@@ -337,6 +447,11 @@ public:
                 SGPath p(relativePath());
                 p.append(*it);
                 HTTPDirectory* childDir = _repository->getOrCreateDirectory(p.str());
+                if (childDir->_state == DoNotUpdate) {
+                    SG_LOG(SG_TERRASYNC, SG_WARN, "scheduleUpdate, child:" << *it << " is marked do not update so skipping");
+                    continue;
+                }
+
                 _repository->updateDir(childDir, cit->hash, cit->sizeInBytes);
             }
         }
@@ -365,11 +480,12 @@ public:
             fpath.append(file);
 
             if (it->hash != hash) {
+                // we don't erase the file on a hash mismatch, because if we're syncing during the
+                // middle of a server-side update, the downloaded file may actually become valid.
                 _repository->failedToUpdateChild(_relativePath, HTTPRepository::REPO_ERROR_CHECKSUM);
             } else {
                 _repository->updatedFileContents(fpath, hash);
                 _repository->totalDownloaded += sz;
-                //SG_LOG(SG_TERRASYNC, SG_INFO, "did update:" << fpath);
             } // of hash matches
         } // of found in child list
     }
@@ -495,7 +611,16 @@ private:
   HTTPRepoPrivate* _repository;
   SGPath _relativePath; // in URL and file-system space
 
-
+    typedef enum
+    {
+        NotUpdated,
+        UpdateInProgress,
+        Updated,
+        UpdateFailed,
+        DoNotUpdate
+    } State;
+
+    State _state;
 };
 
 HTTPRepository::HTTPRepository(const SGPath& base, HTTP::Client *cl) :
@@ -533,14 +658,38 @@ SGPath HTTPRepository::fsBase() const
 
 void HTTPRepository::update()
 {
-    if (_d->isUpdating) {
+    _d->rootDir->markSubtreeAsNeedingUpdate();
+    _d->updateWaiting();
+}
+
+void HTTPRepository::setEntireRepositoryMode()
+{
+    if (!_d->updateEverything) {
+        // this is a one-way decision
+        _d->updateEverything = true;
+    }
+
+    // probably overkill but not expensive so let's check everything
+    // we have in case someone did something funky and switched from partial
+    // to 'whole repo' updating.
+    _d->rootDir->markSubtreeAsEnabled();
+}
+
+
+void HTTPRepository::addSubpath(const std::string& relPath)
+{
+    if (_d->updateEverything) {
+        SG_LOG(SG_TERRASYNC, SG_WARN, "called HTTPRepository::addSubpath but updating everything");
         return;
     }
 
-    _d->status = REPO_NO_ERROR;
-    _d->isUpdating = true;
-    _d->failures.clear();
-    _d->updateDir(_d->rootDir, std::string(), 0);
+    _d->updatePaths.push_back(relPath);
+
+    HTTPDirectory* dir = _d->getOrCreateDirectory(relPath);
+    dir->markSubtreeAsEnabled();
+    dir->markAncestorChainAsEnabled();
+
+    _d->updateWaiting();
 }
 
 bool HTTPRepository::isDoingSync() const
@@ -590,12 +739,6 @@ HTTPRepository::failure() const
     return _d->status;
 }
 
-    void HTTPRepoGetRequest::cancel()
-    {
-        _directory->repository()->http->cancelRequest(this, "Reposiotry cancelled");
-        _directory = 0;
-    }
-
     class FileGetRequest : public HTTPRepoGetRequest
     {
     public:
@@ -605,7 +748,6 @@ HTTPRepository::failure() const
         {
             pathInRepo = _directory->absolutePath();
             pathInRepo.append(fileName);
-            //SG_LOG(SG_TERRASYNC, SG_INFO, "will GET file " << url());
         }
 
     protected:
@@ -628,6 +770,7 @@ HTTPRepository::failure() const
         virtual void onDone()
         {
             file->close();
+
             if (responseCode() == 200) {
                 std::string hash = strutils::encodeHex(sha1_result(&hashContext), HASH_LENGTH);
                 _directory->didUpdateFile(fileName, hash, contentSize());
@@ -649,7 +792,7 @@ HTTPRepository::failure() const
             if (pathInRepo.exists()) {
                 pathInRepo.remove();
             }
-            
+
             if (_directory) {
                 _directory->didFailToUpdateFile(fileName, HTTPRepository::REPO_ERROR_SOCKET);
                 _directory->repository()->finishedRequest(this);
@@ -676,7 +819,6 @@ HTTPRepository::failure() const
             _targetHash(targetHash)
         {
             sha1_init(&hashContext);
-           //SG_LOG(SG_TERRASYNC, SG_INFO, "will GET dir " << url());
         }
 
         void setIsRootDir()
@@ -725,8 +867,8 @@ HTTPRepository::failure() const
                     of.write(body.data(), body.size());
                     of.close();
                     _directory->dirIndexUpdated(hash);
-
-                    //SG_LOG(SG_TERRASYNC, SG_INFO, "updated dir index " << _directory->absolutePath());
+                } else {
+                    _directory->markAsUpToDate();
                 }
 
                 _directory->repository()->totalDownloaded += contentSize();
@@ -778,15 +920,18 @@ HTTPRepository::failure() const
 
     HTTPRepoPrivate::~HTTPRepoPrivate()
     {
+        // take a copy, since cancelRequest will fail the request and hence
+        // remove it from activeRequests, invalidating any iterator into it.
+        RequestVector copyOfActive(activeRequests);
+        RequestVector::iterator rq;
+        for (rq = copyOfActive.begin(); rq != copyOfActive.end(); ++rq) {
+            http->cancelRequest(*rq, "Repository object deleted");
+        }
+
         DirectoryVector::iterator it;
         for (it=directories.begin(); it != directories.end(); ++it) {
             delete *it;
         }
-
-        RequestVector::iterator r;
-        for (r=activeRequests.begin(); r != activeRequests.end(); ++r) {
-            (*r)->cancel();
-        }
     }
 
     HTTP::Request_ptr HTTPRepoPrivate::updateFile(HTTPDirectory* dir, const std::string& name, size_t sz)
@@ -799,6 +944,7 @@ HTTPRepository::failure() const
 
     HTTP::Request_ptr HTTPRepoPrivate::updateDir(HTTPDirectory* dir, const std::string& hash, size_t sz)
     {
+        dir->markAsUpdating();
         RepoRequestPtr r(new DirGetRequest(dir, hash));
         r->setContentSize(sz);
         makeRequest(r);
@@ -966,6 +1112,25 @@ HTTPRepository::failure() const
 
         HTTPDirectory* d = new HTTPDirectory(this, path);
         directories.push_back(d);
+        if (updateEverything) {
+            d->markAsEnabled();
+        } else {
+            string_list::const_iterator s;
+            bool shouldUpdate = false;
+
+            for (s = updatePaths.begin(); s != updatePaths.end(); ++s) {
+                size_t minLen = std::min(path.size(), s->size());
+                if (s->compare(0, minLen, path, 0, minLen) == 0) {
+                    shouldUpdate = true;
+                    break;
+                }
+            } // of paths iteration
+
+            if (shouldUpdate) {
+                d->markAsEnabled();
+            }
+        }
+
         return d;
     }
 
@@ -1002,10 +1167,11 @@ HTTPRepository::failure() const
     void HTTPRepoPrivate::finishedRequest(const RepoRequestPtr& req)
     {
         RequestVector::iterator it = std::find(activeRequests.begin(), activeRequests.end(), req);
-        if (it == activeRequests.end()) {
-            throw sg_exception("lost request somehow", req->url());
+        // in some cases, for example a checksum failure, we clear the active
+        // and queued request vectors, so the ::find above can fail
+        if (it != activeRequests.end()) {
+            activeRequests.erase(it);
         }
-        activeRequests.erase(it);
 
         if (!queuedRequests.empty()) {
             RepoRequestPtr rr = queuedRequests.front();
@@ -1030,6 +1196,27 @@ HTTPRepository::failure() const
     void HTTPRepoPrivate::failedToUpdateChild(const SGPath& relativePath,
                                               HTTPRepository::ResultCode fileStatus)
     {
+        if (fileStatus == HTTPRepository::REPO_ERROR_CHECKSUM) {
+            // stop updating, and mark repository as failed, becuase this
+            // usually indicates we need to start a fresh update from the
+            // root.
+            // (we could issue a retry here, but we leave that to higher layers)
+            status = fileStatus;
+
+            queuedRequests.clear();
+
+            RequestVector copyOfActive(activeRequests);
+            RequestVector::iterator rq;
+            for (rq = copyOfActive.begin(); rq != copyOfActive.end(); ++rq) {
+                //SG_LOG(SG_TERRASYNC, SG_DEBUG, "cancelling request for:" << (*rq)->url());
+                http->cancelRequest(*rq, "Repository update failed");
+            }
+
+
+            SG_LOG(SG_TERRASYNC, SG_WARN, "failed to update repository:" << baseUrl
+                   << ", possibly modified during sync");
+        }
+
         Failure f;
         f.path = relativePath;
         f.error = fileStatus;
@@ -1038,6 +1225,22 @@ HTTPRepository::failure() const
         SG_LOG(SG_TERRASYNC, SG_WARN, "failed to update entry:" << relativePath << " code:" << fileStatus);
     }
 
+    void HTTPRepoPrivate::updateWaiting()
+    {
+        if (!isUpdating) {
+            status = HTTPRepository::REPO_NO_ERROR;
+            isUpdating = true;
+            failures.clear();
+        }
+
+        // find to-be-updated sub-trees and kick them off
+        rootDir->updateIfWaiting(std::string(), 0);
 
+        // maybe there was nothing to do
+        if (activeRequests.empty()) {
+            status = HTTPRepository::REPO_NO_ERROR;
+            isUpdating = false;
+        }
+    }
 
 } // of namespace simgear
index 0b82f069baf51c1c104ec80e27d6048a81da6217..7d46c3ddb9c81ece2e98e3b89b13119cabc223ab 100644 (file)
@@ -57,6 +57,13 @@ public:
 
     virtual void update();
 
+    /**
+     * switch to syncing the entire repository (this is a one-way decision)
+     */
+    void setEntireRepositoryMode();
+
+    void addSubpath(const std::string& relPath);
+
     virtual bool isDoingSync() const;
 
     virtual ResultCode failure() const;
index f179c5d2b1ad46cbf5f238888d5df9408335f658..da47d0053e1b68667c5c67ea4318e3aa6a7a560d 100644 (file)
@@ -19,6 +19,8 @@
 #include <simgear/debug/logstream.hxx>
 #include <simgear/misc/sg_dir.hxx>
 #include <simgear/structure/exception.hxx>
+#include <simgear/structure/callback.hxx>
+
 #include <simgear/io/sg_file.hxx>
 
 using namespace simgear;
@@ -68,9 +70,12 @@ public:
     int requestCount;
     bool getWillFail;
     bool returnCorruptData;
+    std::auto_ptr<SGCallback> accessCallback;
 
     void clearRequestCounts();
 
+    void clearFailFlags();
+
     void setGetWillFail(bool b)
     {
         getWillFail = b;
@@ -113,7 +118,7 @@ public:
         if (path.empty()) {
             return this;
         }
-        
+
         string_list pathParts = strutils::split(path, "/");
         TestRepoEntry* entry = childEntry(pathParts.front());
         if (pathParts.size() == 1) {
@@ -213,6 +218,18 @@ void TestRepoEntry::clearRequestCounts()
     }
 }
 
+void TestRepoEntry::clearFailFlags()
+{
+    getWillFail = false;
+    returnCorruptData = false;
+
+    if (isDir) {
+        for (size_t i=0; i<children.size(); ++i) {
+            children[i]->clearFailFlags();
+        }
+    }
+}
+
 TestRepoEntry* global_repo = NULL;
 
 class TestRepositoryChannel : public TestServerChannel
@@ -249,6 +266,10 @@ public:
                 return;
             }
 
+            if (entry->accessCallback.get()) {
+                (*entry->accessCallback)();
+            }
+
             if (entry->getWillFail) {
                 sendErrorResponse(404, false, "entry marked to fail explicitly:" + repoPath);
                 return;
@@ -316,6 +337,15 @@ void verifyFileState(const SGPath& fsRoot, const std::string& relPath)
     }
 }
 
+void verifyFileNotPresent(const SGPath& fsRoot, const std::string& relPath)
+{
+    SGPath p(fsRoot);
+    p.append(relPath);
+    if (p.exists()) {
+        throw sg_error("Present file system entry", relPath);
+    }
+}
+
 void verifyRequestCount(const std::string& relPath, int count)
 {
     TestRepoEntry* entry = global_repo->findEntry(relPath);
@@ -370,9 +400,10 @@ void testBasicClone(HTTP::Client* cl)
     p.append("http_repo_basic");
     simgear::Dir pd(p);
     pd.removeChildren();
-    
+
     repo.reset(new HTTPRepository(p, cl));
     repo->setBaseUrl("http://localhost:2000/repo");
+    repo->setEntireRepositoryMode();
     repo->update();
 
     waitForUpdateComplete(cl, repo.get());
@@ -410,6 +441,7 @@ void testModifyLocalFiles(HTTP::Client* cl)
 
     repo.reset(new HTTPRepository(p, cl));
     repo->setBaseUrl("http://localhost:2000/repo");
+    repo->setEntireRepositoryMode();
     repo->update();
 
     waitForUpdateComplete(cl, repo.get());
@@ -451,6 +483,7 @@ void testMergeExistingFileWithoutDownload(HTTP::Client* cl)
 
     repo.reset(new HTTPRepository(p, cl));
     repo->setBaseUrl("http://localhost:2000/repo");
+    repo->setEntireRepositoryMode();
 
     createFile(p, "dirC/fileCB", 4); // should match
     createFile(p, "dirC/fileCC", 3); // mismatch
@@ -493,6 +526,7 @@ void testLossOfLocalFiles(HTTP::Client* cl)
 
     repo.reset(new HTTPRepository(p, cl));
     repo->setBaseUrl("http://localhost:2000/repo");
+    repo->setEntireRepositoryMode();
     repo->update();
     waitForUpdateComplete(cl, repo.get());
     verifyFileState(p, "dirB/subdirA/fileBAA");
@@ -530,6 +564,7 @@ void testAbandonMissingFiles(HTTP::Client* cl)
 
     repo.reset(new HTTPRepository(p, cl));
     repo->setBaseUrl("http://localhost:2000/repo");
+    repo->setEntireRepositoryMode();
     repo->update();
     waitForUpdateComplete(cl, repo.get());
     if (repo->failure() != HTTPRepository::REPO_PARTIAL_UPDATE) {
@@ -554,18 +589,285 @@ void testAbandonCorruptFiles(HTTP::Client* cl)
 
     repo.reset(new HTTPRepository(p, cl));
     repo->setBaseUrl("http://localhost:2000/repo");
+    repo->setEntireRepositoryMode();
+
     repo->update();
     waitForUpdateComplete(cl, repo.get());
-    if (repo->failure() != HTTPRepository::REPO_PARTIAL_UPDATE) {
+    if (repo->failure() != HTTPRepository::REPO_ERROR_CHECKSUM) {
         throw sg_exception("Bad result from corrupt files test");
     }
 
+    repo.reset();
+    if (cl->hasActiveRequests()) {
+        cl->debugDumpRequests();
+        throw sg_exception("Connection still has requests active");
+    }
+
     std::cout << "Passed test: detect corrupted download" << std::endl;
 }
 
+void testPartialUpdateBasic(HTTP::Client* cl)
+{
+    std::auto_ptr<HTTPRepository> repo;
+    SGPath p(simgear::Dir::current().path());
+    p.append("http_repo_partial_update");
+    simgear::Dir pd(p);
+    if (pd.exists()) {
+        pd.removeChildren();
+    }
+
+    global_repo->clearRequestCounts();
+    global_repo->clearFailFlags();
+    global_repo->defineFile("dirA/subdirF/fileAFA");
+    global_repo->defineFile("dirA/subdirF/fileAFB");
+    global_repo->defineFile("dirA/subdirH/fileAHA");
+    global_repo->defineFile("dirA/subdirH/fileAHB");
+
+    global_repo->defineFile("dirG/subdirA/subsubA/fileGAAB");
+
+// request subdir of A
+    repo.reset(new HTTPRepository(p, cl));
+    repo->setBaseUrl("http://localhost:2000/repo");
+    repo->addSubpath("dirA/subdirF");
+    waitForUpdateComplete(cl, repo.get());
+
+    verifyFileState(p, "dirA/subdirF/fileAFA");
+    verifyFileState(p, "dirA/subdirF/fileAFB");
+
+    verifyFileState(p, "fileA"); // files are always synced
+    verifyFileState(p, "dirA/fileAB");
+    verifyFileNotPresent(p, "dirB/subdirB/fileBBB");
+    verifyFileNotPresent(p, "dirD");
+    verifyFileNotPresent(p, "dirA/subdirH/fileAHB");
+
+    verifyRequestCount("dirA", 1);
+    verifyRequestCount("dirA/fileAA", 1);
+    verifyRequestCount("dirA/subdirF", 1);
+    verifyRequestCount("dirA/subdirF/fileAFA", 1);
+    verifyRequestCount("dirA/subdirF/fileAFB", 1);
+    verifyRequestCount("dirB", 0);
+    verifyRequestCount("dirG", 0);
+
+// now request dir B
+    repo->addSubpath("dirB");
+    waitForUpdateComplete(cl, repo.get());
+
+    verifyFileState(p, "dirA/subdirF/fileAFB");
+    verifyFileState(p, "dirB/subdirB/fileBBA");
+    verifyFileState(p, "dirB/subdirB/fileBBB");
+
+    verifyRequestCount("dirB", 1);
+    verifyRequestCount("dirB/subdirA/fileBAC", 1);
+    verifyRequestCount("dirA", 1);
+    verifyRequestCount("dirA/fileAA", 1);
+    verifyRequestCount("dirG", 0);
+
+// widen subdir to parent
+    repo->addSubpath("dirA");
+    waitForUpdateComplete(cl, repo.get());
+
+    verifyFileState(p, "dirA/subdirH/fileAHA");
+    verifyFileState(p, "dirA/subdirH/fileAHB");
+
+    verifyRequestCount("dirA", 1);
+    verifyRequestCount("dirB/subdirA/fileBAC", 1);
+    verifyRequestCount("dirA/subdirF/fileAFA", 1);
+
+// request an already fetched subdir - should be a no-op
+    repo->addSubpath("dirB/subdirB");
+    waitForUpdateComplete(cl, repo.get());
+
+    verifyRequestCount("dirB", 1);
+    verifyRequestCount("dirB/subdirB/fileBBB", 1);
+
+// add new / modify files inside
+    global_repo->defineFile("dirA/subdirF/fileAFC");
+    global_repo->defineFile("dirA/subdirF/fileAFD");
+    repo->update();
+    waitForUpdateComplete(cl, repo.get());
+
+    if (global_repo->requestCount != 2) {
+        throw sg_exception("Bad root request count");
+    }
+
+    verifyFileState(p, "dirA/subdirF/fileAFC");
+    verifyFileState(p, "dirA/subdirF/fileAFD");
+
+    std::cout << "Passed test: basic partial clone and update" << std::endl;
+}
+
+void testPartialUpdateExisting(HTTP::Client* cl)
+{
+    std::auto_ptr<HTTPRepository> repo;
+    SGPath p(simgear::Dir::current().path());
+    p.append("http_repo_partial_update_existing");
+    simgear::Dir pd(p);
+    if (pd.exists()) {
+        pd.removeChildren();
+    }
+
+    global_repo->clearRequestCounts();
+    global_repo->clearFailFlags();
+
+// full update to sync everything
+    repo.reset(new HTTPRepository(p, cl));
+    repo->setBaseUrl("http://localhost:2000/repo");
+    repo->setEntireRepositoryMode();
+    repo->update();
+    waitForUpdateComplete(cl, repo.get());
+
+// new repo for partial
+    global_repo->clearRequestCounts();
+    repo.reset(new HTTPRepository(p, cl));
+    repo->setBaseUrl("http://localhost:2000/repo");
+    repo->addSubpath("dirA/subdirF");
+    waitForUpdateComplete(cl, repo.get());
+
+    if (global_repo->requestCount != 1) {
+        throw sg_exception("Bad root request count");
+    }
+
+    verifyRequestCount("dirA", 0);
+    verifyRequestCount("dirA/fileAA", 0);
+    verifyRequestCount("dirA/subdirF", 0);
+    verifyRequestCount("dirA/subdirF/fileAFA", 0);
+    verifyRequestCount("dirA/subdirF/fileAFB", 0);
+
+// and request more dirs
+    // this is a good simulation of terrasync requesting more subdirs of
+    // an already created and in-sync tree; it should not generate any more
+    // network trips
+    repo->addSubpath("dirC");
+
+    verifyFileState(p, "dirC/subdirA/subsubA/fileCAAA");
+    verifyRequestCount("dirC/subdirA/subsubA/fileCAAA", 0);
+
+    if (global_repo->requestCount != 1) {
+        throw sg_exception("Bad root request count");
+    }
+
+    std::cout << "Passed test: partial update of existing" << std::endl;
+}
+
+void modifyBTree()
+{
+    std::cout << "Modifying sub-tree" << std::endl;
+
+    global_repo->findEntry("dirB/subdirA/fileBAC")->revision++;
+    global_repo->defineFile("dirB/subdirZ/fileBZA");
+    global_repo->findEntry("dirB/subdirB/fileBBB")->revision++;
+}
+
+void testPartialUpdateWidenWhileInProgress(HTTP::Client* cl)
+{
+    std::auto_ptr<HTTPRepository> repo;
+    SGPath p(simgear::Dir::current().path());
+    p.append("http_repo_partial_update_widen");
+    simgear::Dir pd(p);
+    if (pd.exists()) {
+        pd.removeChildren();
+    }
+
+    global_repo->clearRequestCounts();
+    global_repo->clearFailFlags();
+
+    // full update to sync everything
+    repo.reset(new HTTPRepository(p, cl));
+    repo->setBaseUrl("http://localhost:2000/repo");
+
+    repo->addSubpath("dirA/subdirF");
+    repo->addSubpath("dirB/subdirB");
+    waitForUpdateComplete(cl, repo.get());
+
+    verifyRequestCount("dirA/subdirF", 1);
+    if (global_repo->requestCount != 1) {
+        throw sg_exception("Bad root request count");
+    }
+
+    repo->addSubpath("dirA");
+    repo->addSubpath("dirB");
+    repo->addSubpath("dirC");
+
+    waitForUpdateComplete(cl, repo.get());
+
+    // should not request the root again
+    verifyRequestCount("dirA/subdirF", 1);
+    if (global_repo->requestCount != 1) {
+        throw sg_exception("Bad root request count");
+    }
+
+    verifyFileState(p, "dirA/subdirF/fileAFA");
+    verifyFileState(p, "dirC/subdirA/subsubA/fileCAAA");
+
+    std::cout << "Passed test: partial update with widen" << std::endl;
+}
+
+void testServerModifyDuringSync(HTTP::Client* cl)
+{
+    std::auto_ptr<HTTPRepository> repo;
+    SGPath p(simgear::Dir::current().path());
+    p.append("http_repo_server_modify_during_sync");
+    simgear::Dir pd(p);
+    if (pd.exists()) {
+        pd.removeChildren();
+    }
+
+    global_repo->clearRequestCounts();
+    global_repo->clearFailFlags();
+
+    repo.reset(new HTTPRepository(p, cl));
+    repo->setBaseUrl("http://localhost:2000/repo");
+    repo->setEntireRepositoryMode();
+
+    global_repo->findEntry("dirA/fileAA")->accessCallback.reset(make_callback(&modifyBTree));
+
+    repo->update();
+    waitForUpdateComplete(cl, repo.get());
+
+    global_repo->findEntry("dirA/fileAA")->accessCallback.reset();
+
+    if (repo->failure() != HTTPRepository::REPO_ERROR_CHECKSUM) {
+        throw sg_exception("Bad result from corrupt files test");
+    }
+
+    std::cout << "Passed test modify server during sync" << std::endl;
+
+}
+
+void testDestroyDuringSync(HTTP::Client* cl)
+{
+    std::auto_ptr<HTTPRepository> repo;
+    SGPath p(simgear::Dir::current().path());
+    p.append("http_repo_destory_during_sync");
+    simgear::Dir pd(p);
+    if (pd.exists()) {
+        pd.removeChildren();
+    }
+
+    global_repo->clearRequestCounts();
+    global_repo->clearFailFlags();
+
+    repo.reset(new HTTPRepository(p, cl));
+    repo->setBaseUrl("http://localhost:2000/repo");
+    repo->setEntireRepositoryMode();
+
+    repo->update();
+
+    // would ideally spin slightly here
+
+    repo.reset();
+
+    if (cl->hasActiveRequests()) {
+        throw sg_exception("destory of repo didn't clean up requests");
+    }
+
+    std::cout << "Passed test destory during sync" << std::endl;
+}
+
+
 int main(int argc, char* argv[])
 {
-  sglog().setLogLevels( SG_ALL, SG_INFO );
+  sglog().setLogLevels( SG_ALL, SG_DEBUG );
 
   HTTP::Client cl;
   cl.setMaxConnections(1);
@@ -581,6 +883,8 @@ int main(int argc, char* argv[])
     global_repo->defineFile("dirB/subdirA/fileBAA");
     global_repo->defineFile("dirB/subdirA/fileBAB");
     global_repo->defineFile("dirB/subdirA/fileBAC");
+    global_repo->defineFile("dirB/subdirB/fileBBA");
+    global_repo->defineFile("dirB/subdirB/fileBBB");
     global_repo->defineFile("dirC/subdirA/subsubA/fileCAAA");
 
     testBasicClone(&cl);
@@ -595,5 +899,13 @@ int main(int argc, char* argv[])
 
     testAbandonCorruptFiles(&cl);
 
+    testPartialUpdateBasic(&cl);
+    testPartialUpdateExisting(&cl);
+    testPartialUpdateWidenWhileInProgress(&cl);
+
+    testServerModifyDuringSync(&cl);
+
+    testDestroyDuringSync(&cl);
+
     return 0;
 }
index 62aa49056d673be2217c5deab5e6fe361d4fe140..da49af48b1092cd30e14d442fc1c53e9a4979859 100644 (file)
@@ -486,6 +486,7 @@ void SGTerraSync::WorkerThread::updateSyncSlot(SyncSlot &slot)
         } // of creating directory step
 
         slot.repository.reset(new HTTPRepository(path, &_http));
+        slot.repository->setEntireRepositoryMode();
         slot.repository->setBaseUrl(_httpServer + "/" + slot.currentItem._dir);
 
         try {