X-Git-Url: https://git.mxchange.org/?a=blobdiff_plain;f=simgear%2Fio%2FHTTPRepository.cxx;h=dc4dc6eae999d9cf4fbdd74f04bfa4687fa36bb4;hb=974cb3b3d31bb687b0dda2e37960394ee5f1b6e5;hp=aa7cf73ba07a539e414a17e1d536dc599088358c;hpb=da6b395008a9b22c8e1ca1a1fcbf21d688a5a185;p=simgear.git diff --git a/simgear/io/HTTPRepository.cxx b/simgear/io/HTTPRepository.cxx index aa7cf73b..dc4dc6ea 100644 --- a/simgear/io/HTTPRepository.cxx +++ b/simgear/io/HTTPRepository.cxx @@ -18,8 +18,9 @@ #include "HTTPRepository.hxx" +#include + #include -#include #include #include #include @@ -38,6 +39,7 @@ #include #include #include +#include #include @@ -46,6 +48,33 @@ namespace simgear class HTTPDirectory; + class HTTPRepoGetRequest : public HTTP::Request + { + public: + HTTPRepoGetRequest(HTTPDirectory* d, const std::string& u) : + HTTP::Request(u), + _directory(d) + { + } + + virtual void cancel(); + + size_t contentSize() const + { + return _contentSize; + } + + void setContentSize(size_t sz) + { + _contentSize = sz; + } + protected: + HTTPDirectory* _directory; + size_t _contentSize; + }; + + typedef SGSharedPtr RepoRequestPtr; + class HTTPRepoPrivate { public: @@ -60,23 +89,40 @@ public: typedef std::vector HashCache; HashCache hashes; + bool hashCacheDirty; + + struct Failure + { + SGPath path; + HTTPRepository::ResultCode error; + }; + + typedef std::vector FailureList; + FailureList failures; HTTPRepoPrivate(HTTPRepository* parent) : - p(parent), - isUpdating(false), - status(AbstractRepository::REPO_NO_ERROR) + hashCacheDirty(false), + p(parent), + isUpdating(false), + status(HTTPRepository::REPO_NO_ERROR), + totalDownloaded(0) { ; } + ~HTTPRepoPrivate(); + HTTPRepository* p; // link back to outer HTTP::Client* http; std::string baseUrl; SGPath basePath; bool isUpdating; - AbstractRepository::ResultCode status; + HTTPRepository::ResultCode status; HTTPDirectory* rootDir; + size_t totalDownloaded; - HTTP::Request_ptr updateFile(HTTPDirectory* dir, const std::string& name); - HTTP::Request_ptr updateDir(HTTPDirectory* dir); + HTTP::Request_ptr updateFile(HTTPDirectory* dir, const std::string& name, + size_t sz); + HTTP::Request_ptr updateDir(HTTPDirectory* dir, const std::string& hash, + size_t sz); std::string hashForPath(const SGPath& p); void updatedFileContents(const SGPath& p, const std::string& newHash); @@ -84,12 +130,16 @@ public: std::string computeHashForPath(const SGPath& p); void writeHashCache(); - void failedToGetRootIndex(); + void failedToGetRootIndex(HTTPRepository::ResultCode st); + void failedToUpdateChild(const SGPath& relativePath, + HTTPRepository::ResultCode fileStatus); - typedef std::vector RequestVector; - RequestVector requests; + typedef std::vector RequestVector; + RequestVector queuedRequests, + activeRequests; - void finishedRequest(const HTTP::Request_ptr& req); + void makeRequest(RepoRequestPtr req); + void finishedRequest(const RepoRequestPtr& req); HTTPDirectory* getOrCreateDirectory(const std::string& path); bool deleteDirectory(const std::string& path); @@ -109,10 +159,10 @@ class HTTPDirectory DirectoryType }; - ChildInfo(Type ty, const char* nameData, const char* hashData) : + ChildInfo(Type ty, const std::string & nameData, const std::string & hashData) : type(ty), name(nameData), - hash(hashData ? 
hashData : ""), + hash(hashData), sizeInBytes(0) { } @@ -124,9 +174,9 @@ class HTTPDirectory sizeInBytes(other.sizeInBytes) { } - void setSize(const char* sizeData) + void setSize(const std::string & sizeData) { - sizeInBytes = ::strtol(sizeData, NULL, 10); + sizeInBytes = ::strtol(sizeData.c_str(), NULL, 10); } bool operator<(const ChildInfo& other) const @@ -147,16 +197,15 @@ public: _repository(repo), _relativePath(path) { + assert(repo); + SGPath p(absolutePath()); if (p.exists()) { -// what is indexValid for? -// bool indexValid = false; try { // already exists on disk parseDirIndex(children); -// indexValid = true; std::sort(children.begin(), children.end()); - } catch (sg_exception& e) { + } catch (sg_exception& ) { // parsing cache failed children.clear(); } @@ -188,19 +237,19 @@ public: std::sort(children.begin(), children.end()); } - void failedToUpdate() + void failedToUpdate(HTTPRepository::ResultCode status) { if (_relativePath.isNull()) { // root dir failed - _repository->failedToGetRootIndex(); + _repository->failedToGetRootIndex(status); } else { - SG_LOG(SG_TERRASYNC, SG_WARN, "failed to update dir:" << _relativePath); + _repository->failedToUpdateChild(_relativePath, status); } } void updateChildrenBasedOnHash() { - SG_LOG(SG_TERRASYNC, SG_DEBUG, "updated children for:" << relativePath()); + //SG_LOG(SG_TERRASYNC, SG_DEBUG, "updated children for:" << relativePath()); string_list indexNames = indexChildren(), toBeUpdated, orphans; @@ -208,40 +257,41 @@ public: PathList fsChildren = d.children(0); PathList::const_iterator it = fsChildren.begin(); + for (; it != fsChildren.end(); ++it) { ChildInfo info(it->isDir() ? ChildInfo::DirectoryType : ChildInfo::FileType, - it->file().c_str(), NULL); + it->file(), ""); std::string hash = hashForChild(info); ChildInfoList::iterator c = findIndexChild(it->file()); if (c == children.end()) { + SG_LOG(SG_TERRASYNC, SG_DEBUG, "is orphan '" << it->file() << "'" ); orphans.push_back(it->file()); } else if (c->hash != hash) { + SG_LOG(SG_TERRASYNC, SG_DEBUG, "hash mismatch'" << it->file() ); // file exists, but hash mismatch, schedule update if (!hash.empty()) { - SG_LOG(SG_TERRASYNC, SG_INFO, "file exists but hash is wrong for:" << c->name); + SG_LOG(SG_TERRASYNC, SG_DEBUG, "file exists but hash is wrong for:" << it->file() ); + SG_LOG(SG_TERRASYNC, SG_DEBUG, "on disk:" << hash << " vs in info:" << c->hash); } - toBeUpdated.push_back(c->name); + toBeUpdated.push_back(it->file() ); } else { // file exists and hash is valid. If it's a directory, // perform a recursive check. 
+ SG_LOG(SG_TERRASYNC, SG_DEBUG, "file exists hash is good:" << it->file() ); if (c->type == ChildInfo::DirectoryType) { SGPath p(relativePath()); - p.append(c->name); + p.append(it->file()); HTTPDirectory* childDir = _repository->getOrCreateDirectory(p.str()); childDir->updateChildrenBasedOnHash(); - } else { - SG_LOG(SG_TERRASYNC, SG_INFO, "existing file is ok:" << c->name); } } // remove existing file system children from the index list, // so we can detect new children - string_list::iterator it = std::find(indexNames.begin(), indexNames.end(), c->name); - if (it != indexNames.end()) { - indexNames.erase(it); - } + // https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Erase-Remove + indexNames.erase(std::remove(indexNames.begin(), indexNames.end(), it->file()), indexNames.end()); } // of real children iteration // all remaining names in indexChilden are new children @@ -280,13 +330,14 @@ public: continue; } + SG_LOG(SG_TERRASYNC,SG_DEBUG, "scheduling update for " << *it ); if (cit->type == ChildInfo::FileType) { - _repository->updateFile(this, *it); + _repository->updateFile(this, *it, cit->sizeInBytes); } else { SGPath p(relativePath()); p.append(*it); HTTPDirectory* childDir = _repository->getOrCreateDirectory(p.str()); - _repository->updateDir(childDir); + _repository->updateDir(childDir, cit->hash, cit->sizeInBytes); } } } @@ -303,19 +354,32 @@ public: return _relativePath; } - void didUpdateFile(const std::string& file, const std::string& hash) + void didUpdateFile(const std::string& file, const std::string& hash, size_t sz) { - SGPath fpath(_relativePath); - fpath.append(file); - _repository->updatedFileContents(fpath, hash); - SG_LOG(SG_TERRASYNC, SG_INFO, "did update:" << fpath); + // check hash matches what we expected + ChildInfoList::iterator it = findIndexChild(file); + if (it == children.end()) { + SG_LOG(SG_TERRASYNC, SG_WARN, "updated file but not found in dir:" << _relativePath << " " << file); + } else { + SGPath fpath(_relativePath); + fpath.append(file); + + if (it->hash != hash) { + _repository->failedToUpdateChild(_relativePath, HTTPRepository::REPO_ERROR_CHECKSUM); + } else { + _repository->updatedFileContents(fpath, hash); + _repository->totalDownloaded += sz; + //SG_LOG(SG_TERRASYNC, SG_INFO, "did update:" << fpath); + } // of hash matches + } // of found in child list } - void didFailToUpdateFile(const std::string& file) + void didFailToUpdateFile(const std::string& file, + HTTPRepository::ResultCode status) { SGPath fpath(_relativePath); fpath.append(file); - SG_LOG(SG_TERRASYNC, SG_WARN, "failed to update:" << fpath); + _repository->failedToUpdateChild(fpath, status); } private: @@ -333,54 +397,66 @@ private: return std::find_if(children.begin(), children.end(), ChildWithName(name)); } - void parseDirIndex(ChildInfoList& children) + bool parseDirIndex(ChildInfoList& children) { SGPath p(absolutePath()); p.append(".dirindex"); - std::ifstream indexStream( p.str().c_str(), std::ios::in ); + if (!p.exists()) { + return false; + } + + std::ifstream indexStream( p.c_str(), std::ios::in ); if ( !indexStream.is_open() ) { throw sg_io_exception("cannot open dirIndex file", p); } - char lineBuffer[512]; - char* lastToken; - while (!indexStream.eof() ) { - indexStream.getline(lineBuffer, 512); - lastToken = 0; - char* typeData = ::strtok_r(lineBuffer, ":", &lastToken); - if (!typeData) { - continue; // skip blank line + std::string line; + std::getline( indexStream, line ); + line = simgear::strutils::strip(line); + + // skip blank line or comment beginning with '#' + 
if( line.empty() || line[0] == '#' ) + continue; + + string_list tokens = simgear::strutils::split( line, ":" ); + + std::string typeData = tokens[0]; + + if( typeData == "version" ) { + if( tokens.size() < 2 ) { + SG_LOG(SG_TERRASYNC, SG_WARN, "malformed .dirindex file: missing version number in line '" << line << "'" ); + break; + } + if( tokens[1] != "1" ) { + SG_LOG(SG_TERRASYNC, SG_WARN, "invalid .dirindex file: wrong version number '" << tokens[1] << "' (expected 1)" ); + break; + } + continue; // version is good, continue } - if (!typeData) { - // malformed entry - throw sg_io_exception("Malformed dir index file", p); + if( typeData == "path" ) { + continue; // ignore path, next line } - if (!strcmp(typeData, "version")) { - continue; - } else if (!strcmp(typeData, "path")) { + if( tokens.size() < 3 ) { + SG_LOG(SG_TERRASYNC, SG_WARN, "malformed .dirindex file: not enough tokens in line '" << line << "' (ignoring line)" ); continue; } - char* nameData = ::strtok_r(NULL, ":", &lastToken); - char* hashData = ::strtok_r(NULL, ":", &lastToken); - char* sizeData = ::strtok_r(NULL, ":", &lastToken); - - if (typeData[0] == 'f') { - children.push_back(ChildInfo(ChildInfo::FileType, nameData, hashData)); - } else if (typeData[0] == 'd') { - children.push_back(ChildInfo(ChildInfo::DirectoryType, nameData, hashData)); - } else { - throw sg_io_exception("Malformed line code in dir index file", p); + if (typeData != "f" && typeData != "d" ) { + SG_LOG(SG_TERRASYNC, SG_WARN, "malformed .dirindex file: invalid type in line '" << line << "', expected 'd' or 'f', (ignoring line)" ); + continue; } + children.push_back(ChildInfo(typeData == "f" ? ChildInfo::FileType : ChildInfo::DirectoryType, tokens[1], tokens[2])); - if (sizeData) { - children.back().setSize(sizeData); + if (tokens.size() > 3) { + children.back().setSize(tokens[3]); } } + + return true; } void removeChild(const std::string& name) @@ -402,6 +478,7 @@ private: if (!ok) { SG_LOG(SG_TERRASYNC, SG_WARN, "removal failed for:" << p); + throw sg_io_exception("Failed to remove existing file/dir:", p); } } @@ -427,6 +504,7 @@ HTTPRepository::HTTPRepository(const SGPath& base, HTTP::Client *cl) : _d->http = cl; _d->basePath = base; _d->rootDir = new HTTPDirectory(_d.get(), ""); + _d->parseHashCache(); } HTTPRepository::~HTTPRepository() @@ -461,7 +539,8 @@ void HTTPRepository::update() _d->status = REPO_NO_ERROR; _d->isUpdating = true; - _d->updateDir(_d->rootDir); + _d->failures.clear(); + _d->updateDir(_d->rootDir, std::string(), 0); } bool HTTPRepository::isDoingSync() const @@ -473,61 +552,108 @@ bool HTTPRepository::isDoingSync() const return _d->isUpdating; } -AbstractRepository::ResultCode +size_t HTTPRepository::bytesToDownload() const +{ + size_t result = 0; + + HTTPRepoPrivate::RequestVector::const_iterator r; + for (r = _d->queuedRequests.begin(); r != _d->queuedRequests.end(); ++r) { + result += (*r)->contentSize(); + } + + for (r = _d->activeRequests.begin(); r != _d->activeRequests.end(); ++r) { + result += (*r)->contentSize() - (*r)->responseBytesReceived(); + } + + return result; +} + +size_t HTTPRepository::bytesDownloaded() const +{ + size_t result = _d->totalDownloaded; + + HTTPRepoPrivate::RequestVector::const_iterator r; + for (r = _d->activeRequests.begin(); r != _d->activeRequests.end(); ++r) { + result += (*r)->responseBytesReceived(); + } + + return result; +} + +HTTPRepository::ResultCode HTTPRepository::failure() const { + if ((_d->status == REPO_NO_ERROR) && !_d->failures.empty()) { + return REPO_PARTIAL_UPDATE; + } 
+ return _d->status; } - class FileGetRequest : public HTTP::Request + void HTTPRepoGetRequest::cancel() + { + _directory->repository()->http->cancelRequest(this, "Reposiotry cancelled"); + _directory = 0; + } + + class FileGetRequest : public HTTPRepoGetRequest { public: FileGetRequest(HTTPDirectory* d, const std::string& file) : - HTTP::Request(makeUrl(d, file)), - directory(d), - fileName(file), - fd(-1) + HTTPRepoGetRequest(d, makeUrl(d, file)), + fileName(file) { - SG_LOG(SG_TERRASYNC, SG_INFO, "will GET file " << url()); - + pathInRepo = _directory->absolutePath(); + pathInRepo.append(fileName); + //SG_LOG(SG_TERRASYNC, SG_INFO, "will GET file " << url()); } protected: virtual void gotBodyData(const char* s, int n) { - if (fd < 0) { - SGPath p(pathInRepo()); -#ifdef SG_WINDOWS - int mode = 00666; -#else - mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; -#endif - fd = ::open(p.c_str(), O_CREAT | O_TRUNC | O_RDWR, mode); - if (fd < 0) { - SG_LOG(SG_TERRASYNC, SG_WARN, "unable to create file " << p); - // fail + if (!file.get()) { + file.reset(new SGBinaryFile(pathInRepo.str())); + if (!file->open(SG_IO_OUT)) { + SG_LOG(SG_TERRASYNC, SG_WARN, "unable to create file " << pathInRepo); + _directory->repository()->http->cancelRequest(this, "Unable to create output file"); } + sha1_init(&hashContext); } - ::write(fd, s, n); sha1_write(&hashContext, s, n); - + file->write(s, n); } virtual void onDone() { - ::close(fd); + file->close(); if (responseCode() == 200) { - std::string hash = strutils::encodeHex((char*) sha1_result(&hashContext)); - directory->didUpdateFile(fileName, hash); - - SG_LOG(SG_TERRASYNC, SG_DEBUG, "got file " << fileName << " in " << directory->absolutePath()); + std::string hash = strutils::encodeHex(sha1_result(&hashContext), HASH_LENGTH); + _directory->didUpdateFile(fileName, hash, contentSize()); + SG_LOG(SG_TERRASYNC, SG_DEBUG, "got file " << fileName << " in " << _directory->absolutePath()); + } else if (responseCode() == 404) { + SG_LOG(SG_TERRASYNC, SG_WARN, "terrasync file not found on server: " << fileName << " for " << _directory->absolutePath()); + _directory->didFailToUpdateFile(fileName, HTTPRepository::REPO_ERROR_FILE_NOT_FOUND); } else { - directory->didFailToUpdateFile(fileName); + SG_LOG(SG_TERRASYNC, SG_WARN, "terrasync file download error on server: " << fileName << " for " << _directory->absolutePath() << ": " << responseCode() ); + _directory->didFailToUpdateFile(fileName, HTTPRepository::REPO_ERROR_HTTP); } - directory->repository()->finishedRequest(this); + _directory->repository()->finishedRequest(this); + } + + virtual void onFail() + { + file.reset(); + if (pathInRepo.exists()) { + pathInRepo.remove(); + } + + if (_directory) { + _directory->didFailToUpdateFile(fileName, HTTPRepository::REPO_ERROR_SOCKET); + _directory->repository()->finishedRequest(this); + } } private: static std::string makeUrl(HTTPDirectory* d, const std::string& file) @@ -535,30 +661,22 @@ HTTPRepository::failure() const return d->url() + "/" + file; } - SGPath pathInRepo() const - { - SGPath p(directory->absolutePath()); - p.append(fileName); - return p; - } - - HTTPDirectory* directory; std::string fileName; // if empty, we're getting the directory itself + SGPath pathInRepo; simgear::sha1nfo hashContext; - int fd; + std::auto_ptr file; }; - class DirGetRequest : public HTTP::Request + class DirGetRequest : public HTTPRepoGetRequest { public: - DirGetRequest(HTTPDirectory* d) : - HTTP::Request(makeUrl(d)), - directory(d), - _isRootDir(false) + 
DirGetRequest(HTTPDirectory* d, const std::string& targetHash) : + HTTPRepoGetRequest(d, makeUrl(d)), + _isRootDir(false), + _targetHash(targetHash) { sha1_init(&hashContext); - SG_LOG(SG_TERRASYNC, SG_INFO, "will GET dir " << url()); - + //SG_LOG(SG_TERRASYNC, SG_INFO, "will GET dir " << url()); } void setIsRootDir() @@ -581,11 +699,16 @@ HTTPRepository::failure() const virtual void onDone() { if (responseCode() == 200) { - std::string hash = strutils::encodeHex((char*) sha1_result(&hashContext)); - std::string curHash = directory->repository()->hashForPath(path()); - if (hash != curHash) { + std::string hash = strutils::encodeHex(sha1_result(&hashContext), HASH_LENGTH); + if (!_targetHash.empty() && (hash != _targetHash)) { + _directory->failedToUpdate(HTTPRepository::REPO_ERROR_CHECKSUM); + _directory->repository()->finishedRequest(this); + return; + } - simgear::Dir d(directory->absolutePath()); + std::string curHash = _directory->repository()->hashForPath(path()); + if (hash != curHash) { + simgear::Dir d(_directory->absolutePath()); if (!d.exists()) { if (!d.create(0700)) { throw sg_io_exception("Unable to create directory", d.path()); @@ -594,24 +717,45 @@ HTTPRepository::failure() const // dir index data has changed, so write to disk and update // the hash accordingly - std::ofstream of(pathInRepo().str().c_str(), std::ios::trunc | std::ios::out); - assert(of.is_open()); + std::ofstream of(pathInRepo().c_str(), std::ios::trunc | std::ios::out); + if (!of.is_open()) { + throw sg_io_exception("Failed to open directory index file for writing", pathInRepo().c_str()); + } + of.write(body.data(), body.size()); of.close(); - directory->dirIndexUpdated(hash); - - SG_LOG(SG_TERRASYNC, SG_DEBUG, "updated dir index " << directory->absolutePath()); + _directory->dirIndexUpdated(hash); + //SG_LOG(SG_TERRASYNC, SG_INFO, "updated dir index " << _directory->absolutePath()); } - // either way we've confirmed the index is valid so update - // children now - directory->updateChildrenBasedOnHash(); + _directory->repository()->totalDownloaded += contentSize(); + + try { + // either way we've confirmed the index is valid so update + // children now + SGTimeStamp st; + st.stamp(); + _directory->updateChildrenBasedOnHash(); + SG_LOG(SG_TERRASYNC, SG_INFO, "after update of:" << _directory->absolutePath() << " child update took:" << st.elapsedMSec()); + } catch (sg_exception& ) { + _directory->failedToUpdate(HTTPRepository::REPO_ERROR_IO); + } + } else if (responseCode() == 404) { + _directory->failedToUpdate(HTTPRepository::REPO_ERROR_FILE_NOT_FOUND); } else { - directory->failedToUpdate(); + _directory->failedToUpdate(HTTPRepository::REPO_ERROR_HTTP); } - directory->repository()->finishedRequest(this); + _directory->repository()->finishedRequest(this); + } + + virtual void onFail() + { + if (_directory) { + _directory->failedToUpdate(HTTPRepository::REPO_ERROR_SOCKET); + _directory->repository()->finishedRequest(this); + } } private: static std::string makeUrl(HTTPDirectory* d) @@ -621,31 +765,43 @@ HTTPRepository::failure() const SGPath pathInRepo() const { - SGPath p(directory->absolutePath()); + SGPath p(_directory->absolutePath()); p.append(".dirindex"); return p; } - HTTPDirectory* directory; simgear::sha1nfo hashContext; std::string body; bool _isRootDir; ///< is this the repository root? 
+ std::string _targetHash; }; + HTTPRepoPrivate::~HTTPRepoPrivate() + { + DirectoryVector::iterator it; + for (it=directories.begin(); it != directories.end(); ++it) { + delete *it; + } + + RequestVector::iterator r; + for (r=activeRequests.begin(); r != activeRequests.end(); ++r) { + (*r)->cancel(); + } + } - HTTP::Request_ptr HTTPRepoPrivate::updateFile(HTTPDirectory* dir, const std::string& name) + HTTP::Request_ptr HTTPRepoPrivate::updateFile(HTTPDirectory* dir, const std::string& name, size_t sz) { - HTTP::Request_ptr r(new FileGetRequest(dir, name)); - http->makeRequest(r); - requests.push_back(r); + RepoRequestPtr r(new FileGetRequest(dir, name)); + r->setContentSize(sz); + makeRequest(r); return r; } - HTTP::Request_ptr HTTPRepoPrivate::updateDir(HTTPDirectory* dir) + HTTP::Request_ptr HTTPRepoPrivate::updateDir(HTTPDirectory* dir, const std::string& hash, size_t sz) { - HTTP::Request_ptr r(new DirGetRequest(dir)); - http->makeRequest(r); - requests.push_back(r); + RepoRequestPtr r(new DirGetRequest(dir, hash)); + r->setContentSize(sz); + makeRequest(r); return r; } @@ -687,15 +843,15 @@ HTTPRepository::failure() const sha1_init(&info); char* buf = static_cast(malloc(1024 * 1024)); size_t readLen; - int fd = ::open(p.c_str(), O_RDONLY); - if (fd < 0) { + SGBinaryFile f(p.str()); + if (!f.open(SG_IO_IN)) { throw sg_io_exception("Couldn't open file for compute hash", p); } - while ((readLen = ::read(fd, buf, 1024 * 1024)) > 0) { + while ((readLen = f.read(buf, 1024 * 1024)) > 0) { sha1_write(&info, buf, readLen); } - ::close(fd); + f.close(); free(buf); std::string hashBytes((char*) sha1_result(&info), HASH_LENGTH); return strutils::encodeHex(hashBytes); @@ -707,6 +863,7 @@ HTTPRepository::failure() const HashCache::iterator it = std::find_if(hashes.begin(), hashes.end(), HashEntryWithPath(p.str())); if (it != hashes.end()) { hashes.erase(it); + hashCacheDirty = true; } if (newHash.empty()) { @@ -725,21 +882,26 @@ HTTPRepository::failure() const entry.lengthBytes = p2.sizeInBytes(); hashes.push_back(entry); - writeHashCache(); + hashCacheDirty = true; } void HTTPRepoPrivate::writeHashCache() { + if (!hashCacheDirty) { + return; + } + SGPath cachePath = basePath; cachePath.append(".hashes"); - std::ofstream stream(cachePath.str().c_str(),std::ios::out | std::ios::trunc); + std::ofstream stream(cachePath.c_str(),std::ios::out | std::ios::trunc); HashCache::const_iterator it; for (it = hashes.begin(); it != hashes.end(); ++it) { stream << it->filePath << ":" << it->modTime << ":" << it->lengthBytes << ":" << it->hashHex << "\n"; } stream.close(); + hashCacheDirty = false; } void HTTPRepoPrivate::parseHashCache() @@ -751,26 +913,35 @@ HTTPRepository::failure() const return; } - std::ifstream stream(cachePath.str().c_str(), std::ios::in); - char buf[2048]; - char* lastToken; + std::ifstream stream(cachePath.c_str(), std::ios::in); while (!stream.eof()) { - stream.getline(buf, 2048); - lastToken = 0; - char* nameData = ::strtok_r(buf, ":", &lastToken); - char* timeData = ::strtok_r(NULL, ":", &lastToken); - char* sizeData = ::strtok_r(NULL, ":", &lastToken); - char* hashData = ::strtok_r(NULL, ":", &lastToken); - if (!nameData || !timeData || !sizeData || !hashData) { + std::string line; + std::getline(stream,line); + line = simgear::strutils::strip(line); + if( line.empty() || line[0] == '#' ) + continue; + + string_list tokens = simgear::strutils::split( line, ":" ); + if( tokens.size() < 4 ) { + SG_LOG(SG_TERRASYNC, SG_WARN, "invalid entry in '" << cachePath.str() << "': '" << line << 
"' (ignoring line)"); + continue; + } + const std::string nameData = simgear::strutils::strip(tokens[0]); + const std::string timeData = simgear::strutils::strip(tokens[1]); + const std::string sizeData = simgear::strutils::strip(tokens[2]); + const std::string hashData = simgear::strutils::strip(tokens[3]); + + if (nameData.empty() || timeData.empty() || sizeData.empty() || hashData.empty() ) { + SG_LOG(SG_TERRASYNC, SG_WARN, "invalid entry in '" << cachePath.str() << "': '" << line << "' (ignoring line)"); continue; } HashCacheEntry entry; entry.filePath = nameData; entry.hashHex = hashData; - entry.modTime = strtol(timeData, NULL, 10); - entry.lengthBytes = strtol(sizeData, NULL, 10); + entry.modTime = strtol(timeData.c_str(), NULL, 10); + entry.lengthBytes = strtol(sizeData.c_str(), NULL, 10); hashes.push_back(entry); } } @@ -818,23 +989,55 @@ HTTPRepository::failure() const return false; } - void HTTPRepoPrivate::finishedRequest(const HTTP::Request_ptr& req) + void HTTPRepoPrivate::makeRequest(RepoRequestPtr req) { - RequestVector::iterator it = std::find(requests.begin(), requests.end(), req); - if (it == requests.end()) { - throw sg_exception("lost request somehow"); + if (activeRequests.size() > 4) { + queuedRequests.push_back(req); + } else { + activeRequests.push_back(req); + http->makeRequest(req); } - requests.erase(it); - if (requests.empty()) { + } + + void HTTPRepoPrivate::finishedRequest(const RepoRequestPtr& req) + { + RequestVector::iterator it = std::find(activeRequests.begin(), activeRequests.end(), req); + if (it == activeRequests.end()) { + throw sg_exception("lost request somehow", req->url()); + } + activeRequests.erase(it); + + if (!queuedRequests.empty()) { + RepoRequestPtr rr = queuedRequests.front(); + queuedRequests.erase(queuedRequests.begin()); + activeRequests.push_back(rr); + http->makeRequest(rr); + } + + writeHashCache(); + + if (activeRequests.empty() && queuedRequests.empty()) { isUpdating = false; } } - void HTTPRepoPrivate::failedToGetRootIndex() + void HTTPRepoPrivate::failedToGetRootIndex(HTTPRepository::ResultCode st) { SG_LOG(SG_TERRASYNC, SG_WARN, "Failed to get root of repo:" << baseUrl); - status = AbstractRepository::REPO_ERROR_NOT_FOUND; + status = st; } + void HTTPRepoPrivate::failedToUpdateChild(const SGPath& relativePath, + HTTPRepository::ResultCode fileStatus) + { + Failure f; + f.path = relativePath; + f.error = fileStatus; + failures.push_back(f); + + SG_LOG(SG_TERRASYNC, SG_WARN, "failed to update entry:" << relativePath << " code:" << fileStatus); + } + + } // of namespace simgear