From: James Turner Date: Tue, 26 Jan 2016 00:29:25 +0000 (-0600) Subject: HTTP repository implementation X-Git-Url: https://git.mxchange.org/?a=commitdiff_plain;h=ae4d96872df7f72400d91ff6ace52b23333cf0d0;p=simgear.git HTTP repository implementation A plain-HTTP terrasync repository implementation, using the SimGear HTTP abstraction. File validity is based on SHA hashes, and existing files are not re-downloaded if their hash matches, so soft upgrade from an SVN checkout is possible. --- diff --git a/simgear/io/CMakeLists.txt b/simgear/io/CMakeLists.txt index 61d80ea0..cfe6c7fd 100644 --- a/simgear/io/CMakeLists.txt +++ b/simgear/io/CMakeLists.txt @@ -23,6 +23,7 @@ set(HEADERS SVNRepository.hxx SVNDirectory.hxx SVNReportParser.hxx + HTTPRepository.hxx ) set(SOURCES @@ -46,6 +47,7 @@ set(SOURCES SVNRepository.cxx SVNDirectory.cxx SVNReportParser.cxx + HTTPRepository.cxx ) if (NOT ENABLE_CURL) @@ -79,4 +81,8 @@ target_link_libraries(test_binobj ${TEST_LIBS}) add_test(binobj ${EXECUTABLE_OUTPUT_PATH}/test_binobj) +add_executable(test_repository test_repository.cxx) +target_link_libraries(test_repository ${TEST_LIBS}) +add_test(http_repository ${EXECUTABLE_OUTPUT_PATH}/test_repository) + endif(ENABLE_TESTS) diff --git a/simgear/io/HTTPRepository.cxx b/simgear/io/HTTPRepository.cxx new file mode 100644 index 00000000..aba7224c --- /dev/null +++ b/simgear/io/HTTPRepository.cxx @@ -0,0 +1,839 @@ +// HTTPRepository.cxx -- plain HTTP TerraSync remote client +// +// Copyright (C) 2016 James Turner +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
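As the commit message notes, sync is driven through the SimGear HTTP abstraction and file validity is decided by SHA-1 hashes. A minimal, illustrative sketch of how a caller could drive the new class, pieced together from HTTPRepository.hxx and the polling loop in test_repository.cxx further down; the include paths, server URL and local directory are assumptions for the example, not part of this commit:

    #include <simgear/io/HTTPRepository.hxx>   // assumed installed header paths
    #include <simgear/io/HTTPClient.hxx>
    #include <simgear/misc/sg_path.hxx>
    #include <simgear/timing/timestamp.hxx>

    int main()
    {
        simgear::HTTP::Client http;
        SGPath localRoot("/tmp/terrasync-cache");           // placeholder directory
        simgear::HTTPRepository repo(localRoot, &http);
        repo.setBaseUrl("http://example.com/terrasync");    // placeholder URL

        repo.update();                   // queues the root .dirindex request
        while (repo.isDoingSync()) {     // pump the HTTP client until the repo is idle
            http.update();
            SGTimeStamp::sleepForMSec(15);
        }
        return repo.failure() == simgear::AbstractRepository::REPO_NO_ERROR ? 0 : 1;
    }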
+ +#include "HTTPRepository.hxx" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "simgear/debug/logstream.hxx" +#include "simgear/misc/strutils.hxx" +#include +#include +#include +#include +#include + +#include + +namespace simgear +{ + + class HTTPDirectory; + +class HTTPRepoPrivate +{ +public: + struct HashCacheEntry + { + std::string filePath; + time_t modTime; + size_t lengthBytes; + std::string hashHex; + + }; + + typedef std::vector HashCache; + HashCache hashes; + + HTTPRepoPrivate(HTTPRepository* parent) : + p(parent), + isUpdating(false), + status(AbstractRepository::REPO_NO_ERROR) + { ; } + + HTTPRepository* p; // link back to outer + HTTP::Client* http; + std::string baseUrl; + SGPath basePath; + bool isUpdating; + AbstractRepository::ResultCode status; + HTTPDirectory* rootDir; + + HTTP::Request_ptr updateFile(HTTPDirectory* dir, const std::string& name); + HTTP::Request_ptr updateDir(HTTPDirectory* dir); + + std::string hashForPath(const SGPath& p); + void updatedFileContents(const SGPath& p, const std::string& newHash); + void parseHashCache(); + std::string computeHashForPath(const SGPath& p); + void writeHashCache(); + + void failedToGetRootIndex(); + + typedef std::vector RequestVector; + RequestVector requests; + + void finishedRequest(const HTTP::Request_ptr& req); + + HTTPDirectory* getOrCreateDirectory(const std::string& path); + bool deleteDirectory(const std::string& path); + + typedef std::vector DirectoryVector; + DirectoryVector directories; + +}; + +class HTTPDirectory +{ + struct ChildInfo + { + enum Type + { + FileType, + DirectoryType + }; + + ChildInfo(Type ty, const char* nameData, const char* hashData) : + type(ty), + name(nameData), + hash(hashData ? 
hashData : ""), + sizeInBytes(0) + { + } + + ChildInfo(const ChildInfo& other) : + type(other.type), + name(other.name), + hash(other.hash), + sizeInBytes(other.sizeInBytes) + { } + + void setSize(const char* sizeData) + { + sizeInBytes = ::strtol(sizeData, NULL, 10); + } + + bool operator<(const ChildInfo& other) const + { + return name < other.name; + } + + Type type; + std::string name, hash; + size_t sizeInBytes; + }; + + typedef std::vector ChildInfoList; + ChildInfoList children; + +public: + HTTPDirectory(HTTPRepoPrivate* repo, const std::string& path) : + _repository(repo), + _relativePath(path) + { + SGPath p(absolutePath()); + if (p.exists()) { + bool indexValid = false; + try { + // already exists on disk + parseDirIndex(children); + indexValid = true; + std::sort(children.begin(), children.end()); + } catch (sg_exception& e) { + // parsing cache failed + children.clear(); + } + } + } + + HTTPRepoPrivate* repository() const + { + return _repository; + } + + std::string url() const + { + if (_relativePath.str().empty()) { + return _repository->baseUrl; + } + + return _repository->baseUrl + "/" + _relativePath.str(); + } + + void dirIndexUpdated(const std::string& hash) + { + SGPath fpath(_relativePath); + fpath.append(".dirindex"); + _repository->updatedFileContents(fpath, hash); + + children.clear(); + parseDirIndex(children); + std::sort(children.begin(), children.end()); + } + + void failedToUpdate() + { + if (_relativePath.isNull()) { + // root dir failed + _repository->failedToGetRootIndex(); + } else { + SG_LOG(SG_TERRASYNC, SG_WARN, "failed to update dir:" << _relativePath); + } + } + + void updateChildrenBasedOnHash() + { + SG_LOG(SG_TERRASYNC, SG_DEBUG, "updated children for:" << relativePath()); + + string_list indexNames = indexChildren(), + toBeUpdated, orphans; + simgear::Dir d(absolutePath()); + PathList fsChildren = d.children(0); + PathList::const_iterator it = fsChildren.begin(); + + for (; it != fsChildren.end(); ++it) { + ChildInfo info(it->isDir() ? ChildInfo::DirectoryType : ChildInfo::FileType, + it->file().c_str(), NULL); + std::string hash = hashForChild(info); + + ChildInfoList::iterator c = findIndexChild(it->file()); + if (c == children.end()) { + orphans.push_back(it->file()); + } else if (c->hash != hash) { + // file exists, but hash mismatch, schedule update + if (!hash.empty()) { + SG_LOG(SG_TERRASYNC, SG_INFO, "file exists but hash is wrong for:" << c->name); + } + + toBeUpdated.push_back(c->name); + } else { + // file exists and hash is valid. If it's a directory, + // perform a recursive check. 
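+                // (summary of the three cases above: an entry missing from the
+                // .dirindex is an orphan and gets removed; a hash mismatch queues
+                // a re-download; a matching hash keeps the file, and directories
+                // are re-checked recursively as here)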
+ if (c->type == ChildInfo::DirectoryType) { + SGPath p(relativePath()); + p.append(c->name); + HTTPDirectory* childDir = _repository->getOrCreateDirectory(p.str()); + childDir->updateChildrenBasedOnHash(); + } else { + SG_LOG(SG_TERRASYNC, SG_INFO, "existing file is ok:" << c->name); + } + } + + // remove existing file system children from the index list, + // so we can detect new children + string_list::iterator it = std::find(indexNames.begin(), indexNames.end(), c->name); + if (it != indexNames.end()) { + indexNames.erase(it); + } + } // of real children iteration + + // all remaining names in indexChilden are new children + toBeUpdated.insert(toBeUpdated.end(), indexNames.begin(), indexNames.end()); + + removeOrphans(orphans); + scheduleUpdates(toBeUpdated); + } + + void removeOrphans(const string_list& orphans) + { + string_list::const_iterator it; + for (it = orphans.begin(); it != orphans.end(); ++it) { + removeChild(*it); + } + } + + string_list indexChildren() const + { + string_list r; + r.reserve(children.size()); + ChildInfoList::const_iterator it; + for (it=children.begin(); it != children.end(); ++it) { + r.push_back(it->name); + } + return r; + } + + void scheduleUpdates(const string_list& names) + { + string_list::const_iterator it; + for (it = names.begin(); it != names.end(); ++it) { + ChildInfoList::iterator cit = findIndexChild(*it); + if (cit == children.end()) { + SG_LOG(SG_TERRASYNC, SG_WARN, "scheduleUpdate, unknown child:" << *it); + continue; + } + + if (cit->type == ChildInfo::FileType) { + _repository->updateFile(this, *it); + } else { + SGPath p(relativePath()); + p.append(*it); + HTTPDirectory* childDir = _repository->getOrCreateDirectory(p.str()); + _repository->updateDir(childDir); + } + } + } + + SGPath absolutePath() const + { + SGPath r(_repository->basePath); + r.append(_relativePath.str()); + return r; + } + + SGPath relativePath() const + { + return _relativePath; + } + + void didUpdateFile(const std::string& file, const std::string& hash) + { + SGPath fpath(_relativePath); + fpath.append(file); + _repository->updatedFileContents(fpath, hash); + SG_LOG(SG_TERRASYNC, SG_INFO, "did update:" << fpath); + } + + void didFailToUpdateFile(const std::string& file) + { + SGPath fpath(_relativePath); + fpath.append(file); + SG_LOG(SG_TERRASYNC, SG_WARN, "failed to update:" << fpath); + } +private: + + struct ChildWithName + { + ChildWithName(const std::string& n) : name(n) {} + std::string name; + + bool operator()(const ChildInfo& info) const + { return info.name == name; } + }; + + ChildInfoList::iterator findIndexChild(const std::string& name) + { + return std::find_if(children.begin(), children.end(), ChildWithName(name)); + } + + void parseDirIndex(ChildInfoList& children) + { + SGPath p(absolutePath()); + p.append(".dirindex"); + std::ifstream indexStream( p.str(), std::ios::in ); + + if ( !indexStream.is_open() ) { + throw sg_io_exception("cannot open dirIndex file", p); + } + + char lineBuffer[512]; + char* lastToken; + + while (!indexStream.eof() ) { + indexStream.getline(lineBuffer, 512); + lastToken = 0; + char* typeData = ::strtok_r(lineBuffer, ":", &lastToken); + if (!typeData) { + continue; // skip blank line + } + + if (!typeData) { + // malformed entry + throw sg_io_exception("Malformed dir index file", p); + } + + if (!strcmp(typeData, "version")) { + continue; + } else if (!strcmp(typeData, "path")) { + continue; + } + + char* nameData = ::strtok_r(NULL, ":", &lastToken); + char* hashData = ::strtok_r(NULL, ":", &lastToken); + char* sizeData = 
::strtok_r(NULL, ":", &lastToken); + + if (typeData[0] == 'f') { + children.push_back(ChildInfo(ChildInfo::FileType, nameData, hashData)); + } else if (typeData[0] == 'd') { + children.push_back(ChildInfo(ChildInfo::DirectoryType, nameData, hashData)); + } else { + throw sg_io_exception("Malformed line code in dir index file", p); + } + + if (sizeData) { + children.back().setSize(sizeData); + } + } + } + + void removeChild(const std::string& name) + { + SGPath p(absolutePath()); + p.append(name); + bool ok; + + SGPath fpath(_relativePath); + fpath.append(name); + + if (p.isDir()) { + ok = _repository->deleteDirectory(fpath.str()); + } else { + // remove the hash cache entry + _repository->updatedFileContents(fpath, std::string()); + ok = p.remove(); + } + + if (!ok) { + SG_LOG(SG_TERRASYNC, SG_WARN, "removal failed for:" << p); + } + } + + std::string hashForChild(const ChildInfo& child) const + { + SGPath p(absolutePath()); + p.append(child.name); + if (child.type == ChildInfo::DirectoryType) { + p.append(".dirindex"); + } + return _repository->hashForPath(p); + } + + HTTPRepoPrivate* _repository; + SGPath _relativePath; // in URL and file-system space + + +}; + +HTTPRepository::HTTPRepository(const SGPath& base, HTTP::Client *cl) : + _d(new HTTPRepoPrivate(this)) +{ + _d->http = cl; + _d->basePath = base; + _d->rootDir = new HTTPDirectory(_d.get(), ""); +} + +HTTPRepository::~HTTPRepository() +{ +} + +void HTTPRepository::setBaseUrl(const std::string &url) +{ + _d->baseUrl = url; +} + +std::string HTTPRepository::baseUrl() const +{ + return _d->baseUrl; +} + +HTTP::Client* HTTPRepository::http() const +{ + return _d->http; +} + +SGPath HTTPRepository::fsBase() const +{ + return SGPath(); +} + +void HTTPRepository::update() +{ + if (_d->isUpdating) { + return; + } + + _d->status = REPO_NO_ERROR; + _d->isUpdating = true; + _d->updateDir(_d->rootDir); +} + +bool HTTPRepository::isDoingSync() const +{ + if (_d->status != REPO_NO_ERROR) { + return false; + } + + return _d->isUpdating; +} + +AbstractRepository::ResultCode +HTTPRepository::failure() const +{ + return _d->status; +} + + class FileGetRequest : public HTTP::Request + { + public: + FileGetRequest(HTTPDirectory* d, const std::string& file) : + HTTP::Request(makeUrl(d, file)), + directory(d), + fileName(file), + fd(-1) + { + SG_LOG(SG_TERRASYNC, SG_INFO, "will GET file " << url()); + + } + + protected: + virtual void gotBodyData(const char* s, int n) + { + if (fd < 0) { + SGPath p(pathInRepo()); +#ifdef SG_WINDOWS + int mode = 00666; +#else + mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; +#endif + fd = ::open(p.c_str(), O_CREAT | O_TRUNC | O_RDWR, mode); + if (fd < 0) { + SG_LOG(SG_TERRASYNC, SG_WARN, "unable to create file " << p); + // fail + } + sha1_init(&hashContext); + } + + ::write(fd, s, n); + sha1_write(&hashContext, s, n); + + } + + virtual void onDone() + { + ::close(fd); + if (responseCode() == 200) { + std::string hash = strutils::encodeHex((char*) sha1_result(&hashContext)); + directory->didUpdateFile(fileName, hash); + + SG_LOG(SG_TERRASYNC, SG_DEBUG, "got file " << fileName << " in " << directory->absolutePath()); + } else { + directory->didFailToUpdateFile(fileName); + } + + directory->repository()->finishedRequest(this); + } + private: + static std::string makeUrl(HTTPDirectory* d, const std::string& file) + { + return d->url() + "/" + file; + } + + SGPath pathInRepo() const + { + SGPath p(directory->absolutePath()); + p.append(fileName); + return p; + } + + HTTPDirectory* directory; + 
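+        // the response body is streamed straight into 'fd' while the same bytes
+        // are fed to 'hashContext', so the downloaded file never needs to be
+        // re-read to compute its SHA-1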
std::string fileName; // if empty, we're getting the directory itself + simgear::sha1nfo hashContext; + int fd; + }; + + class DirGetRequest : public HTTP::Request + { + public: + DirGetRequest(HTTPDirectory* d) : + HTTP::Request(makeUrl(d)), + directory(d), + _isRootDir(false) + { + sha1_init(&hashContext); + SG_LOG(SG_TERRASYNC, SG_INFO, "will GET dir " << url()); + + } + + void setIsRootDir() + { + _isRootDir = true; + } + + bool isRootDir() const + { + return _isRootDir; + } + + protected: + virtual void gotBodyData(const char* s, int n) + { + body += std::string(s, n); + sha1_write(&hashContext, s, n); + } + + virtual void onDone() + { + if (responseCode() == 200) { + std::string hash = strutils::encodeHex((char*) sha1_result(&hashContext)); + std::string curHash = directory->repository()->hashForPath(path()); + if (hash != curHash) { + + simgear::Dir d(directory->absolutePath()); + if (!d.exists()) { + if (!d.create(0700)) { + throw sg_io_exception("Unable to create directory", d.path()); + } + } + + // dir index data has changed, so write to disk and update + // the hash accordingly + std::ofstream of(pathInRepo().str(), std::ios::trunc | std::ios::out); + assert(of.is_open()); + of.write(body.data(), body.size()); + of.close(); + directory->dirIndexUpdated(hash); + + SG_LOG(SG_TERRASYNC, SG_DEBUG, "updated dir index " << directory->absolutePath()); + + } + + // either way we've confirmed the index is valid so update + // children now + directory->updateChildrenBasedOnHash(); + } else { + directory->failedToUpdate(); + } + + directory->repository()->finishedRequest(this); + } + private: + static std::string makeUrl(HTTPDirectory* d) + { + return d->url() + "/.dirindex"; + } + + SGPath pathInRepo() const + { + SGPath p(directory->absolutePath()); + p.append(".dirindex"); + return p; + } + + HTTPDirectory* directory; + simgear::sha1nfo hashContext; + std::string body; + bool _isRootDir; ///< is this the repository root? + }; + + + HTTP::Request_ptr HTTPRepoPrivate::updateFile(HTTPDirectory* dir, const std::string& name) + { + HTTP::Request_ptr r(new FileGetRequest(dir, name)); + http->makeRequest(r); + requests.push_back(r); + return r; + } + + HTTP::Request_ptr HTTPRepoPrivate::updateDir(HTTPDirectory* dir) + { + HTTP::Request_ptr r(new DirGetRequest(dir)); + http->makeRequest(r); + requests.push_back(r); + return r; + } + + + class HashEntryWithPath + { + public: + HashEntryWithPath(const std::string& p) : path(p) {} + bool operator()(const HTTPRepoPrivate::HashCacheEntry& entry) const + { return entry.filePath == path; } + private: + std::string path; + }; + + std::string HTTPRepoPrivate::hashForPath(const SGPath& p) + { + HashCache::iterator it = std::find_if(hashes.begin(), hashes.end(), HashEntryWithPath(p.str())); + if (it != hashes.end()) { + // ensure data on disk hasn't changed. 
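+            // (a cached hash only counts as a hit while the recorded size and
+            // modification time still match the file on disk; otherwise the stale
+            // entry is erased and the file is re-hashed below)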
+ // we could also use the file type here if we were paranoid + if ((p.sizeInBytes() == it->lengthBytes) && (p.modTime() == it->modTime)) { + return it->hashHex; + } + + // entry in the cache, but it's stale so remove and fall through + hashes.erase(it); + } + + std::string hash = computeHashForPath(p); + updatedFileContents(p, hash); + return hash; + } + + std::string HTTPRepoPrivate::computeHashForPath(const SGPath& p) + { + if (!p.exists()) + return std::string(); + sha1nfo info; + sha1_init(&info); + char* buf = static_cast(malloc(1024 * 1024)); + size_t readLen; + int fd = ::open(p.c_str(), O_RDONLY); + if (fd < 0) { + throw sg_io_exception("Couldn't open file for compute hash", p); + } + while ((readLen = ::read(fd, buf, 1024 * 1024)) > 0) { + sha1_write(&info, buf, readLen); + } + + ::close(fd); + free(buf); + std::string hashBytes((char*) sha1_result(&info), HASH_LENGTH); + return strutils::encodeHex(hashBytes); + } + + void HTTPRepoPrivate::updatedFileContents(const SGPath& p, const std::string& newHash) + { + // remove the existing entry + HashCache::iterator it = std::find_if(hashes.begin(), hashes.end(), HashEntryWithPath(p.str())); + if (it != hashes.end()) { + hashes.erase(it); + } + + if (newHash.empty()) { + return; // we're done + } + + // use a cloned SGPath and reset its caching to force one stat() call + SGPath p2(p); + p2.set_cached(false); + p2.set_cached(true); + + HashCacheEntry entry; + entry.filePath = p.str(); + entry.hashHex = newHash; + entry.modTime = p2.modTime(); + entry.lengthBytes = p2.sizeInBytes(); + hashes.push_back(entry); + + writeHashCache(); + } + + void HTTPRepoPrivate::writeHashCache() + { + SGPath cachePath = basePath; + cachePath.append(".hashes"); + + std::ofstream stream(cachePath.str(),std::ios::out | std::ios::trunc); + HashCache::const_iterator it; + for (it = hashes.begin(); it != hashes.end(); ++it) { + stream << it->filePath << ":" << it->modTime << ":" + << it->lengthBytes << ":" << it->hashHex << "\n"; + } + stream.close(); + } + + void HTTPRepoPrivate::parseHashCache() + { + hashes.clear(); + SGPath cachePath = basePath; + cachePath.append(".hashes"); + if (!cachePath.exists()) { + return; + } + + std::ifstream stream(cachePath.str(), std::ios::in); + char buf[2048]; + char* lastToken; + + while (!stream.eof()) { + stream.getline(buf, 2048); + lastToken = 0; + char* nameData = ::strtok_r(buf, ":", &lastToken); + char* timeData = ::strtok_r(NULL, ":", &lastToken); + char* sizeData = ::strtok_r(NULL, ":", &lastToken); + char* hashData = ::strtok_r(NULL, ":", &lastToken); + if (!nameData || !timeData || !sizeData || !hashData) { + continue; + } + + HashCacheEntry entry; + entry.filePath = nameData; + entry.hashHex = hashData; + entry.modTime = strtol(timeData, NULL, 10); + entry.lengthBytes = strtol(sizeData, NULL, 10); + hashes.push_back(entry); + } + } + + class DirectoryWithPath + { + public: + DirectoryWithPath(const std::string& p) : path(p) {} + bool operator()(const HTTPDirectory* entry) const + { return entry->relativePath().str() == path; } + private: + std::string path; + }; + + HTTPDirectory* HTTPRepoPrivate::getOrCreateDirectory(const std::string& path) + { + DirectoryWithPath p(path); + DirectoryVector::iterator it = std::find_if(directories.begin(), directories.end(), p); + if (it != directories.end()) { + return *it; + } + + HTTPDirectory* d = new HTTPDirectory(this, path); + directories.push_back(d); + return d; + } + + bool HTTPRepoPrivate::deleteDirectory(const std::string& path) + { + DirectoryWithPath p(path); + 
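+        // deleting a directory drops it from the in-memory list, removes it
+        // recursively on disk, and clears its entry from the hash cache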
DirectoryVector::iterator it = std::find_if(directories.begin(), directories.end(), p); + if (it != directories.end()) { + HTTPDirectory* d = *it; + directories.erase(it); + Dir dir(d->absolutePath()); + bool result = dir.remove(true); + delete d; + + // update the hash cache too + updatedFileContents(path, std::string()); + + return result; + } + + return false; + } + + void HTTPRepoPrivate::finishedRequest(const HTTP::Request_ptr& req) + { + RequestVector::iterator it = std::find(requests.begin(), requests.end(), req); + if (it == requests.end()) { + throw sg_exception("lost request somehow"); + } + requests.erase(it); + if (requests.empty()) { + isUpdating = false; + } + } + + void HTTPRepoPrivate::failedToGetRootIndex() + { + SG_LOG(SG_TERRASYNC, SG_WARN, "Failed to get root of repo:" << baseUrl); + status = AbstractRepository::REPO_ERROR_NOT_FOUND; + } + + +} // of namespace simgear diff --git a/simgear/io/HTTPRepository.hxx b/simgear/io/HTTPRepository.hxx new file mode 100644 index 00000000..c3953011 --- /dev/null +++ b/simgear/io/HTTPRepository.hxx @@ -0,0 +1,56 @@ +// HTTPRepository.hxx - plain HTTP TerraSync remote server client +// +// Copyright (C) 2016 James Turner +// +// This program is free software; you can redistribute it and/or +// modify it under the terms of the GNU General Public License as +// published by the Free Software Foundation; either version 2 of the +// License, or (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, but +// WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program; if not, write to the Free Software +// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
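For reference, a .dirindex file, as fetched by DirGetRequest and parsed by parseDirIndex() above, is plain text with one colon-separated record per line. The names, sizes and shortened hashes below are invented, purely to show the shape:

    version:1
    path:Models/Airport
    d:Terminal:9b2c5f0e...:4096
    f:windsock.ac:a9993e36...:18231
    f:windsock.png:84983e44...:65536

The local .hashes cache written by writeHashCache() uses the same idea, one filePath:modTime:sizeInBytes:hashHex record per line.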
+ + +#ifndef SG_IO_HTTP_REPOSITORY_HXX +#define SG_IO_HTTP_REPOSITORY_HXX + +#include + +namespace simgear { + +class HTTPRepoPrivate; + +class HTTPRepository : public AbstractRepository +{ +public: + + HTTPRepository(const SGPath& root, HTTP::Client* cl); + virtual ~HTTPRepository(); + + virtual SGPath fsBase() const; + + virtual void setBaseUrl(const std::string& url); + virtual std::string baseUrl() const; + + virtual HTTP::Client* http() const; + + virtual void update(); + + virtual bool isDoingSync() const; + + virtual ResultCode failure() const; +private: + bool isBare() const; + + std::auto_ptr _d; +}; + +} // of namespace simgear + +#endif // of HTTPRepository diff --git a/simgear/io/test_HTTP.cxx b/simgear/io/test_HTTP.cxx index 0703fe8e..5a33a28c 100644 --- a/simgear/io/test_HTTP.cxx +++ b/simgear/io/test_HTTP.cxx @@ -12,7 +12,8 @@ #include "HTTPClient.hxx" #include "HTTPRequest.hxx" -#include +#include "test_HTTP.hxx" + #include #include #include @@ -96,97 +97,12 @@ protected: } }; -class TestServerChannel : public NetChat +class HTTPTestChannel : public TestServerChannel { public: - enum State - { - STATE_IDLE = 0, - STATE_HEADERS, - STATE_CLOSING, - STATE_REQUEST_BODY - }; - - TestServerChannel() - { - state = STATE_IDLE; - setTerminator("\r\n"); - - } - - virtual void collectIncomingData(const char* s, int n) - { - buffer += string(s, n); - } - - virtual void foundTerminator(void) - { - if (state == STATE_IDLE) { - state = STATE_HEADERS; - string_list line = strutils::split(buffer, NULL, 3); - if (line.size() < 3) { - cerr << "malformed request:" << buffer << endl; - exit(-1); - } - - method = line[0]; - path = line[1]; - - string::size_type queryPos = path.find('?'); - if (queryPos != string::npos) { - parseArgs(path.substr(queryPos + 1)); - path = path.substr(0, queryPos); - } - - httpVersion = line[2]; - requestHeaders.clear(); - buffer.clear(); - } else if (state == STATE_HEADERS) { - string s = strutils::simplify(buffer); - if (s.empty()) { - buffer.clear(); - receivedRequestHeaders(); - return; - } - - string::size_type colonPos = buffer.find(':'); - if (colonPos == string::npos) { - cerr << "test malformed HTTP response header:" << buffer << endl; - buffer.clear(); - return; - } - - string key = strutils::simplify(buffer.substr(0, colonPos)); - string value = strutils::strip(buffer.substr(colonPos + 1)); - requestHeaders[key] = value; - buffer.clear(); - } else if (state == STATE_REQUEST_BODY) { - receivedBody(); - setTerminator("\r\n"); - } else if (state == STATE_CLOSING) { - // ignore! 
- } - } - void parseArgs(const string& argData) + virtual void processRequestHeaders() { - string_list argv = strutils::split(argData, "&"); - for (unsigned int a=0; a requestHeaders; - std::map args; - int requestContentLength; -}; - -class TestServer : public NetChannel -{ - simgear::NetChannelPoller _poller; -public: - TestServer() - { - Socket::initSockets(); - - open(); - bind(NULL, 2000); // localhost, any port - listen(5); - - _poller.addChannel(this); - } - - virtual ~TestServer() - { - } - - virtual bool writable (void) { return false ; } - - virtual void handleAccept (void) - { - simgear::IPAddress addr ; - int handle = accept ( &addr ) ; - //cout << "did accept from " << addr.getHost() << ":" << addr.getPort() << endl; - TestServerChannel* chan = new TestServerChannel(); - chan->setHandle(handle); - - _poller.addChannel(chan); - } - - void poll() - { - _poller.poll(); - } }; -TestServer testServer; +TestServer testServer; void waitForComplete(HTTP::Client* cl, TestRequest* tr) { diff --git a/simgear/io/test_HTTP.hxx b/simgear/io/test_HTTP.hxx new file mode 100644 index 00000000..e7a7c286 --- /dev/null +++ b/simgear/io/test_HTTP.hxx @@ -0,0 +1,213 @@ +#ifndef SIMGEAR_IO_TEST_HTTP_HXX +#define SIMGEAR_IO_TEST_HTTP_HXX + +#include + +#include +#include + +namespace simgear +{ + +class TestServerChannel : public NetChat +{ +public: + enum State + { + STATE_IDLE = 0, + STATE_HEADERS, + STATE_CLOSING, + STATE_REQUEST_BODY + }; + + TestServerChannel() + { + state = STATE_IDLE; + setTerminator("\r\n"); + + } + + virtual void collectIncomingData(const char* s, int n) + { + buffer += std::string(s, n); + } + + virtual void foundTerminator(void) + { + if (state == STATE_IDLE) { + state = STATE_HEADERS; + string_list line = strutils::split(buffer, NULL, 3); + if (line.size() < 3) { + std::cerr << "malformed request:" << buffer << std::endl; + exit(-1); + } + + method = line[0]; + path = line[1]; + + std::string::size_type queryPos = path.find('?'); + if (queryPos != std::string::npos) { + parseArgs(path.substr(queryPos + 1)); + path = path.substr(0, queryPos); + } + + httpVersion = line[2]; + requestHeaders.clear(); + buffer.clear(); + } else if (state == STATE_HEADERS) { + std::string s = strutils::simplify(buffer); + if (s.empty()) { + buffer.clear(); + receivedRequestHeaders(); + return; + } + + std::string::size_type colonPos = buffer.find(':'); + if (colonPos == std::string::npos) { + std::cerr << "test malformed HTTP response header:" << buffer << std::endl; + buffer.clear(); + return; + } + + std::string key = strutils::simplify(buffer.substr(0, colonPos)); + std::string value = strutils::strip(buffer.substr(colonPos + 1)); + requestHeaders[key] = value; + buffer.clear(); + } else if (state == STATE_REQUEST_BODY) { + receivedBody(); + setTerminator("\r\n"); + } else if (state == STATE_CLOSING) { + // ignore! 
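+        // (the channel is a small state machine: request line -> headers ->
+        // optional body; anything arriving once the channel is closing is
+        // simply discarded)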
+ } + } + + void parseArgs(const std::string& argData) + { + string_list argv = strutils::split(argData, "&"); + for (unsigned int a=0; a requestHeaders; + std::map args; + int requestContentLength; +}; + +template +class TestServer : public NetChannel +{ + simgear::NetChannelPoller _poller; +public: + TestServer() + { + Socket::initSockets(); + + open(); + bind(NULL, 2000); // localhost, any port + listen(5); + + _poller.addChannel(this); + } + + virtual ~TestServer() + { + } + + virtual bool writable (void) { return false ; } + + virtual void handleAccept (void) + { + simgear::IPAddress addr ; + int handle = accept ( &addr ) ; + TestServerChannel* chan = new T(); + chan->setHandle(handle); + + _poller.addChannel(chan); + } + + void poll() + { + _poller.poll(); + } +}; + +} // of namespace simgear + +#endif // of SIMGEAR_IO_TEST_HTTP_HXX diff --git a/simgear/io/test_repository.cxx b/simgear/io/test_repository.cxx new file mode 100644 index 00000000..5769ed94 --- /dev/null +++ b/simgear/io/test_repository.cxx @@ -0,0 +1,507 @@ +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "test_HTTP.hxx" +#include "HTTPRepository.hxx" +#include "HTTPClient.hxx" + +#include +#include +#include +#include +#include +#include + +using namespace simgear; + +std::string dataForFile(const std::string& parentName, const std::string& name, int revision) +{ + std::ostringstream os; + // random content but which definitely depends on our tree location + // and revision. + for (int i=0; i<100; ++i) { + os << i << parentName << "_" << name << "_" << revision; + } + + return os.str(); +} + +class TestRepoEntry +{ +public: + TestRepoEntry(TestRepoEntry* parent, const std::string& name, bool isDir); + ~TestRepoEntry(); + + TestRepoEntry* parent; + std::string name; + + std::string indexLine() const; + + std::string hash() const; + + std::vector children; + + size_t sizeInBytes() const + { + return data().size(); + } + + bool isDir; + int revision; // for files + int requestCount; + + void clearRequestCounts(); + + std::string pathInRepo() const + { + return parent ? 
(parent->pathInRepo() + "/" + name) : name; + } + + std::string data() const; + + void defineFile(const std::string& path, int rev = 1) + { + string_list pathParts = strutils::split(path, "/"); + if (pathParts.size() == 1) { + children.push_back(new TestRepoEntry(this, pathParts.front(), false)); + children.back()->revision = rev; + } else { + // recurse + TestRepoEntry* c = childEntry(pathParts.front()); + if (!c) { + // define a new directory child + c = new TestRepoEntry(this, pathParts.front(), true); + children.push_back(c); + } + + size_t frontPartLength = pathParts.front().size(); + c->defineFile(path.substr(frontPartLength + 1), rev); + } + } + + TestRepoEntry* findEntry(const std::string& path) + { + if (path.empty()) { + return this; + } + + string_list pathParts = strutils::split(path, "/"); + TestRepoEntry* entry = childEntry(pathParts.front()); + if (pathParts.size() == 1) { + return entry; // might be NULL + } + + if (!entry) { + std::cerr << "bad path: " << path << std::endl; + return NULL; + } + + size_t part0Length = pathParts.front().size() + 1; + return entry->findEntry(path.substr(part0Length)); + } + + TestRepoEntry* childEntry(const std::string& name) const + { + assert(isDir); + for (int i=0; iname == name) { + return children[i]; + } + } + + return NULL; + } + + void removeChild(const std::string& name) + { + std::vector::iterator it; + for (it = children.begin(); it != children.end(); ++it) { + if ((*it)->name == name) { + delete *it; + children.erase(it); + return; + } + } + std::cerr << "child not found:" << name << std::endl; + } +}; + +TestRepoEntry::TestRepoEntry(TestRepoEntry* pr, const std::string& nm, bool d) : + parent(pr), name(nm), isDir(d) +{ + revision = 2; + requestCount = 0; +} + +TestRepoEntry::~TestRepoEntry() +{ + for (int i=0; iindexLine() << "\n"; + } + return os.str(); + } else { + return dataForFile(parent->name, name, revision); + } +} + +std::string TestRepoEntry::indexLine() const +{ + std::ostringstream os; + os << (isDir ? 
"d:" : "f:") << name << ":" << hash() + << ":" << sizeInBytes(); + return os.str(); +} + +std::string TestRepoEntry::hash() const +{ + simgear::sha1nfo info; + sha1_init(&info); + std::string d(data()); + sha1_write(&info, d.data(), d.size()); + return strutils::encodeHex(sha1_result(&info), HASH_LENGTH); +} + +void TestRepoEntry::clearRequestCounts() +{ + requestCount = 0; + if (isDir) { + for (int i=0; iclearRequestCounts(); + } + } +} + +TestRepoEntry* global_repo = NULL; + +class TestRepositoryChannel : public TestServerChannel +{ +public: + + virtual void processRequestHeaders() + { + state = STATE_IDLE; + if (path.find("/repo/") == 0) { +// std::cerr << "get for:" << path << std::endl; + + std::string repoPath = path.substr(6); + bool lookingForDir = false; + std::string::size_type suffix = repoPath.find(".dirindex"); + if (suffix != std::string::npos) { + lookingForDir = true; + if (suffix > 0) { + // trim the preceeding '/' as well, for non-root dirs + suffix--; + } + + repoPath = repoPath.substr(0, suffix); + } + + TestRepoEntry* entry = global_repo->findEntry(repoPath); + if (!entry) { + sendErrorResponse(404, false, "unknown repo path:" + repoPath); + return; + } + + if (entry->isDir != lookingForDir) { + sendErrorResponse(404, false, "mismatched path type:" + repoPath); + return; + } + + entry->requestCount++; + std::string content(entry->data()); + std::stringstream d; + d << "HTTP/1.1 " << 200 << " " << reasonForCode(200) << "\r\n"; + d << "Content-Length:" << content.size() << "\r\n"; + d << "\r\n"; // final CRLF to terminate the headers + d << content; + push(d.str().c_str()); + } else { + sendErrorResponse(404, false, ""); + } + } +}; + +std::string test_computeHashForPath(const SGPath& p) +{ + if (!p.exists()) + return std::string(); + sha1nfo info; + sha1_init(&info); + char* buf = static_cast(alloca(1024 * 1024)); + size_t readLen; + int fd = ::open(p.c_str(), O_RDONLY); + while ((readLen = ::read(fd, buf, 1024 * 1024)) > 0) { + sha1_write(&info, buf, readLen); + } + + std::string hashBytes((char*) sha1_result(&info), HASH_LENGTH); + return strutils::encodeHex(hashBytes); +} + +void verifyFileState(const SGPath& fsRoot, const std::string& relPath) +{ + TestRepoEntry* entry = global_repo->findEntry(relPath); + if (!entry) { + throw sg_error("Missing test repo entry", relPath); + } + + SGPath p(fsRoot); + p.append(relPath); + if (!p.exists()) { + throw sg_error("Missing file system entry", relPath); + } + + std::string hashOnDisk = test_computeHashForPath(p); + if (hashOnDisk != entry->hash()) { + throw sg_error("Checksum mismatch", relPath); + } +} + +void verifyRequestCount(const std::string& relPath, int count) +{ + TestRepoEntry* entry = global_repo->findEntry(relPath); + if (!entry) { + throw sg_error("Missing test repo entry", relPath); + } + + if (entry->requestCount != count) { + throw sg_exception("Bad request count", relPath); + } +} + +void createFile(const SGPath& basePath, const std::string& relPath, int revision) +{ + string_list comps = strutils::split(relPath, "/"); + + SGPath p(basePath); + p.append(relPath); + + simgear::Dir d(p.dir()); + d.create(0700); + + std::string prName = comps.at(comps.size() - 2); + { + std::ofstream f(p.str(), std::ios::trunc | std::ios::out); + f << dataForFile(prName, comps.back(), revision); + } +} + +TestServer testServer; + +void waitForUpdateComplete(HTTP::Client* cl, HTTPRepository* repo) +{ + SGTimeStamp start(SGTimeStamp::now()); + while (start.elapsedMSec() < 10000) { + cl->update(); + testServer.poll(); + + if 
(!repo->isDoingSync()) { + return; + } + SGTimeStamp::sleepForMSec(15); + } + + std::cerr << "timed out" << std::endl; +} + +void testBasicClone(HTTP::Client* cl) +{ + std::auto_ptr repo; + SGPath p(simgear::Dir::current().path()); + p.append("http_repo_basic"); + simgear::Dir pd(p); + pd.removeChildren(); + + repo.reset(new HTTPRepository(p, cl)); + repo->setBaseUrl("http://localhost:2000/repo"); + repo->update(); + + waitForUpdateComplete(cl, repo.get()); + + verifyFileState(p, "fileA"); + verifyFileState(p, "dirA/subdirA/fileAAA"); + verifyFileState(p, "dirC/subdirA/subsubA/fileCAAA"); + + global_repo->findEntry("fileA")->revision++; + global_repo->findEntry("dirB/subdirA/fileBAA")->revision++; + global_repo->defineFile("dirC/fileCA"); // new file + global_repo->findEntry("dirB/subdirA")->removeChild("fileBAB"); + global_repo->findEntry("dirA")->removeChild("subdirA"); // remove a dir + + repo->update(); + + // verify deltas + waitForUpdateComplete(cl, repo.get()); + + verifyFileState(p, "fileA"); + verifyFileState(p, "dirC/fileCA"); + + std::cout << "Passed test: basic clone and update" << std::endl; +} + +void testModifyLocalFiles(HTTP::Client* cl) +{ + std::auto_ptr repo; + SGPath p(simgear::Dir::current().path()); + p.append("http_repo_modify_local_2"); + simgear::Dir pd(p); + if (pd.exists()) { + pd.removeChildren(); + } + + repo.reset(new HTTPRepository(p, cl)); + repo->setBaseUrl("http://localhost:2000/repo"); + repo->update(); + + waitForUpdateComplete(cl, repo.get()); + verifyFileState(p, "dirB/subdirA/fileBAA"); + + SGPath modFile(p); + modFile.append("dirB/subdirA/fileBAA"); + { + std::ofstream of(modFile.str(), std::ios::out | std::ios::trunc); + of << "complete nonsense"; + of.close(); + } + + global_repo->clearRequestCounts(); + repo->update(); + waitForUpdateComplete(cl, repo.get()); + verifyFileState(p, "dirB/subdirA/fileBAA"); + verifyRequestCount("dirB", 0); + verifyRequestCount("dirB/subdirA", 0); + verifyRequestCount("dirB/subdirA/fileBAA", 1); + + std::cout << "Passed test: identify and fix locally modified files" << std::endl; +} + +void testNoChangesUpdate() +{ + +} + +void testMergeExistingFileWithoutDownload(HTTP::Client* cl) +{ + std::auto_ptr repo; + SGPath p(simgear::Dir::current().path()); + p.append("http_repo_merge_existing"); + simgear::Dir pd(p); + if (pd.exists()) { + pd.removeChildren(); + } + + repo.reset(new HTTPRepository(p, cl)); + repo->setBaseUrl("http://localhost:2000/repo"); + + createFile(p, "dirC/fileCB", 4); // should match + createFile(p, "dirC/fileCC", 3); // mismatch + + global_repo->defineFile("dirC/fileCB", 4); + global_repo->defineFile("dirC/fileCC", 10); + + // new sub-tree + createFile(p, "dirD/fileDA", 4); + createFile(p, "dirD/subdirDA/fileDAA", 6); + createFile(p, "dirD/subdirDB/fileDBA", 6); + + global_repo->defineFile("dirD/fileDA", 4); + global_repo->defineFile("dirD/subdirDA/fileDAA", 6); + global_repo->defineFile("dirD/subdirDB/fileDBA", 6); + + repo->update(); + waitForUpdateComplete(cl, repo.get()); + verifyFileState(p, "dirC/fileCB"); + verifyFileState(p, "dirC/fileCC"); + verifyRequestCount("dirC/fileCB", 0); + verifyRequestCount("dirC/fileCC", 1); + + verifyRequestCount("dirD/fileDA", 0); + verifyRequestCount("dirD/subdirDA/fileDAA", 0); + verifyRequestCount("dirD/subdirDB/fileDBA", 0); + + std::cout << "Passed test: merge existing files with matching hash" << std::endl; +} + +void testLossOfLocalFiles(HTTP::Client* cl) +{ + std::auto_ptr repo; + SGPath p(simgear::Dir::current().path()); + 
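+    // same pattern as the tests above: clone into a scratch directory, delete
+    // part of the local tree, then verify that a second update() re-fetches only
+    // the missing subtree (checked via the request counts below)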
p.append("http_repo_lose_local"); + simgear::Dir pd(p); + if (pd.exists()) { + pd.removeChildren(); + } + + repo.reset(new HTTPRepository(p, cl)); + repo->setBaseUrl("http://localhost:2000/repo"); + repo->update(); + waitForUpdateComplete(cl, repo.get()); + verifyFileState(p, "dirB/subdirA/fileBAA"); + + SGPath lostPath(p); + lostPath.append("dirB/subdirA"); + simgear::Dir lpd(lostPath); + lpd.remove(true); + + global_repo->clearRequestCounts(); + + repo->update(); + waitForUpdateComplete(cl, repo.get()); + verifyFileState(p, "dirB/subdirA/fileBAA"); + + verifyRequestCount("dirB", 0); + verifyRequestCount("dirB/subdirA", 1); + verifyRequestCount("dirB/subdirA/fileBAC", 1); + + std::cout << "Passed test: lose and replace local files" << std::endl; +} + +int main(int argc, char* argv[]) +{ + sglog().setLogLevels( SG_ALL, SG_INFO ); + + HTTP::Client cl; + cl.setMaxConnections(1); + + global_repo = new TestRepoEntry(NULL, "root", true); + global_repo->defineFile("fileA"); + global_repo->defineFile("fileB"); + global_repo->defineFile("dirA/fileAA"); + global_repo->defineFile("dirA/fileAB"); + global_repo->defineFile("dirA/fileAC"); + global_repo->defineFile("dirA/subdirA/fileAAA"); + global_repo->defineFile("dirA/subdirA/fileAAB"); + global_repo->defineFile("dirB/subdirA/fileBAA"); + global_repo->defineFile("dirB/subdirA/fileBAB"); + global_repo->defineFile("dirB/subdirA/fileBAC"); + global_repo->defineFile("dirC/subdirA/subsubA/fileCAAA"); + + testBasicClone(&cl); + + testModifyLocalFiles(&cl); + + testLossOfLocalFiles(&cl); + + testMergeExistingFileWithoutDownload(&cl); + + return 0; +} diff --git a/simgear/misc/CMakeLists.txt b/simgear/misc/CMakeLists.txt index 5fa8a904..8bcfc0c5 100644 --- a/simgear/misc/CMakeLists.txt +++ b/simgear/misc/CMakeLists.txt @@ -10,6 +10,7 @@ set(HEADERS interpolator.hxx make_new.hxx sg_dir.hxx + sg_hash.hxx sg_path.hxx sgstream.hxx stdint.hxx @@ -29,6 +30,7 @@ set(SOURCES interpolator.cxx sg_dir.cxx sg_path.cxx + sg_hash.cxx sgstream.cxx strutils.cxx tabbed_values.cxx diff --git a/simgear/misc/sg_hash.cxx b/simgear/misc/sg_hash.cxx new file mode 100644 index 00000000..a83ff7a1 --- /dev/null +++ b/simgear/misc/sg_hash.cxx @@ -0,0 +1,28 @@ +// Hash functions with simgear API +// +// Copyright (C) 2016 James Turner +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Library General Public +// License as published by the Free Software Foundation; either +// version 2 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Library General Public License for more details. 
+// +// You should have received a copy of the GNU Library General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA + +#include "sg_hash.hxx" + +#include + +namespace simgear +{ + +#include "sha1.c" + +} diff --git a/simgear/misc/sg_hash.hxx b/simgear/misc/sg_hash.hxx new file mode 100644 index 00000000..029e996e --- /dev/null +++ b/simgear/misc/sg_hash.hxx @@ -0,0 +1,60 @@ +// Hash functions with simgear API +// +// Copyright (C) 2016 James Turner +// +// This library is free software; you can redistribute it and/or +// modify it under the terms of the GNU Library General Public +// License as published by the Free Software Foundation; either +// version 2 of the License, or (at your option) any later version. +// +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +// Library General Public License for more details. +// +// You should have received a copy of the GNU Library General Public +// License along with this library; if not, write to the Free Software +// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA + +#include + +namespace simgear +{ + + /* header */ + + #define HASH_LENGTH 20 + #define BLOCK_LENGTH 64 + + typedef struct sha1nfo { + uint32_t buffer[BLOCK_LENGTH/4]; + uint32_t state[HASH_LENGTH/4]; + uint32_t byteCount; + uint8_t bufferOffset; + uint8_t keyBuffer[BLOCK_LENGTH]; + uint8_t innerHash[HASH_LENGTH]; + } sha1nfo; + + /* public API - prototypes - TODO: doxygen*/ + + /** + */ + void sha1_init(sha1nfo *s); + /** + */ + void sha1_writebyte(sha1nfo *s, uint8_t data); + /** + */ + void sha1_write(sha1nfo *s, const char *data, size_t len); + /** + */ + uint8_t* sha1_result(sha1nfo *s); + /** + */ + void sha1_initHmac(sha1nfo *s, const uint8_t* key, int keyLength); + /** + */ + uint8_t* sha1_resultHmac(sha1nfo *s); + + +} diff --git a/simgear/misc/sg_path.cxx b/simgear/misc/sg_path.cxx index 6aadb0a3..ef4a6d9a 100644 --- a/simgear/misc/sg_path.cxx +++ b/simgear/misc/sg_path.cxx @@ -233,7 +233,8 @@ SGPath::SGPath(const SGPath& p) : _exists(p._exists), _isDir(p._isDir), _isFile(p._isFile), - _modTime(p._modTime) + _modTime(p._modTime), + _size(p._size) { } @@ -250,6 +251,7 @@ SGPath& SGPath::operator=(const SGPath& p) _isDir = p._isDir; _isFile = p._isFile; _modTime = p._modTime; + _size = p._size; return *this; } @@ -283,6 +285,7 @@ SGPath::PermissionChecker SGPath::getPermissionChecker() const void SGPath::set_cached(bool cached) { _cacheEnabled = cached; + _cached = false; } // append another piece to the existing path @@ -445,6 +448,7 @@ void SGPath::validate() const _isFile = ((S_IFREG & buf.st_mode ) !=0); _isDir = ((S_IFDIR & buf.st_mode ) !=0); _modTime = buf.st_mtime; + _size = buf.st_size; } #else @@ -457,6 +461,7 @@ void SGPath::validate() const _isFile = ((S_ISREG(buf.st_mode )) != 0); _isDir = ((S_ISDIR(buf.st_mode )) != 0); _modTime = buf.st_mtime; + _size = buf.st_size; } #endif @@ -686,6 +691,12 @@ time_t SGPath::modTime() const return _modTime; } +size_t SGPath::sizeInBytes() const +{ + validate(); + return _size; +} + bool SGPath::operator==(const SGPath& other) const { return (path == other.path); diff --git a/simgear/misc/sg_path.hxx b/simgear/misc/sg_path.hxx index 3e5c22fe..2aad37f5 100644 --- a/simgear/misc/sg_path.hxx +++ b/simgear/misc/sg_path.hxx @@ -252,7 +252,12 @@ 
public: * modification time of the file */ time_t modTime() const; - + + /** + * + */ + size_t sizeInBytes() const; + /** * rename the file / directory we point at, to a new name * this may fail if the new location is on a different volume / share, @@ -315,6 +320,7 @@ private: mutable bool _isDir : 1; mutable bool _isFile : 1; mutable time_t _modTime; + mutable size_t _size; }; /// Output to an ostream diff --git a/simgear/misc/sha1.c b/simgear/misc/sha1.c new file mode 100644 index 00000000..047845b7 --- /dev/null +++ b/simgear/misc/sha1.c @@ -0,0 +1,289 @@ +/* This code is public-domain - it is based on libcrypt + * placed in the public domain by Wei Dai and other contributors. + */ +// gcc -Wall -DSHA1TEST -o sha1test sha1.c && ./sha1test + +//#include +//#include + + +#ifdef __BIG_ENDIAN__ +# define SHA_BIG_ENDIAN +#elif defined __LITTLE_ENDIAN__ +/* override */ +#elif defined __BYTE_ORDER +# if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define SHA_BIG_ENDIAN +# endif +#else // ! defined __LITTLE_ENDIAN__ +# include // machine/endian.h +# if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define SHA_BIG_ENDIAN +# endif +#endif + + +/* code */ +#define SHA1_K0 0x5a827999 +#define SHA1_K20 0x6ed9eba1 +#define SHA1_K40 0x8f1bbcdc +#define SHA1_K60 0xca62c1d6 + +void sha1_init(sha1nfo *s) { + s->state[0] = 0x67452301; + s->state[1] = 0xefcdab89; + s->state[2] = 0x98badcfe; + s->state[3] = 0x10325476; + s->state[4] = 0xc3d2e1f0; + s->byteCount = 0; + s->bufferOffset = 0; +} + +uint32_t sha1_rol32(uint32_t number, uint8_t bits) { + return ((number << bits) | (number >> (32-bits))); +} + +void sha1_hashBlock(sha1nfo *s) { + uint8_t i; + uint32_t a,b,c,d,e,t; + + a=s->state[0]; + b=s->state[1]; + c=s->state[2]; + d=s->state[3]; + e=s->state[4]; + for (i=0; i<80; i++) { + if (i>=16) { + t = s->buffer[(i+13)&15] ^ s->buffer[(i+8)&15] ^ s->buffer[(i+2)&15] ^ s->buffer[i&15]; + s->buffer[i&15] = sha1_rol32(t,1); + } + if (i<20) { + t = (d ^ (b & (c ^ d))) + SHA1_K0; + } else if (i<40) { + t = (b ^ c ^ d) + SHA1_K20; + } else if (i<60) { + t = ((b & c) | (d & (b | c))) + SHA1_K40; + } else { + t = (b ^ c ^ d) + SHA1_K60; + } + t+=sha1_rol32(a,5) + e + s->buffer[i&15]; + e=d; + d=c; + c=sha1_rol32(b,30); + b=a; + a=t; + } + s->state[0] += a; + s->state[1] += b; + s->state[2] += c; + s->state[3] += d; + s->state[4] += e; +} + +void sha1_addUncounted(sha1nfo *s, uint8_t data) { + uint8_t * const b = (uint8_t*) s->buffer; +#ifdef SHA_BIG_ENDIAN + b[s->bufferOffset] = data; +#else + b[s->bufferOffset ^ 3] = data; +#endif + s->bufferOffset++; + if (s->bufferOffset == BLOCK_LENGTH) { + sha1_hashBlock(s); + s->bufferOffset = 0; + } +} + +void sha1_writebyte(sha1nfo *s, uint8_t data) { + ++s->byteCount; + sha1_addUncounted(s, data); +} + +void sha1_write(sha1nfo *s, const char *data, size_t len) { + for (;len--;) sha1_writebyte(s, (uint8_t) *data++); +} + +void sha1_pad(sha1nfo *s) { + // Implement SHA-1 padding (fips180-2 §5.1.1) + + // Pad with 0x80 followed by 0x00 until the end of the block + sha1_addUncounted(s, 0x80); + while (s->bufferOffset != 56) sha1_addUncounted(s, 0x00); + + // Append length in the last 8 bytes + sha1_addUncounted(s, 0); // We're only using 32 bit lengths + sha1_addUncounted(s, 0); // But SHA-1 supports 64 bit lengths + sha1_addUncounted(s, 0); // So zero pad the top bits + sha1_addUncounted(s, s->byteCount >> 29); // Shifting to multiply by 8 + sha1_addUncounted(s, s->byteCount >> 21); // as SHA-1 supports bitstreams as well as + sha1_addUncounted(s, s->byteCount >> 13); // byte. 
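+	// (byteCount counts bytes; the five shifted values above and below emit the
+	// message length in *bits* as a big-endian 64-bit quantity, as SHA-1 padding
+	// requires)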
+ sha1_addUncounted(s, s->byteCount >> 5); + sha1_addUncounted(s, s->byteCount << 3); +} + +uint8_t* sha1_result(sha1nfo *s) { + // Pad to complete the last block + sha1_pad(s); + +#ifndef SHA_BIG_ENDIAN + // Swap byte order back + int i; + for (i=0; i<5; i++) { + s->state[i]= + (((s->state[i])<<24)& 0xff000000) + | (((s->state[i])<<8) & 0x00ff0000) + | (((s->state[i])>>8) & 0x0000ff00) + | (((s->state[i])>>24)& 0x000000ff); + } +#endif + + // Return pointer to hash (20 characters) + return (uint8_t*) s->state; +} + +#define HMAC_IPAD 0x36 +#define HMAC_OPAD 0x5c + +void sha1_initHmac(sha1nfo *s, const uint8_t* key, int keyLength) { + uint8_t i; + memset(s->keyBuffer, 0, BLOCK_LENGTH); + if (keyLength > BLOCK_LENGTH) { + // Hash long keys + sha1_init(s); + for (;keyLength--;) sha1_writebyte(s, *key++); + memcpy(s->keyBuffer, sha1_result(s), HASH_LENGTH); + } else { + // Block length keys are used as is + memcpy(s->keyBuffer, key, keyLength); + } + // Start inner hash + sha1_init(s); + for (i=0; ikeyBuffer[i] ^ HMAC_IPAD); + } +} + +uint8_t* sha1_resultHmac(sha1nfo *s) { + uint8_t i; + // Complete inner hash + memcpy(s->innerHash,sha1_result(s),HASH_LENGTH); + // Calculate outer hash + sha1_init(s); + for (i=0; ikeyBuffer[i] ^ HMAC_OPAD); + for (i=0; iinnerHash[i]); + return sha1_result(s); +} + +/* self-test */ + +#if SHA1TEST +#include + +uint8_t hmacKey1[]={ + 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f, + 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f, + 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f, + 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f +}; +uint8_t hmacKey2[]={ + 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f, + 0x40,0x41,0x42,0x43 +}; +uint8_t hmacKey3[]={ + 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f, + 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f, + 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f, + 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f, + 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f, + 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf, + 0xb0,0xb1,0xb2,0xb3 +}; +uint8_t hmacKey4[]={ + 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f, + 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87,0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f, + 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f, + 0xa0 +}; + +void printHash(uint8_t* hash) { + int i; + for (i=0; i<20; i++) { + printf("%02x", hash[i]); + } + printf("\n"); +} + + +int main (int argc, char **argv) { + uint32_t a; + sha1nfo s; + + // SHA tests + printf("Test: FIPS 180-2 C.1 and RFC3174 7.3 TEST1\n"); + printf("Expect:a9993e364706816aba3e25717850c26c9cd0d89d\n"); + printf("Result:"); + sha1_init(&s); + sha1_write(&s, "abc", 3); + printHash(sha1_result(&s)); + printf("\n\n"); + + printf("Test: FIPS 180-2 C.2 and RFC3174 7.3 TEST2\n"); + printf("Expect:84983e441c3bd26ebaae4aa1f95129e5e54670f1\n"); + printf("Result:"); + sha1_init(&s); + sha1_write(&s, "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", 56); + printHash(sha1_result(&s)); + printf("\n\n"); + + printf("Test: RFC3174 7.3 TEST4\n"); + printf("Expect:dea356a2cddd90c7a7ecedc5ebb563934f460452\n"); + printf("Result:"); + sha1_init(&s); + for 
(a=0; a<80; a++) sha1_write(&s, "01234567", 8); + printHash(sha1_result(&s)); + printf("\n\n"); + + // HMAC tests + printf("Test: FIPS 198a A.1\n"); + printf("Expect:4f4ca3d5d68ba7cc0a1208c9c61e9c5da0403c0a\n"); + printf("Result:"); + sha1_initHmac(&s, hmacKey1, 64); + sha1_write(&s, "Sample #1",9); + printHash(sha1_resultHmac(&s)); + printf("\n\n"); + + printf("Test: FIPS 198a A.2\n"); + printf("Expect:0922d3405faa3d194f82a45830737d5cc6c75d24\n"); + printf("Result:"); + sha1_initHmac(&s, hmacKey2, 20); + sha1_write(&s, "Sample #2", 9); + printHash(sha1_resultHmac(&s)); + printf("\n\n"); + + printf("Test: FIPS 198a A.3\n"); + printf("Expect:bcf41eab8bb2d802f3d05caf7cb092ecf8d1a3aa\n"); + printf("Result:"); + sha1_initHmac(&s, hmacKey3,100); + sha1_write(&s, "Sample #3", 9); + printHash(sha1_resultHmac(&s)); + printf("\n\n"); + + printf("Test: FIPS 198a A.4\n"); + printf("Expect:9ea886efe268dbecce420c7524df32e0751a2a26\n"); + printf("Result:"); + sha1_initHmac(&s, hmacKey4,49); + sha1_write(&s, "Sample #4", 9); + printHash(sha1_resultHmac(&s)); + printf("\n\n"); + + // Long tests + printf("Test: FIPS 180-2 C.3 and RFC3174 7.3 TEST3\n"); + printf("Expect:34aa973cd4c4daa4f61eeb2bdbad27316534016f\n"); + printf("Result:"); + sha1_init(&s); + for (a=0; a<1000000; a++) sha1_writebyte(&s, 'a'); + printHash(sha1_result(&s)); + + return 0; +} +#endif /* self-test */
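The SHA-1 helpers added above can also be used on their own through the simgear wrapper; a small sketch (the include paths are assumed, and the expected digest for "abc" is the FIPS test vector printed by the self-test above):

    #include <iostream>
    #include <string>

    #include <simgear/misc/sg_hash.hxx>
    #include <simgear/misc/strutils.hxx>

    int main()
    {
        simgear::sha1nfo info;
        simgear::sha1_init(&info);

        const std::string data("abc");
        simgear::sha1_write(&info, data.data(), data.size());

        // 20-byte digest rendered as 40 hex characters; "abc" should print
        // a9993e364706816aba3e25717850c26c9cd0d89d
        std::string hex = simgear::strutils::encodeHex(simgear::sha1_result(&info),
                                                       HASH_LENGTH);
        std::cout << hex << std::endl;
        return 0;
    }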