mirror of
https://github.com/subsurface/subsurface.git
synced 2025-02-19 22:16:15 +00:00
Merge branch 'gitMerge'
This commit is contained in:
commit
c22adebebe
17 changed files with 467 additions and 58 deletions
|
@ -571,6 +571,7 @@ if(NOT NO_TESTS)
|
|||
TEST(TestProfile testprofile.cpp)
|
||||
TEST(TestGpsCoords testgpscoords.cpp)
|
||||
TEST(TestParse testparse.cpp)
|
||||
TEST(TestGitStorage testgitstorage.cpp)
|
||||
TEST(TestPlan testplan.cpp)
|
||||
endif()
|
||||
|
||||
|
|
|
@ -222,7 +222,7 @@ static struct dive dt_dive_parser(FILE *archivo, struct dive *dt_dive)
|
|||
snprintf(buffer, sizeof(buffer), "%s, %s", locality, dive_point);
|
||||
dt_dive->dive_site_uuid = get_dive_site_uuid_by_name(buffer, NULL);
|
||||
if (dt_dive->dive_site_uuid == 0)
|
||||
dt_dive->dive_site_uuid = create_dive_site(buffer);
|
||||
dt_dive->dive_site_uuid = create_dive_site(buffer, dt_dive->when);
|
||||
free(locality);
|
||||
free(dive_point);
|
||||
|
||||
|
|
2
dive.c
2
dive.c
|
@ -3033,7 +3033,7 @@ void dive_set_geodata_from_picture(struct dive *dive, struct picture *picture)
|
|||
ds->latitude = picture->latitude;
|
||||
ds->longitude = picture->longitude;
|
||||
} else {
|
||||
dive->dive_site_uuid = create_dive_site_with_gps("", picture->latitude, picture->longitude);
|
||||
dive->dive_site_uuid = create_dive_site_with_gps("", picture->latitude, picture->longitude, dive->when);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
49
divesite.c
49
divesite.c
|
@ -104,10 +104,13 @@ struct dive_site *alloc_dive_site(uint32_t uuid)
|
|||
exit(1);
|
||||
sites[nr] = ds;
|
||||
dive_site_table.nr = nr + 1;
|
||||
if (uuid)
|
||||
if (uuid) {
|
||||
if (get_dive_site_by_uuid(uuid))
|
||||
fprintf(stderr, "PROBLEM: duplicate uuid %08x\n", uuid);
|
||||
ds->uuid = uuid;
|
||||
else
|
||||
} else {
|
||||
ds->uuid = dive_site_getUniqId();
|
||||
}
|
||||
return ds;
|
||||
}
|
||||
|
||||
|
@ -157,19 +160,33 @@ void delete_dive_site(uint32_t id)
|
|||
}
|
||||
}
|
||||
|
||||
/* allocate a new site and add it to the table */
|
||||
uint32_t create_dive_site(const char *name)
|
||||
uint32_t create_divesite_uuid(const char *name, timestamp_t divetime)
|
||||
{
|
||||
struct dive_site *ds = alloc_dive_site(0);
|
||||
unsigned char hash[20];
|
||||
SHA_CTX ctx;
|
||||
SHA1_Init(&ctx);
|
||||
SHA1_Update(&ctx, &divetime, sizeof(timestamp_t));
|
||||
SHA1_Update(&ctx, name, strlen(name));
|
||||
SHA1_Final(hash, &ctx);
|
||||
// now return the first 32 of the 160 bit hash
|
||||
return *(uint32_t *)hash;
|
||||
}
|
||||
|
||||
/* allocate a new site and add it to the table */
|
||||
uint32_t create_dive_site(const char *name, timestamp_t divetime)
|
||||
{
|
||||
uint32_t uuid = create_divesite_uuid(name, divetime);
|
||||
struct dive_site *ds = alloc_dive_site(uuid);
|
||||
ds->name = copy_string(name);
|
||||
|
||||
return ds->uuid;
|
||||
return uuid;
|
||||
}
|
||||
|
||||
/* same as before, but with GPS data */
|
||||
uint32_t create_dive_site_with_gps(const char *name, degrees_t latitude, degrees_t longitude)
|
||||
uint32_t create_dive_site_with_gps(const char *name, degrees_t latitude, degrees_t longitude, timestamp_t divetime)
|
||||
{
|
||||
struct dive_site *ds = alloc_dive_site(0);
|
||||
uint32_t uuid = create_divesite_uuid(name, divetime);
|
||||
struct dive_site *ds = alloc_dive_site(uuid);
|
||||
ds->name = copy_string(name);
|
||||
ds->latitude = latitude;
|
||||
ds->longitude = longitude;
|
||||
|
@ -231,7 +248,7 @@ void clear_dive_site(struct dive_site *ds)
|
|||
free_taxonomy(&ds->taxonomy);
|
||||
}
|
||||
|
||||
uint32_t find_or_create_dive_site_with_name(const char *name)
|
||||
uint32_t find_or_create_dive_site_with_name(const char *name, timestamp_t divetime)
|
||||
{
|
||||
int i;
|
||||
struct dive_site *ds;
|
||||
|
@ -244,5 +261,17 @@ uint32_t find_or_create_dive_site_with_name(const char *name)
|
|||
}
|
||||
if (ds)
|
||||
return ds->uuid;
|
||||
return create_dive_site(name);
|
||||
return create_dive_site(name, divetime);
|
||||
}
|
||||
|
||||
static int compare_sites(const void *_a, const void *_b)
|
||||
{
|
||||
const struct dive_site *a = (const struct dive_site *)*(void **)_a;
|
||||
const struct dive_site *b = (const struct dive_site *)*(void **)_b;
|
||||
return a->uuid > b->uuid ? 1 : a->uuid == b->uuid ? 0 : -1;
|
||||
}
|
||||
|
||||
void dive_site_table_sort()
|
||||
{
|
||||
qsort(dive_site_table.dive_sites, dive_site_table.nr, sizeof(struct dive_site *), compare_sites);
|
||||
}
|
||||
|
|
|
@ -49,12 +49,13 @@ static inline struct dive_site *get_dive_site_by_uuid(uint32_t uuid)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void dive_site_table_sort();
|
||||
struct dive_site *alloc_dive_site(uint32_t uuid);
|
||||
int nr_of_dives_at_dive_site(uint32_t uuid, bool select_only);
|
||||
bool is_dive_site_used(uint32_t uuid, bool select_only);
|
||||
void delete_dive_site(uint32_t id);
|
||||
uint32_t create_dive_site(const char *name);
|
||||
uint32_t create_dive_site_with_gps(const char *name, degrees_t latitude, degrees_t longitude);
|
||||
uint32_t create_dive_site(const char *name, timestamp_t divetime);
|
||||
uint32_t create_dive_site_with_gps(const char *name, degrees_t latitude, degrees_t longitude, timestamp_t divetime);
|
||||
uint32_t get_dive_site_uuid_by_name(const char *name, struct dive_site **dsp);
|
||||
uint32_t get_dive_site_uuid_by_gps(degrees_t latitude, degrees_t longitude, struct dive_site **dsp);
|
||||
uint32_t get_dive_site_uuid_by_gps_proximity(degrees_t latitude, degrees_t longitude, int distance, struct dive_site **dsp);
|
||||
|
@ -62,7 +63,7 @@ bool dive_site_is_empty(struct dive_site *ds);
|
|||
void copy_dive_site(struct dive_site *orig, struct dive_site *copy);
|
||||
void clear_dive_site(struct dive_site *ds);
|
||||
unsigned int get_distance(degrees_t lat1, degrees_t lon1, degrees_t lat2, degrees_t lon2);
|
||||
uint32_t find_or_create_dive_site_with_name(const char *name);
|
||||
uint32_t find_or_create_dive_site_with_name(const char *name, timestamp_t divetime);
|
||||
|
||||
#define INVALID_DIVE_SITE_NAME "development use only - not a valid dive site name"
|
||||
|
||||
|
|
126
git-access.c
126
git-access.c
|
@ -40,12 +40,12 @@
|
|||
/*
|
||||
* api break introduced in libgit2 master after 0.22 - let's guess this is the v0.23 API
|
||||
*/
|
||||
#if USE_LIBGIT23_API
|
||||
#if USE_LIBGIT23_API || (!LIBGIT2_VER_MAJOR && LIBGIT2_VER_MINOR >= 23)
|
||||
#define git_branch_create(out, repo, branch_name, target, force, signature, log_message) \
|
||||
git_branch_create(out, repo, branch_name, target, force)
|
||||
#endif
|
||||
|
||||
static char *get_local_dir(const char *remote, const char *branch)
|
||||
char *get_local_dir(const char *remote, const char *branch)
|
||||
{
|
||||
SHA_CTX ctx;
|
||||
unsigned char hash[20];
|
||||
|
@ -69,7 +69,7 @@ static int check_clean(const char *path, unsigned int status, void *payload)
|
|||
status &= ~GIT_STATUS_CURRENT | GIT_STATUS_IGNORED;
|
||||
if (!status)
|
||||
return 0;
|
||||
report_error("WARNING: Git cache directory modified (path %s)", path);
|
||||
report_error("WARNING: Git cache directory modified (path %s) status %0x", path, status);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -143,11 +143,12 @@ int certificate_check_cb(git_cert *cert, int valid, const char *host, void *payl
|
|||
SHA1_Update(&ctx, cert509->data, cert509->len);
|
||||
SHA1_Final(hash, &ctx);
|
||||
hash[20] = 0;
|
||||
if (same_string((char *)hash, KNOWN_CERT)) {
|
||||
fprintf(stderr, "cloud certificate considered %s, forcing it valid\n",
|
||||
valid ? "valid" : "not valid");
|
||||
return 1;
|
||||
}
|
||||
if (verbose > 1)
|
||||
if (same_string((char *)hash, KNOWN_CERT)) {
|
||||
fprintf(stderr, "cloud certificate considered %s, forcing it valid\n",
|
||||
valid ? "valid" : "not valid");
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return valid;
|
||||
}
|
||||
|
@ -176,6 +177,101 @@ static int update_remote(git_repository *repo, git_remote *origin, git_reference
|
|||
return 0;
|
||||
}
|
||||
|
||||
extern int update_git_checkout(git_repository *repo, git_object *parent, git_tree *tree);
|
||||
|
||||
static int try_to_git_merge(git_repository *repo, git_reference *local, git_reference *remote, git_oid *base, const git_oid *local_id, const git_oid *remote_id)
|
||||
{
|
||||
git_tree *local_tree, *remote_tree, *base_tree;
|
||||
git_commit *local_commit, *remote_commit, *base_commit;
|
||||
git_index *merged_index;
|
||||
git_merge_options merge_options;
|
||||
|
||||
if (verbose) {
|
||||
char outlocal[41], outremote[41];
|
||||
outlocal[40] = outremote[40] = 0;
|
||||
git_oid_fmt(outlocal, local_id);
|
||||
git_oid_fmt(outremote, remote_id);
|
||||
fprintf(stderr, "trying to merge local SHA %s remote SHA %s\n", outlocal, outremote);
|
||||
}
|
||||
|
||||
git_merge_init_options(&merge_options, GIT_MERGE_OPTIONS_VERSION);
|
||||
merge_options.tree_flags = GIT_MERGE_TREE_FIND_RENAMES;
|
||||
merge_options.file_favor = GIT_MERGE_FILE_FAVOR_UNION;
|
||||
merge_options.rename_threshold = 100;
|
||||
if (git_commit_lookup(&local_commit, repo, local_id))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: can't get commit (%s)"), giterr_last()->message);
|
||||
if (git_commit_tree(&local_tree, local_commit))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: failed local tree lookup (%s)"), giterr_last()->message);
|
||||
if (git_commit_lookup(&remote_commit, repo, remote_id))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: can't get commit (%s)"), giterr_last()->message);
|
||||
if (git_commit_tree(&remote_tree, remote_commit))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: failed remote tree lookup (%s)"), giterr_last()->message);
|
||||
if (git_commit_lookup(&base_commit, repo, base))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: can't get commit: (%s)"), giterr_last()->message);
|
||||
if (git_commit_tree(&base_tree, base_commit))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: failed base tree lookup: (%s)"), giterr_last()->message);
|
||||
if (git_merge_trees(&merged_index, repo, base_tree, local_tree, remote_tree, &merge_options))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: merge failed (%s)"), giterr_last()->message);
|
||||
if (git_index_has_conflicts(merged_index)) {
|
||||
int error;
|
||||
const git_index_entry *ancestor = NULL,
|
||||
*ours = NULL,
|
||||
*theirs = NULL;
|
||||
git_index_conflict_iterator *iter = NULL;
|
||||
error = git_index_conflict_iterator_new(&iter, merged_index);
|
||||
while (git_index_conflict_next(&ancestor, &ours, &theirs, iter)
|
||||
!= GIT_ITEROVER) {
|
||||
/* Mark this conflict as resolved */
|
||||
fprintf(stderr, "conflict in %s / %s / %s -- ",
|
||||
ours ? ours->path : "-",
|
||||
theirs ? theirs->path : "-",
|
||||
ancestor ? ancestor->path : "-");
|
||||
if ((!ours && theirs && ancestor) ||
|
||||
(ours && !theirs && ancestor)) {
|
||||
// the file was removed on one side or the other - just remove it
|
||||
fprintf(stderr, "looks like a delete on one side; removing the file from the index\n");
|
||||
error = git_index_remove(merged_index, ours ? ours->path : theirs->path, GIT_INDEX_STAGE_ANY);
|
||||
} else {
|
||||
error = git_index_conflict_remove(merged_index, ours ? ours->path : theirs ? theirs->path : ancestor->path);
|
||||
}
|
||||
if (error) {
|
||||
fprintf(stderr, "error at conflict resplution (%s)", giterr_last()->message);
|
||||
}
|
||||
}
|
||||
git_index_conflict_cleanup(merged_index);
|
||||
git_index_conflict_iterator_free(iter);
|
||||
report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: merge conflict - manual intervention needed"));
|
||||
}
|
||||
git_oid merge_oid, commit_oid;
|
||||
git_tree *merged_tree;
|
||||
git_signature *author;
|
||||
git_commit *commit;
|
||||
|
||||
if (git_index_write_tree_to(&merge_oid, merged_index, repo))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: writing the tree failed (%s)"), giterr_last()->message);
|
||||
if (git_tree_lookup(&merged_tree, repo, &merge_oid))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: tree lookup failed (%s)"), giterr_last()->message);
|
||||
if (git_signature_default(&author, repo) < 0)
|
||||
return report_error(translate("gettextFromC", "Failed to get author: (%s)"), giterr_last()->message);
|
||||
if (git_commit_create_v(&commit_oid, repo, NULL, author, author, NULL, "automatic merge", merged_tree, 2, local_commit, remote_commit))
|
||||
return report_error(translate("gettextFromC", "Remote storage and local data diverged. Error: git commit create failed (%s)"), giterr_last()->message);
|
||||
if (git_commit_lookup(&commit, repo, &commit_oid))
|
||||
return report_error(translate("gettextFromC", "Error: could not lookup the merge commit I just created (%s)"), giterr_last()->message);
|
||||
if (git_branch_is_head(local) && !git_repository_is_bare(repo)) {
|
||||
git_object *parent;
|
||||
git_reference_peel(&parent, local, GIT_OBJ_COMMIT);
|
||||
if (update_git_checkout(repo, parent, merged_tree)) {
|
||||
report_error("Warning: checked out branch is inconsistent with git data");
|
||||
}
|
||||
}
|
||||
if (git_reference_set_target(&local, local, &commit_oid, "Subsurface merge event"))
|
||||
return report_error("Error: failed to update branch (%s)", giterr_last()->message);
|
||||
set_git_id(&commit_oid);
|
||||
git_signature_free(author);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int try_to_update(git_repository *repo, git_remote *origin, git_reference *local, git_reference *remote, enum remote_transport rt)
|
||||
{
|
||||
git_oid base;
|
||||
|
@ -214,16 +310,8 @@ static int try_to_update(git_repository *repo, git_remote *origin, git_reference
|
|||
if (git_branch_is_head(local) != 1)
|
||||
return report_error("Local and remote do not match, local branch not HEAD - cannot update");
|
||||
|
||||
/*
|
||||
* Some day we migth try a clean merge here.
|
||||
*
|
||||
* But I couldn't find any good examples of this, so for now
|
||||
* you'd need to merge divergent histories manually. But we've
|
||||
* at least verified above that we have a working tree and the
|
||||
* current branch is checked out and clean, so we *could* try
|
||||
* to merge.
|
||||
*/
|
||||
return report_error("Local and remote have diverged, need to merge");
|
||||
/* Ok, let's try to merge these */
|
||||
return try_to_git_merge(repo, local, remote, &base, local_id, remote_id);
|
||||
}
|
||||
|
||||
static int check_remote_status(git_repository *repo, git_remote *origin, const char *branch, enum remote_transport rt)
|
||||
|
@ -266,6 +354,8 @@ int sync_with_remote(git_repository *repo, const char *remote, const char *branc
|
|||
char *proxy_string;
|
||||
git_config *conf;
|
||||
|
||||
if (verbose)
|
||||
fprintf(stderr, "sync with remote %s[%s]\n", remote, branch);
|
||||
git_repository_config(&conf, repo);
|
||||
if (rt == RT_HTTPS && getProxyString(&proxy_string)) {
|
||||
git_config_set_string(conf, "http.proxy", proxy_string);
|
||||
|
|
|
@ -101,6 +101,7 @@ static void parse_dives (int log_version, const unsigned char *buf, unsigned int
|
|||
|
||||
while (ptr < buf_size) {
|
||||
int i;
|
||||
bool found_divesite = false;
|
||||
dive = alloc_dive();
|
||||
primary_sensor = 0;
|
||||
dc = &dive->dc;
|
||||
|
@ -148,10 +149,8 @@ static void parse_dives (int log_version, const unsigned char *buf, unsigned int
|
|||
}
|
||||
|
||||
/* Store the location only if we have one */
|
||||
if (len || place_len) {
|
||||
dive->dive_site_uuid = find_or_create_dive_site_with_name(location);
|
||||
free(location);
|
||||
}
|
||||
if (len || place_len)
|
||||
found_divesite = true;
|
||||
|
||||
ptr += len + 4 + place_len;
|
||||
|
||||
|
@ -183,6 +182,12 @@ static void parse_dives (int log_version, const unsigned char *buf, unsigned int
|
|||
dive->when = array_uint32_le(buf + ptr);
|
||||
ptr += 4;
|
||||
|
||||
// now that we have the dive time we can store the divesite
|
||||
// (we need the dive time to create deterministic uuids)
|
||||
if (found_divesite) {
|
||||
dive->dive_site_uuid = find_or_create_dive_site_with_name(location, dive->when);
|
||||
free(location);
|
||||
}
|
||||
//unsigned int end_time = array_uint32_le(buf + ptr);
|
||||
ptr += 4;
|
||||
|
||||
|
|
10
load-git.c
10
load-git.c
|
@ -179,7 +179,7 @@ static void parse_dive_gps(char *line, struct membuffer *str, void *_dive)
|
|||
if (!ds) {
|
||||
uuid = get_dive_site_uuid_by_gps(latitude, longitude, NULL);
|
||||
if (!uuid)
|
||||
uuid = create_dive_site_with_gps("", latitude, longitude);
|
||||
uuid = create_dive_site_with_gps("", latitude, longitude, dive->when);
|
||||
dive->dive_site_uuid = uuid;
|
||||
} else {
|
||||
if (dive_site_has_gps_location(ds) &&
|
||||
|
@ -204,7 +204,7 @@ static void parse_dive_location(char *line, struct membuffer *str, void *_dive)
|
|||
if (!ds) {
|
||||
uuid = get_dive_site_uuid_by_name(name, NULL);
|
||||
if (!uuid)
|
||||
uuid = create_dive_site(name);
|
||||
uuid = create_dive_site(name, dive->when);
|
||||
dive->dive_site_uuid = uuid;
|
||||
} else {
|
||||
// we already had a dive site linked to the dive
|
||||
|
@ -1443,8 +1443,8 @@ static int parse_site_entry(git_repository *repo, const git_tree_entry *entry, c
|
|||
{
|
||||
if (*suffix == '\0')
|
||||
return report_error("Dive site without uuid");
|
||||
struct dive_site *ds = alloc_dive_site(0);
|
||||
ds->uuid = strtoul(suffix, NULL, 16);
|
||||
uint32_t uuid = strtoul(suffix, NULL, 16);
|
||||
struct dive_site *ds = alloc_dive_site(uuid);
|
||||
git_blob *blob = git_tree_entry_blob(repo, entry);
|
||||
if (!blob)
|
||||
return report_error("Unable to read dive site file");
|
||||
|
@ -1531,6 +1531,8 @@ static int walk_tree_file(const char *root, const git_tree_entry *entry, git_rep
|
|||
struct dive *dive = active_dive;
|
||||
dive_trip_t *trip = active_trip;
|
||||
const char *name = git_tree_entry_name(entry);
|
||||
if (verbose)
|
||||
fprintf(stderr, "git load handling file %s\n", name);
|
||||
switch (*name) {
|
||||
/* Picture file? They are saved as time offsets in the dive */
|
||||
case '-': case '+':
|
||||
|
|
16
parse-xml.c
16
parse-xml.c
|
@ -990,7 +990,7 @@ static void divinglog_place(char *place, uint32_t *uuid)
|
|||
country ? country : "");
|
||||
*uuid = get_dive_site_uuid_by_name(buffer, NULL);
|
||||
if (*uuid == 0)
|
||||
*uuid = create_dive_site(buffer);
|
||||
*uuid = create_dive_site(buffer, cur_dive->when);
|
||||
|
||||
city = NULL;
|
||||
country = NULL;
|
||||
|
@ -1137,7 +1137,7 @@ static void gps_lat(char *buffer, struct dive *dive)
|
|||
degrees_t latitude = parse_degrees(buffer, &end);
|
||||
struct dive_site *ds = get_dive_site_for_dive(dive);
|
||||
if (!ds) {
|
||||
dive->dive_site_uuid = create_dive_site_with_gps(NULL, latitude, (degrees_t){0});
|
||||
dive->dive_site_uuid = create_dive_site_with_gps(NULL, latitude, (degrees_t){0}, dive->when);
|
||||
} else {
|
||||
if (ds->latitude.udeg && ds->latitude.udeg != latitude.udeg)
|
||||
fprintf(stderr, "Oops, changing the latitude of existing dive site id %8x name %s; not good\n", ds->uuid, ds->name ?: "(unknown)");
|
||||
|
@ -1151,7 +1151,7 @@ static void gps_long(char *buffer, struct dive *dive)
|
|||
degrees_t longitude = parse_degrees(buffer, &end);
|
||||
struct dive_site *ds = get_dive_site_for_dive(dive);
|
||||
if (!ds) {
|
||||
dive->dive_site_uuid = create_dive_site_with_gps(NULL, (degrees_t){0}, longitude);
|
||||
dive->dive_site_uuid = create_dive_site_with_gps(NULL, (degrees_t){0}, longitude, dive->when);
|
||||
} else {
|
||||
if (ds->longitude.udeg && ds->longitude.udeg != longitude.udeg)
|
||||
fprintf(stderr, "Oops, changing the longitude of existing dive site id %8x name %s; not good\n", ds->uuid, ds->name ?: "(unknown)");
|
||||
|
@ -1189,7 +1189,7 @@ static void gps_in_dive(char *buffer, struct dive *dive)
|
|||
cur_longitude = longitude;
|
||||
dive->dive_site_uuid = uuid;
|
||||
} else {
|
||||
dive->dive_site_uuid = create_dive_site_with_gps("", latitude, longitude);
|
||||
dive->dive_site_uuid = create_dive_site_with_gps("", latitude, longitude, dive->when);
|
||||
ds = get_dive_site_by_uuid(dive->dive_site_uuid);
|
||||
}
|
||||
} else {
|
||||
|
@ -1247,7 +1247,7 @@ static void add_dive_site(char *ds_name, struct dive *dive)
|
|||
ds->name = copy_string(buffer);
|
||||
} else if (!same_string(ds->name, buffer)) {
|
||||
// if it's not the same name, it's not the same dive site
|
||||
dive->dive_site_uuid = create_dive_site(buffer);
|
||||
dive->dive_site_uuid = create_dive_site(buffer, dive->when);
|
||||
struct dive_site *newds = get_dive_site_by_uuid(dive->dive_site_uuid);
|
||||
if (cur_latitude.udeg || cur_longitude.udeg) {
|
||||
// we started this uuid with GPS data, so lets use those
|
||||
|
@ -1263,7 +1263,7 @@ static void add_dive_site(char *ds_name, struct dive *dive)
|
|||
dive->dive_site_uuid = uuid;
|
||||
}
|
||||
} else {
|
||||
dive->dive_site_uuid = create_dive_site(buffer);
|
||||
dive->dive_site_uuid = create_dive_site(buffer, dive->when);
|
||||
}
|
||||
}
|
||||
free(to_free);
|
||||
|
@ -2693,7 +2693,7 @@ extern int cobalt_location(void *handle, int columns, char **data, char **column
|
|||
sprintf(tmp, "%s / %s", location, data[0]);
|
||||
free(location);
|
||||
location = NULL;
|
||||
cur_dive->dive_site_uuid = find_or_create_dive_site_with_name(tmp);
|
||||
cur_dive->dive_site_uuid = find_or_create_dive_site_with_name(tmp, cur_dive->when);
|
||||
free(tmp);
|
||||
} else {
|
||||
location = strdup(data[0]);
|
||||
|
@ -3110,7 +3110,7 @@ extern int divinglog_dive(void *param, int columns, char **data, char **column)
|
|||
cur_dive->when = (time_t)(atol(data[1]));
|
||||
|
||||
if (data[2])
|
||||
cur_dive->dive_site_uuid = find_or_create_dive_site_with_name(data[2]);
|
||||
cur_dive->dive_site_uuid = find_or_create_dive_site_with_name(data[2], cur_dive->when);
|
||||
|
||||
if (data[3])
|
||||
utf8_string(data[3], &cur_dive->buddy);
|
||||
|
|
|
@ -1,3 +1,4 @@
|
|||
#include "units.h"
|
||||
#include "divelocationmodel.h"
|
||||
#include "dive.h"
|
||||
#include <QDebug>
|
||||
|
@ -126,14 +127,14 @@ void LocationInformationModel::update()
|
|||
endResetModel();
|
||||
}
|
||||
|
||||
int32_t LocationInformationModel::addDiveSite(const QString& name, int lon, int lat)
|
||||
int32_t LocationInformationModel::addDiveSite(const QString& name, timestamp_t divetime, int lon, int lat)
|
||||
{
|
||||
degrees_t latitude, longitude;
|
||||
latitude.udeg = lat;
|
||||
longitude.udeg = lon;
|
||||
|
||||
beginInsertRows(QModelIndex(), dive_site_table.nr + 2, dive_site_table.nr + 2);
|
||||
uint32_t uuid = create_dive_site_with_gps(name.toUtf8().data(), latitude, longitude);
|
||||
uint32_t uuid = create_dive_site_with_gps(name.toUtf8().data(), latitude, longitude, divetime);
|
||||
qSort(dive_site_table.dive_sites, dive_site_table.dive_sites + dive_site_table.nr, dive_site_less_than);
|
||||
internalRowCount = dive_site_table.nr;
|
||||
endInsertRows();
|
||||
|
|
|
@ -15,7 +15,7 @@ public:
|
|||
int columnCount(const QModelIndex &parent) const;
|
||||
int rowCount(const QModelIndex &parent = QModelIndex()) const;
|
||||
QVariant data(const QModelIndex &index = QModelIndex(), int role = Qt::DisplayRole) const;
|
||||
int32_t addDiveSite(const QString& name, int lat = 0, int lon = 0);
|
||||
int32_t addDiveSite(const QString& name, timestamp_t divetime, int lat = 0, int lon = 0);
|
||||
bool setData(const QModelIndex &index, const QVariant &value, int role);
|
||||
bool removeRows(int row, int count, const QModelIndex & parent = QModelIndex());
|
||||
void setFirstRowTextField(QLineEdit *textField);
|
||||
|
|
|
@ -897,7 +897,7 @@ void MainTab::updateDiveSite(int divenr)
|
|||
cd->dive_site_uuid = pickedUuid;
|
||||
} else if (!newName.isEmpty()) {
|
||||
// user entered a name but didn't pick a dive site, so copy that data
|
||||
uint32_t createdUuid = create_dive_site(displayed_dive_site.name);
|
||||
uint32_t createdUuid = create_dive_site(displayed_dive_site.name, cd->when);
|
||||
struct dive_site *newDs = get_dive_site_by_uuid(createdUuid);
|
||||
copy_dive_site(&displayed_dive_site, newDs);
|
||||
newDs->uuid = createdUuid; // the copy overwrote the uuid
|
||||
|
@ -914,7 +914,7 @@ void MainTab::updateDiveSite(int divenr)
|
|||
} else if (newName != origName) {
|
||||
if (newUuid == 0) {
|
||||
// so we created a new site, add it to the global list
|
||||
uint32_t createdUuid = create_dive_site(displayed_dive_site.name);
|
||||
uint32_t createdUuid = create_dive_site(displayed_dive_site.name, cd->when);
|
||||
struct dive_site *newDs = get_dive_site_by_uuid(createdUuid);
|
||||
copy_dive_site(&displayed_dive_site, newDs);
|
||||
newDs->uuid = createdUuid; // the copy overwrote the uuid
|
||||
|
|
|
@ -40,7 +40,7 @@
|
|||
/*
|
||||
* api break introduced in libgit2 master after 0.22 - let's guess this is the v0.23 API
|
||||
*/
|
||||
#if USE_LIBGIT23_API
|
||||
#if USE_LIBGIT23_API || (!LIBGIT2_VER_MAJOR && LIBGIT2_VER_MINOR >= 23)
|
||||
#define git_branch_create(out, repo, branch_name, target, force, signature, log_message) \
|
||||
git_branch_create(out, repo, branch_name, target, force)
|
||||
#define git_reference_set_target(out, ref, id, author, log_message) \
|
||||
|
@ -997,7 +997,7 @@ static git_tree *get_git_tree(git_repository *repo, git_object *parent)
|
|||
return tree;
|
||||
}
|
||||
|
||||
static int update_git_checkout(git_repository *repo, git_object *parent, git_tree *tree)
|
||||
int update_git_checkout(git_repository *repo, git_object *parent, git_tree *tree)
|
||||
{
|
||||
git_checkout_options opts = GIT_CHECKOUT_OPTIONS_INIT;
|
||||
|
||||
|
|
|
@ -507,7 +507,8 @@ void save_dives_buffer(struct membuffer *b, const bool select_only)
|
|||
put_format(b, " <autogroup state='1' />\n");
|
||||
put_format(b, "</settings>\n");
|
||||
|
||||
/* save the dive sites */
|
||||
/* save the dive sites - to make the output consistent let's sort the table, first */
|
||||
dive_site_table_sort();
|
||||
put_format(b, "<divesites>\n");
|
||||
for (i = 0; i < dive_site_table.nr; i++) {
|
||||
int j;
|
||||
|
|
261
tests/testgitstorage.cpp
Normal file
261
tests/testgitstorage.cpp
Normal file
|
@ -0,0 +1,261 @@
|
|||
#include "testgitstorage.h"
|
||||
#include "dive.h"
|
||||
#include "divelist.h"
|
||||
#include "file.h"
|
||||
#include "git2.h"
|
||||
#include "prefs-macros.h"
|
||||
#include <QDir>
|
||||
#include <QTextStream>
|
||||
#include <QNetworkProxy>
|
||||
#include <QSettings>
|
||||
#include <QDebug>
|
||||
|
||||
// this is a local helper function in git-access.c
|
||||
extern "C" char *get_local_dir(const char *remote, const char *branch);
|
||||
|
||||
void TestGitStorage::testSetup()
|
||||
{
|
||||
// first, setup the preferences an proxy information
|
||||
prefs = default_prefs;
|
||||
QCoreApplication::setOrganizationName("Subsurface");
|
||||
QCoreApplication::setOrganizationDomain("subsurface.hohndel.org");
|
||||
QCoreApplication::setApplicationName("Subsurface");
|
||||
QSettings s;
|
||||
QVariant v;
|
||||
s.beginGroup("Network");
|
||||
GET_INT_DEF("proxy_type", proxy_type, QNetworkProxy::DefaultProxy);
|
||||
GET_TXT("proxy_host", proxy_host);
|
||||
GET_INT("proxy_port", proxy_port);
|
||||
GET_BOOL("proxy_auth", proxy_auth);
|
||||
GET_TXT("proxy_user", proxy_user);
|
||||
GET_TXT("proxy_pass", proxy_pass);
|
||||
s.endGroup();
|
||||
s.beginGroup("CloudStorage");
|
||||
GET_TXT("cloud_base_url", cloud_base_url);
|
||||
QString gitUrl(prefs.cloud_base_url);
|
||||
if (gitUrl.right(1) != "/")
|
||||
gitUrl += "/";
|
||||
prefs.cloud_git_url = strdup(qPrintable(gitUrl + "git"));
|
||||
s.endGroup();
|
||||
prefs.cloud_storage_email_encoded = strdup("ssrftest@hohndel.org");
|
||||
prefs.cloud_storage_password = strdup("geheim");
|
||||
prefs.cloud_background_sync = true;
|
||||
QNetworkProxy proxy;
|
||||
proxy.setType(QNetworkProxy::ProxyType(prefs.proxy_type));
|
||||
proxy.setHostName(prefs.proxy_host);
|
||||
proxy.setPort(prefs.proxy_port);
|
||||
if (prefs.proxy_auth) {
|
||||
proxy.setUser(prefs.proxy_user);
|
||||
proxy.setPassword(prefs.proxy_pass);
|
||||
}
|
||||
QNetworkProxy::setApplicationProxy(proxy);
|
||||
|
||||
// now cleanup the cache dir in case there's something weird from previous runs
|
||||
QString localCacheDir(get_local_dir("https://cloud.subsurface-divelog.org/git/ssrftest@hohndel.org", "ssrftest@hohndel.org"));
|
||||
QDir localCacheDirectory(localCacheDir);
|
||||
QCOMPARE(localCacheDirectory.removeRecursively(), true);
|
||||
}
|
||||
|
||||
void TestGitStorage::testGitStorageLocal()
|
||||
{
|
||||
// test writing and reading back from local git storage
|
||||
git_repository *repo;
|
||||
git_libgit2_init();
|
||||
QCOMPARE(parse_file(SUBSURFACE_SOURCE "/dives/SampleDivesV2.ssrf"), 0);
|
||||
QString testDirName("./gittest");
|
||||
QDir testDir(testDirName);
|
||||
QCOMPARE(testDir.removeRecursively(), true);
|
||||
QCOMPARE(QDir().mkdir(testDirName), true);
|
||||
QCOMPARE(git_repository_init(&repo, qPrintable(testDirName), false), 0);
|
||||
QCOMPARE(save_dives(qPrintable(testDirName + "[test]")), 0);
|
||||
QCOMPARE(save_dives("./SampleDivesV3.ssrf"), 0);
|
||||
clear_dive_file_data();
|
||||
QCOMPARE(parse_file(qPrintable(testDirName + "[test]")), 0);
|
||||
QCOMPARE(save_dives("./SampleDivesV3viagit.ssrf"), 0);
|
||||
QFile org("./SampleDivesV3.ssrf");
|
||||
org.open(QFile::ReadOnly);
|
||||
QFile out("./SampleDivesV3viagit.ssrf");
|
||||
out.open(QFile::ReadOnly);
|
||||
QTextStream orgS(&org);
|
||||
QTextStream outS(&out);
|
||||
QString readin = orgS.readAll();
|
||||
QString written = outS.readAll();
|
||||
QCOMPARE(readin, written);
|
||||
clear_dive_file_data();
|
||||
}
|
||||
|
||||
void TestGitStorage::testGitStorageCloud()
|
||||
{
|
||||
// test writing and reading back from cloud storage
|
||||
// connect to the ssrftest repository on the cloud server
|
||||
// and repeat the same test as before with the local git storage
|
||||
QString cloudTestRepo("https://cloud.subsurface-divelog.org/git/ssrftest@hohndel.org[ssrftest@hohndel.org]");
|
||||
QCOMPARE(parse_file(SUBSURFACE_SOURCE "/dives/SampleDivesV2.ssrf"), 0);
|
||||
QCOMPARE(save_dives(qPrintable(cloudTestRepo)), 0);
|
||||
clear_dive_file_data();
|
||||
QCOMPARE(parse_file(qPrintable(cloudTestRepo)), 0);
|
||||
QCOMPARE(save_dives("./SampleDivesV3viacloud.ssrf"), 0);
|
||||
QFile org("./SampleDivesV3.ssrf");
|
||||
org.open(QFile::ReadOnly);
|
||||
QFile out("./SampleDivesV3viacloud.ssrf");
|
||||
out.open(QFile::ReadOnly);
|
||||
QTextStream orgS(&org);
|
||||
QTextStream outS(&out);
|
||||
QString readin = orgS.readAll();
|
||||
QString written = outS.readAll();
|
||||
QCOMPARE(readin, written);
|
||||
clear_dive_file_data();
|
||||
}
|
||||
|
||||
void TestGitStorage::testGitStorageCloudOfflineSync()
|
||||
{
|
||||
// make a change to local cache repo (pretending that we did some offline changes)
|
||||
// and then open the remote one again and check that things were propagated correctly
|
||||
QString cloudTestRepo("https://cloud.subsurface-divelog.org/git/ssrftest@hohndel.org[ssrftest@hohndel.org]");
|
||||
QString localCacheDir(get_local_dir("https://cloud.subsurface-divelog.org/git/ssrftest@hohndel.org", "ssrftest@hohndel.org"));
|
||||
QString localCacheRepo = localCacheDir + "[ssrftest@hohndel.org]";
|
||||
// read the local repo from the previous test and add dive 10
|
||||
QCOMPARE(parse_file(qPrintable(localCacheRepo)), 0);
|
||||
QCOMPARE(parse_file(SUBSURFACE_SOURCE "/dives/test10.xml"), 0);
|
||||
// calling process_dive() sorts the table, but calling it with
|
||||
// is_imported == true causes it to try to update the window title... let's not do that
|
||||
process_dives(false, false);
|
||||
// now save only to the local cache but not to the remote server
|
||||
QCOMPARE(save_dives(qPrintable(localCacheRepo)), 0);
|
||||
QCOMPARE(save_dives("./SampleDivesV3plus10local.ssrf"), 0);
|
||||
clear_dive_file_data();
|
||||
// open the cloud storage and compare
|
||||
QCOMPARE(parse_file(qPrintable(cloudTestRepo)), 0);
|
||||
QCOMPARE(save_dives("./SampleDivesV3plus10viacloud.ssrf"), 0);
|
||||
QFile org("./SampleDivesV3plus10local.ssrf");
|
||||
org.open(QFile::ReadOnly);
|
||||
QFile out("./SampleDivesV3plus10viacloud.ssrf");
|
||||
out.open(QFile::ReadOnly);
|
||||
QTextStream orgS(&org);
|
||||
QTextStream outS(&out);
|
||||
QString readin = orgS.readAll();
|
||||
QString written = outS.readAll();
|
||||
QCOMPARE(readin, written);
|
||||
// write back out to cloud storage, move away the local cache, open again and compare
|
||||
QCOMPARE(save_dives(qPrintable(cloudTestRepo)), 0);
|
||||
clear_dive_file_data();
|
||||
QDir localCacheDirectory(localCacheDir);
|
||||
QDir localCacheDirectorySave(localCacheDir + "save");
|
||||
QCOMPARE(localCacheDirectorySave.removeRecursively(), true);
|
||||
QCOMPARE(localCacheDirectory.rename(localCacheDir, localCacheDir + "save"), true);
|
||||
QCOMPARE(parse_file(qPrintable(cloudTestRepo)), 0);
|
||||
QCOMPARE(save_dives("./SampleDivesV3plus10fromcloud.ssrf"), 0);
|
||||
org.close();
|
||||
org.open(QFile::ReadOnly);
|
||||
QFile out2("./SampleDivesV3plus10fromcloud.ssrf");
|
||||
out2.open(QFile::ReadOnly);
|
||||
QTextStream orgS2(&org);
|
||||
QTextStream outS2(&out2);
|
||||
readin = orgS2.readAll();
|
||||
written = outS2.readAll();
|
||||
QCOMPARE(readin, written);
|
||||
clear_dive_file_data();
|
||||
}
|
||||
|
||||
void TestGitStorage::testGitStorageCloudMerge()
|
||||
{
|
||||
// now we need to mess with the local git repo to get an actual merge
|
||||
// first we add another dive to the "moved away" repository, pretending we did
|
||||
// another offline change there
|
||||
QString cloudTestRepo("https://cloud.subsurface-divelog.org/git/ssrftest@hohndel.org[ssrftest@hohndel.org]");
|
||||
QString localCacheDir(get_local_dir("https://cloud.subsurface-divelog.org/git/ssrftest@hohndel.org", "ssrftest@hohndel.org"));
|
||||
QString localCacheRepoSave = localCacheDir + "save[ssrftest@hohndel.org]";
|
||||
QCOMPARE(parse_file(qPrintable(localCacheRepoSave)), 0);
|
||||
QCOMPARE(parse_file(SUBSURFACE_SOURCE "/dives/test11.xml"), 0);
|
||||
process_dives(false, false);
|
||||
QCOMPARE(save_dives(qPrintable(localCacheRepoSave)), 0);
|
||||
clear_dive_file_data();
|
||||
|
||||
// now we open the cloud storage repo and add a different dive to it
|
||||
QCOMPARE(parse_file(qPrintable(cloudTestRepo)), 0);
|
||||
QCOMPARE(parse_file(SUBSURFACE_SOURCE "/dives/test12.xml"), 0);
|
||||
process_dives(false, false);
|
||||
QCOMPARE(save_dives(qPrintable(cloudTestRepo)), 0);
|
||||
clear_dive_file_data();
|
||||
|
||||
// now we move the saved local cache into place and try to open the cloud repo
|
||||
// -> this forces a merge
|
||||
QDir localCacheDirectory(localCacheDir);
|
||||
QCOMPARE(localCacheDirectory.removeRecursively(), true);
|
||||
QDir localCacheDirectorySave(localCacheDir + "save");
|
||||
QCOMPARE(localCacheDirectory.rename(localCacheDir + "save", localCacheDir), true);
|
||||
QCOMPARE(parse_file(qPrintable(cloudTestRepo)), 0);
|
||||
QCOMPARE(save_dives("./SapleDivesV3plus10-11-12-merged.ssrf"), 0);
|
||||
clear_dive_file_data();
|
||||
QCOMPARE(parse_file("./SampleDivesV3plus10local.ssrf"), 0);
|
||||
QCOMPARE(parse_file(SUBSURFACE_SOURCE "/dives/test11.xml"), 0);
|
||||
process_dives(false, false);
|
||||
QCOMPARE(parse_file(SUBSURFACE_SOURCE "/dives/test12.xml"), 0);
|
||||
process_dives(false, false);
|
||||
QCOMPARE(save_dives("./SapleDivesV3plus10-11-12.ssrf"), 0);
|
||||
QFile org("./SapleDivesV3plus10-11-12-merged.ssrf");
|
||||
org.open(QFile::ReadOnly);
|
||||
QFile out("./SapleDivesV3plus10-11-12.ssrf");
|
||||
out.open(QFile::ReadOnly);
|
||||
QTextStream orgS(&org);
|
||||
QTextStream outS(&out);
|
||||
QString readin = orgS.readAll();
|
||||
QString written = outS.readAll();
|
||||
QCOMPARE(readin, written);
|
||||
clear_dive_file_data();
|
||||
}
|
||||
|
||||
void TestGitStorage::testGitStorageCloudMerge2()
|
||||
{
|
||||
// delete a dive offline
|
||||
// edit the same dive in the cloud repo
|
||||
// merge
|
||||
QString cloudTestRepo("https://cloud.subsurface-divelog.org/git/ssrftest@hohndel.org[ssrftest@hohndel.org]");
|
||||
QString localCacheDir(get_local_dir("https://cloud.subsurface-divelog.org/git/ssrftest@hohndel.org", "ssrftest@hohndel.org"));
|
||||
QString localCacheRepo = localCacheDir + "[ssrftest@hohndel.org]";
|
||||
QCOMPARE(parse_file(qPrintable(localCacheRepo)), 0);
|
||||
process_dives(false, false);
|
||||
struct dive *dive = get_dive(1);
|
||||
delete_single_dive(1);
|
||||
QCOMPARE(save_dives("./SampleDivesMinus1.ssrf"), 0);
|
||||
QCOMPARE(save_dives(qPrintable(localCacheRepo)), 0);
|
||||
clear_dive_file_data();
|
||||
|
||||
// move the local cache away
|
||||
{ // scope for variables
|
||||
QDir localCacheDirectory(localCacheDir);
|
||||
QDir localCacheDirectorySave(localCacheDir + "save");
|
||||
QCOMPARE(localCacheDirectorySave.removeRecursively(), true);
|
||||
QCOMPARE(localCacheDirectory.rename(localCacheDir, localCacheDir + "save"), true);
|
||||
}
|
||||
// now we open the cloud storage repo and modify that first dive
|
||||
QCOMPARE(parse_file(qPrintable(cloudTestRepo)), 0);
|
||||
process_dives(false, false);
|
||||
dive = get_dive(1);
|
||||
free(dive->notes);
|
||||
dive->notes = strdup("These notes have been modified by TestGitStorage");
|
||||
QCOMPARE(save_dives(qPrintable(cloudTestRepo)), 0);
|
||||
clear_dive_file_data();
|
||||
|
||||
// now we move the saved local cache into place and try to open the cloud repo
|
||||
// -> this forces a merge
|
||||
QDir localCacheDirectory(localCacheDir);
|
||||
QDir localCacheDirectorySave(localCacheDir + "save");
|
||||
QCOMPARE(localCacheDirectory.removeRecursively(), true);
|
||||
QCOMPARE(localCacheDirectorySave.rename(localCacheDir + "save", localCacheDir), true);
|
||||
|
||||
QCOMPARE(parse_file(qPrintable(cloudTestRepo)), 0);
|
||||
QCOMPARE(save_dives("./SampleDivesMinus1-merged.ssrf"), 0);
|
||||
QCOMPARE(save_dives(qPrintable(cloudTestRepo)), 0);
|
||||
QFile org("./SampleDivesMinus1-merged.ssrf");
|
||||
org.open(QFile::ReadOnly);
|
||||
QFile out("./SampleDivesMinus1.ssrf");
|
||||
out.open(QFile::ReadOnly);
|
||||
QTextStream orgS(&org);
|
||||
QTextStream outS(&out);
|
||||
QString readin = orgS.readAll();
|
||||
QString written = outS.readAll();
|
||||
QCOMPARE(readin, written);
|
||||
}
|
||||
|
||||
// Expands to the test executable's main(), instantiating TestGitStorage
// and running all of its private slots via the QTest framework.
QTEST_MAIN(TestGitStorage)
|
18
tests/testgitstorage.h
Normal file
18
tests/testgitstorage.h
Normal file
|
@ -0,0 +1,18 @@
|
|||
#ifndef TESTGITSTORAGE_H
#define TESTGITSTORAGE_H

#include <QTest>

// Test suite for dive log storage in git repositories: a plain local
// repo and the Subsurface cloud storage, including offline use of the
// local cache and merge scenarios. QTest runs the slots in declaration
// order; NOTE(review): the cases appear to build on files written by
// earlier cases (e.g. SampleDivesV3.ssrf), so the order matters.
class TestGitStorage : public QObject
{
	Q_OBJECT
private slots:
	void testSetup();
	void testGitStorageLocal();
	void testGitStorageCloud();
	void testGitStorageCloudOfflineSync();
	void testGitStorageCloudMerge();
	void testGitStorageCloudMerge2();
};

#endif // TESTGITSTORAGE_H
|
|
@ -803,7 +803,7 @@ static bool process_raw_buffer(device_data_t *devdata, uint32_t deviceid, char *
|
|||
if (for_dive)
|
||||
*for_dive = atoi(val);
|
||||
} else if (!log && dive && !strcmp(tag, "divespot_id")) {
|
||||
dive->dive_site_uuid = create_dive_site("from Uemis");
|
||||
dive->dive_site_uuid = create_dive_site("from Uemis", dive->when);
|
||||
track_divespot(val, dive->dc.diveid, dive->dive_site_uuid);
|
||||
} else if (dive) {
|
||||
parse_tag(dive, tag, val);
|
||||
|
|
Loading…
Add table
Reference in a new issue