2017-04-27 18:18:03 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2018-05-22 07:07:42 +00:00
|
|
|
#include "ssrf.h"
|
2012-01-26 21:00:45 +00:00
|
|
|
#include <unistd.h>
|
|
|
|
#include <fcntl.h>
|
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <stdlib.h>
|
2012-01-27 01:43:33 +00:00
|
|
|
#include <string.h>
|
2012-01-26 21:00:45 +00:00
|
|
|
#include <errno.h>
|
2013-10-06 15:55:58 +00:00
|
|
|
#include "gettext.h"
|
2013-05-11 19:33:46 +00:00
|
|
|
#include <zip.h>
|
2013-10-19 05:17:13 +00:00
|
|
|
#include <time.h>
|
2012-01-26 21:00:45 +00:00
|
|
|
|
|
|
|
#include "dive.h"
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
#include "divelog.h"
|
2018-05-11 15:25:41 +00:00
|
|
|
#include "subsurface-string.h"
|
2019-08-05 17:41:15 +00:00
|
|
|
#include "errorhelper.h"
|
2012-01-27 20:43:40 +00:00
|
|
|
#include "file.h"
|
2015-06-13 15:01:06 +00:00
|
|
|
#include "git-access.h"
|
2018-02-24 22:28:13 +00:00
|
|
|
#include "qthelper.h"
|
2018-01-06 20:24:38 +00:00
|
|
|
#include "import-csv.h"
|
2019-03-03 21:29:40 +00:00
|
|
|
#include "parse.h"
|
2012-01-26 21:00:45 +00:00
|
|
|
|
2014-11-19 21:14:22 +00:00
|
|
|
/* For SAMPLE_* */
|
|
|
|
#include <libdivecomputer/parser.h>
|
|
|
|
|
2015-02-18 05:58:34 +00:00
|
|
|
/* to check XSLT version number */
|
|
|
|
#include <libxslt/xsltconfig.h>
|
|
|
|
|
2012-08-24 22:39:00 +00:00
|
|
|
/* Crazy windows sh*t */
|
|
|
|
#ifndef O_BINARY
|
|
|
|
#define O_BINARY 0
|
|
|
|
#endif
|
|
|
|
|
2024-03-01 12:09:20 +00:00
|
|
|
std::pair<std::string, int> readfile(const char *filename)
|
2012-01-26 21:00:45 +00:00
|
|
|
{
|
2012-07-12 22:28:47 +00:00
|
|
|
int ret, fd;
|
2012-01-26 21:00:45 +00:00
|
|
|
struct stat st;
|
|
|
|
|
2024-03-01 12:09:20 +00:00
|
|
|
std::string res;
|
2013-12-19 13:00:51 +00:00
|
|
|
fd = subsurface_open(filename, O_RDONLY | O_BINARY, 0);
|
2012-01-26 21:00:45 +00:00
|
|
|
if (fd < 0)
|
2024-03-01 12:09:20 +00:00
|
|
|
return std::make_pair(res, fd);
|
2012-01-26 21:00:45 +00:00
|
|
|
ret = fstat(fd, &st);
|
|
|
|
if (ret < 0)
|
2024-03-01 12:09:20 +00:00
|
|
|
return std::make_pair(res, ret);
|
2012-01-26 21:00:45 +00:00
|
|
|
if (!S_ISREG(st.st_mode))
|
2024-03-01 12:09:20 +00:00
|
|
|
return std::make_pair(res, -EINVAL);
|
2012-01-26 21:00:45 +00:00
|
|
|
if (!st.st_size)
|
2024-03-01 12:09:20 +00:00
|
|
|
return std::make_pair(res, 0);
|
|
|
|
// Sadly, this 0-initializes the string, just before overwriting it.
|
|
|
|
// However, we use std::string, because that automatically 0-terminates
|
|
|
|
// the data and the code expects that.
|
|
|
|
res.resize(st.st_size);
|
|
|
|
ret = read(fd, res.data(), res.size());
|
2012-01-26 21:00:45 +00:00
|
|
|
if (ret < 0)
|
2024-03-01 12:09:20 +00:00
|
|
|
return std::make_pair(res, ret);
|
|
|
|
// converting to int loses a bit but size will never be that big
|
|
|
|
if (ret == (int)res.size()) {
|
|
|
|
return std::make_pair(res, ret);
|
|
|
|
} else {
|
|
|
|
errno = EIO;
|
|
|
|
return std::make_pair(res, -1);
|
|
|
|
}
|
2012-01-26 21:00:45 +00:00
|
|
|
}
|
|
|
|
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
static void zip_read(struct zip_file *file, const char *filename, struct divelog *log)
|
2012-01-27 01:43:33 +00:00
|
|
|
{
|
|
|
|
int size = 1024, n, read = 0;
|
2024-03-01 12:09:20 +00:00
|
|
|
std::vector<char> mem(size + 1);
|
2012-01-27 01:43:33 +00:00
|
|
|
|
2024-02-28 06:50:11 +00:00
|
|
|
while ((n = zip_fread(file, mem.data() + read, size - read)) > 0) {
|
2012-01-27 01:43:33 +00:00
|
|
|
read += n;
|
|
|
|
size = read * 3 / 2;
|
2024-03-01 12:09:20 +00:00
|
|
|
mem.resize(size + 1);
|
2012-01-27 01:43:33 +00:00
|
|
|
}
|
2013-03-17 05:12:23 +00:00
|
|
|
mem[read] = 0;
|
2024-02-28 06:50:11 +00:00
|
|
|
(void) parse_xml_buffer(filename, mem.data(), read, log, NULL);
|
2012-01-27 01:43:33 +00:00
|
|
|
}
|
|
|
|
|
2024-02-28 06:50:11 +00:00
|
|
|
extern "C" int try_to_open_zip(const char *filename, struct divelog *log)
|
2012-01-27 01:43:33 +00:00
|
|
|
{
|
|
|
|
int success = 0;
|
2012-01-27 18:56:36 +00:00
|
|
|
/* Grr. libzip needs to re-open the file, it can't take a buffer */
|
2013-12-19 13:00:51 +00:00
|
|
|
struct zip *zip = subsurface_zip_open_readonly(filename, ZIP_CHECKCONS, NULL);
|
2012-01-27 01:43:33 +00:00
|
|
|
|
|
|
|
if (zip) {
|
|
|
|
int index;
|
2014-02-28 04:09:57 +00:00
|
|
|
for (index = 0;; index++) {
|
2012-01-27 01:43:33 +00:00
|
|
|
struct zip_file *file = zip_fopen_index(zip, index, 0);
|
|
|
|
if (!file)
|
|
|
|
break;
|
2014-10-13 18:31:01 +00:00
|
|
|
/* skip parsing the divelogs.de pictures */
|
|
|
|
if (strstr(zip_get_name(zip, index, 0), "pictures/"))
|
|
|
|
continue;
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
zip_read(file, filename, log);
|
2012-01-27 01:43:33 +00:00
|
|
|
zip_fclose(file);
|
|
|
|
success++;
|
|
|
|
}
|
2013-12-19 13:00:51 +00:00
|
|
|
subsurface_zip_close(zip);
|
2015-10-31 17:29:41 +00:00
|
|
|
|
|
|
|
if (!success)
|
|
|
|
return report_error(translate("gettextFromC", "No dives in the input file '%s'"), filename);
|
2012-01-27 01:43:33 +00:00
|
|
|
}
|
|
|
|
return success;
|
|
|
|
}
|
|
|
|
|
2024-02-28 06:50:11 +00:00
|
|
|
/* sqlite3_exec callback for the schema probes below: returns non-zero
 * (which aborts the query) when the first result column reads "0",
 * i.e. when the probed table/column combination does not exist. */
static int db_test_func(void *, int, char **data, char **)
{
	const char *count = data[0];
	return count[0] == '0' ? 1 : 0;
}
|
|
|
|
|
2024-03-01 12:09:20 +00:00
|
|
|
static int try_to_open_db(const char *filename, std::string &mem, struct divelog *log)
|
2013-03-05 05:10:39 +00:00
|
|
|
{
|
2014-02-15 06:36:49 +00:00
|
|
|
sqlite3 *handle;
|
2014-02-15 06:36:50 +00:00
|
|
|
char dm4_test[] = "select count(*) from sqlite_master where type='table' and name='Dive' and sql like '%ProfileBlob%'";
|
2014-11-15 15:34:20 +00:00
|
|
|
char dm5_test[] = "select count(*) from sqlite_master where type='table' and name='Dive' and sql like '%SampleBlob%'";
|
2014-02-15 06:36:50 +00:00
|
|
|
char shearwater_test[] = "select count(*) from sqlite_master where type='table' and name='system' and sql like '%dbVersion%'";
|
2018-12-29 19:32:55 +00:00
|
|
|
char shearwater_cloud_test[] = "select count(*) from sqlite_master where type='table' and name='SyncV3MetadataDiveLog' and sql like '%CreatedDevice%'";
|
2014-12-20 16:19:43 +00:00
|
|
|
char cobalt_test[] = "select count(*) from sqlite_master where type='table' and name='TrackPoints' and sql like '%DepthPressure%'";
|
2015-07-12 17:46:48 +00:00
|
|
|
char divinglog_test[] = "select count(*) from sqlite_master where type='table' and name='DBInfo' and sql like '%PrgName%'";
|
2020-07-17 01:40:46 +00:00
|
|
|
char seacsync_test[] = "select count(*) from sqlite_master where type='table' and name='dive_data' and sql like '%ndl_tts_s%'";
|
2014-02-15 06:36:49 +00:00
|
|
|
int retval;
|
|
|
|
|
|
|
|
retval = sqlite3_open(filename, &handle);
|
|
|
|
|
|
|
|
if (retval) {
|
2015-07-02 18:22:22 +00:00
|
|
|
fprintf(stderr, "Database connection failed '%s'.\n", filename);
|
2014-02-15 06:36:49 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2014-11-15 15:34:20 +00:00
|
|
|
/* Testing if DB schema resembles Suunto DM5 database format */
|
|
|
|
retval = sqlite3_exec(handle, dm5_test, &db_test_func, 0, NULL);
|
|
|
|
if (!retval) {
|
2024-03-01 12:09:20 +00:00
|
|
|
retval = parse_dm5_buffer(handle, filename, mem.data(), mem.size(), log);
|
2014-11-15 15:34:20 +00:00
|
|
|
sqlite3_close(handle);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2014-02-15 06:36:50 +00:00
|
|
|
/* Testing if DB schema resembles Suunto DM4 database format */
|
|
|
|
retval = sqlite3_exec(handle, dm4_test, &db_test_func, 0, NULL);
|
|
|
|
if (!retval) {
|
2024-03-01 12:09:20 +00:00
|
|
|
retval = parse_dm4_buffer(handle, filename, mem.data(), mem.size(), log);
|
2014-02-15 06:36:50 +00:00
|
|
|
sqlite3_close(handle);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Testing if DB schema resembles Shearwater database format */
|
|
|
|
retval = sqlite3_exec(handle, shearwater_test, &db_test_func, 0, NULL);
|
|
|
|
if (!retval) {
|
2024-03-01 12:09:20 +00:00
|
|
|
retval = parse_shearwater_buffer(handle, filename, mem.data(), mem.size(), log);
|
2014-12-20 16:19:43 +00:00
|
|
|
sqlite3_close(handle);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2018-12-29 19:32:55 +00:00
|
|
|
/* Testing if DB schema resembles Shearwater cloud database format */
|
|
|
|
retval = sqlite3_exec(handle, shearwater_cloud_test, &db_test_func, 0, NULL);
|
|
|
|
if (!retval) {
|
2024-03-01 12:09:20 +00:00
|
|
|
retval = parse_shearwater_cloud_buffer(handle, filename, mem.data(), mem.size(), log);
|
2018-12-29 19:32:55 +00:00
|
|
|
sqlite3_close(handle);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2014-12-20 16:19:43 +00:00
|
|
|
/* Testing if DB schema resembles Atomic Cobalt database format */
|
|
|
|
retval = sqlite3_exec(handle, cobalt_test, &db_test_func, 0, NULL);
|
|
|
|
if (!retval) {
|
2024-03-01 12:09:20 +00:00
|
|
|
retval = parse_cobalt_buffer(handle, filename, mem.data(), mem.size(), log);
|
2014-02-15 06:36:50 +00:00
|
|
|
sqlite3_close(handle);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2015-07-12 17:46:48 +00:00
|
|
|
/* Testing if DB schema resembles Divinglog database format */
|
|
|
|
retval = sqlite3_exec(handle, divinglog_test, &db_test_func, 0, NULL);
|
|
|
|
if (!retval) {
|
2024-03-01 12:09:20 +00:00
|
|
|
retval = parse_divinglog_buffer(handle, filename, mem.data(), mem.size(), log);
|
2015-07-12 17:46:48 +00:00
|
|
|
sqlite3_close(handle);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2020-07-17 01:40:46 +00:00
|
|
|
/* Testing if DB schema resembles Seac database format */
|
|
|
|
retval = sqlite3_exec(handle, seacsync_test, &db_test_func, 0, NULL);
|
|
|
|
if (!retval) {
|
2024-03-01 12:09:20 +00:00
|
|
|
retval = parse_seac_buffer(handle, filename, mem.data(), mem.size(), log);
|
2020-07-17 01:40:46 +00:00
|
|
|
sqlite3_close(handle);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-02-15 06:36:49 +00:00
|
|
|
sqlite3_close(handle);
|
|
|
|
|
|
|
|
return retval;
|
2013-03-05 05:10:39 +00:00
|
|
|
}
|
|
|
|
|
2012-06-20 03:07:42 +00:00
|
|
|
/*
|
|
|
|
* Cochran comma-separated values: depth in feet, temperature in F, pressure in psi.
|
|
|
|
*
|
|
|
|
* They start with eight comma-separated fields like:
|
|
|
|
*
|
|
|
|
* filename: {C:\Analyst4\can\T036785.can},{C:\Analyst4\can\K031892.can}
|
|
|
|
* divenr: %d
|
|
|
|
* datetime: {03Sep11 16:37:22},{15Dec11 18:27:02}
|
|
|
|
* ??: 1
|
|
|
|
* serialnr??: {CCI134},{CCI207}
|
|
|
|
* computer??: {GeminiII},{CommanderIII}
|
|
|
|
* computer??: {GeminiII},{CommanderIII}
|
|
|
|
* ??: 1
|
|
|
|
*
|
|
|
|
* Followed by the data values (all comma-separated, all one long line).
|
|
|
|
*/
|
2024-03-01 12:09:20 +00:00
|
|
|
static int open_by_filename(const char *filename, const char *fmt, std::string &mem, struct divelog *log)
|
2012-01-27 01:43:33 +00:00
|
|
|
{
|
2014-12-08 21:09:34 +00:00
|
|
|
// hack to be able to provide a comment for the translated string
|
2024-02-28 06:50:11 +00:00
|
|
|
static struct { const char *s; const char *comment; } csv_warning =
|
|
|
|
QT_TRANSLATE_NOOP3("gettextFromC",
|
|
|
|
"Cannot open CSV file %s; please use Import log file dialog",
|
|
|
|
"'Import log file' should be the same text as corresponding label in Import menu");
|
2014-12-08 21:09:34 +00:00
|
|
|
|
2013-09-17 18:23:54 +00:00
|
|
|
/* Suunto Dive Manager files: SDE, ZIP; divelogs.de files: DLD */
|
|
|
|
if (!strcasecmp(fmt, "SDE") || !strcasecmp(fmt, "ZIP") || !strcasecmp(fmt, "DLD"))
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
return try_to_open_zip(filename, log);
|
2012-01-27 01:43:33 +00:00
|
|
|
|
2013-09-29 12:44:38 +00:00
|
|
|
/* CSV files */
|
2014-01-15 13:59:25 +00:00
|
|
|
if (!strcasecmp(fmt, "CSV"))
|
2024-02-28 06:50:11 +00:00
|
|
|
return report_error(translate("gettextFromC", csv_warning.s), filename);
|
2012-01-27 20:43:40 +00:00
|
|
|
/* Truly nasty intentionally obfuscated Cochran Anal software */
|
|
|
|
if (!strcasecmp(fmt, "CAN"))
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
return try_to_open_cochran(filename, mem, log);
|
2012-06-20 03:07:42 +00:00
|
|
|
/* Cochran export comma-separated-value files */
|
|
|
|
if (!strcasecmp(fmt, "DPT"))
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
return try_to_open_csv(mem, CSV_DEPTH, log);
|
2014-11-07 16:30:44 +00:00
|
|
|
if (!strcasecmp(fmt, "LVD"))
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
return try_to_open_liquivision(filename, mem, log);
|
2012-06-20 03:07:42 +00:00
|
|
|
if (!strcasecmp(fmt, "TMP"))
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
return try_to_open_csv(mem, CSV_TEMP, log);
|
2012-06-20 03:07:42 +00:00
|
|
|
if (!strcasecmp(fmt, "HP1"))
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
return try_to_open_csv(mem, CSV_PRESSURE, log);
|
2012-06-20 03:07:42 +00:00
|
|
|
|
2012-01-27 01:43:33 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-03-01 12:09:20 +00:00
|
|
|
static int parse_file_buffer(const char *filename, std::string &mem, struct divelog *log)
|
2012-01-27 18:56:36 +00:00
|
|
|
{
|
2014-12-08 19:26:03 +00:00
|
|
|
int ret;
|
2024-02-28 06:50:11 +00:00
|
|
|
const char *fmt = strrchr(filename, '.');
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therfore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
if (fmt && (ret = open_by_filename(filename, fmt + 1, mem, log)) != 0)
|
2014-12-08 19:26:03 +00:00
|
|
|
return ret;
|
2012-01-27 18:56:36 +00:00
|
|
|
|
2024-03-01 12:09:20 +00:00
|
|
|
if (mem.empty())
|
2014-12-08 19:26:03 +00:00
|
|
|
return report_error("Out of memory parsing file %s\n", filename);
|
2013-12-09 06:42:51 +00:00
|
|
|
|
2024-03-01 12:09:20 +00:00
|
|
|
return parse_xml_buffer(filename, mem.data(), mem.size(), log, NULL);
|
2012-01-27 18:56:36 +00:00
|
|
|
}
|
|
|
|
|
2024-02-28 06:50:11 +00:00
|
|
|
extern "C" bool remote_repo_uptodate(const char *filename, struct git_info *info)
|
2016-01-05 01:48:34 +00:00
|
|
|
{
|
2024-02-28 07:49:42 +00:00
|
|
|
std::string current_sha = saved_git_id;
|
2022-04-13 16:43:37 +00:00
|
|
|
|
|
|
|
if (is_git_repository(filename, info) && open_git_repository(info)) {
|
2024-02-29 12:57:26 +00:00
|
|
|
std::string sha = get_sha(info->repo, info->branch);
|
2024-02-28 07:49:42 +00:00
|
|
|
if (!sha.empty() && current_sha == sha) {
|
2024-02-29 12:57:26 +00:00
|
|
|
fprintf(stderr, "already have loaded SHA %s - don't load again\n", sha.c_str());
|
2022-04-18 21:37:55 +00:00
|
|
|
return true;
|
2016-01-05 01:48:34 +00:00
|
|
|
}
|
|
|
|
}
|
2022-04-13 16:43:37 +00:00
|
|
|
|
|
|
|
// Either the repository couldn't be opened, or the SHA couldn't
|
|
|
|
// be found.
|
2022-04-18 21:37:55 +00:00
|
|
|
return false;
|
2016-01-05 01:48:34 +00:00
|
|
|
}
|
|
|
|
|
2024-02-28 06:50:11 +00:00
|
|
|
extern "C" int parse_file(const char *filename, struct divelog *log)
|
2012-01-26 21:00:45 +00:00
|
|
|
{
|
2022-04-13 16:43:37 +00:00
|
|
|
struct git_info info;
|
2024-02-28 06:50:11 +00:00
|
|
|
const char *fmt;
|
2012-01-26 21:00:45 +00:00
|
|
|
|
2022-04-13 16:43:37 +00:00
|
|
|
if (is_git_repository(filename, &info)) {
|
|
|
|
if (!open_git_repository(&info)) {
|
|
|
|
/*
|
|
|
|
* Opening the cloud storage repository failed for some reason
|
|
|
|
* give up here and don't send errors about git repositories
|
|
|
|
*/
|
2022-04-18 21:36:00 +00:00
|
|
|
if (info.is_subsurface_cloud) {
|
|
|
|
cleanup_git_info(&info);
|
2022-04-13 16:43:37 +00:00
|
|
|
return -1;
|
2022-04-18 21:36:00 +00:00
|
|
|
}
|
2022-04-13 16:43:37 +00:00
|
|
|
}
|
2021-04-11 00:40:30 +00:00
|
|
|
|
2024-03-01 12:09:20 +00:00
|
|
|
int ret = git_load_dives(&info, log);
|
2022-04-18 21:36:00 +00:00
|
|
|
cleanup_git_info(&info);
|
|
|
|
return ret;
|
2022-04-13 16:43:37 +00:00
|
|
|
}
|
2014-03-12 21:12:58 +00:00
|
|
|
|
2024-03-01 12:09:20 +00:00
|
|
|
auto [mem, err] = readfile(filename);
|
|
|
|
if (err < 0) {
|
2017-12-13 19:10:04 +00:00
|
|
|
/* we don't want to display an error if this was the default file */
|
|
|
|
if (same_string(filename, prefs.default_filename))
|
2014-03-14 18:26:07 +00:00
|
|
|
return 0;
|
2012-11-10 14:32:06 +00:00
|
|
|
|
2014-03-14 18:26:07 +00:00
|
|
|
return report_error(translate("gettextFromC", "Failed to read '%s'"), filename);
|
2024-03-01 12:09:20 +00:00
|
|
|
} else if (err == 0) {
|
2015-08-05 16:01:49 +00:00
|
|
|
return report_error(translate("gettextFromC", "Empty file '%s'"), filename);
|
2012-01-26 21:00:45 +00:00
|
|
|
}
|
|
|
|
|
2013-03-05 05:10:39 +00:00
|
|
|
fmt = strrchr(filename, '.');
|
2015-07-12 17:46:47 +00:00
|
|
|
if (fmt && (!strcasecmp(fmt + 1, "DB") || !strcasecmp(fmt + 1, "BAK") || !strcasecmp(fmt + 1, "SQL"))) {
|
2024-03-01 12:09:20 +00:00
|
|
|
if (!try_to_open_db(filename, mem, log))
|
2014-03-14 18:26:07 +00:00
|
|
|
return 0;
|
2013-03-05 05:10:39 +00:00
|
|
|
}
|
|
|
|
|
2014-12-27 20:10:44 +00:00
|
|
|
/* Divesoft Freedom */
|
2024-03-01 12:09:20 +00:00
|
|
|
if (fmt && (!strcasecmp(fmt + 1, "DLF")))
|
|
|
|
return parse_dlf_buffer((unsigned char *)mem.data(), mem.size(), log);
|
2014-12-27 20:10:44 +00:00
|
|
|
|
Import Datatrak/WLog files
Sequentially parses a file, expected to be a Datatrak/WLog divelog, and
converts the dive info into Subsurface's dive structure.
As my first DC, back in 90s, was an Aladin Air X, the obvious choice of log
software was DTrak (Win version). After using it for some time we moved to WLog
(shareware software more user friendly than Dtrak, printing capable, and still
better, it runs under wine, which, as linux user, was definitive for me). Then,
some years later, my last Aladin died and I moved to an OSTC, forcing me to
look for a software that support this DC.
I found JDivelog which was capable of import Dtrak logs and used it for some
time until discovered Subsurface existence and devoted to it.
The fact was that importing Dtrak dives in JDivelog and then re-importing them
in Subsurface caused a significant data loss (mainly in the profile events and
alarms) and weird location of some other info in the dive notes (mostly tag
items in the original Dtrak software). This situation can't actually be solved
with tools like divelogs.de which causes similar if no greater data loss.
Although this won't be a core feature for Subsurface, I expect it can be useful
for some other divers as has been for me.
Comments and issues:
Datatrak/Wlog files include a lot of diving data which are not directly
supported in Subsurface, in these cases we choose mostly to use "tags".
The lack of some important info in Datatrak archives (e.g. tank's initial
pressure) forces us to do some arbitrary assumptions (e.g. initial pressure =
200 bar).
There might be archives coming directly from old DOS days, as first versions
of Datatrak run on that OS; they were coded CP437 or CP850, while dive logs
coming from Win versions seems to be coded CP1252. Finally, Wlog seems to use a
mixed confusing style. Program directly converts some of the old encoded chars
to iso8859 but is expected there be some issues with non alphabetic chars, e.g.
"ª".
There are two text fields: "Other activities" and "Dive notes", both limited to
256 char size. We have merged them in Subsurface's "Dive Notes" although the
first one could be "tagged", but we're unsure that the user had filled it in
a tag friendly way.
WLog adds some information to the dive and lets the user to write more than
256 chars notes. This is achieved, while keeping compatibility with DTrak
divelogs, by adding a complementary file named equally as the .log file and
with .add extension where all this info is stored. We have, still, not worked
with this complementary files.
This work is based on the paper referenced in butracker #194 which has some
errors (e.g. beginning of log and beginning of dive are changed) and a lot of
bytes of unknown meaning. Example.log shows, at least, one more byte than those
referred in the paper for the O2 Aladin computer, this could be a byte referred
to the use of SCR but the lack of an OC dive with O2 computer makes impossible
for us to compare.
The only way we have figured out to distinguish a priori between SCR and non
SCR dives with O2 computers is that the dives are tagged with a "rebreather"
tag. Obviously this is not a very trusty way of doing things. In SCR dives,
the O2% in mix means, probably, the maximum O2% in the circuit, not the O2%
of the EAN mix in the tanks, which would be unknown in this case.
The list of DCs related in bug #194 paper seems incomplete, we have added
one or two from WLog and discarded those which are known to exist but whose
model is unknown, grouping them under the imaginative name of "unknown". The
list can easily be increased in the future if we ever know the models
identifiers.
BTW, in Example.log, 0x00 identifier is used for some DC dives and from my own
divelogs is inferred that 0x00 is used for manually entered dives, this could
easily be an error in Example.log coming from a preproduction DC model.
Example.log which is shipped in datatrak package is included in dives
directory for testing pourposes.
[Dirk Hohndel: some small cleanups, merged with latest master, support
divesites, remove the pointless memset() before free() calls
add to cmake build]
Signed-off-by: Salvador Cuñat <salvador.cunat@gmail.com>
Signed-off-by: Dirk Hohndel <dirk@hohndel.org>
2014-11-05 18:38:27 +00:00
|
|
|
/* DataTrak/Wlog */
|
2015-03-10 22:23:14 +00:00
|
|
|
if (fmt && !strcasecmp(fmt + 1, "LOG")) {
|
2020-08-26 10:37:33 +00:00
|
|
|
const char *t = strrchr(filename, '.');
|
2024-02-28 06:50:11 +00:00
|
|
|
std::string wl_name = std::string(filename, t - filename) + ".add";
|
2024-03-01 12:09:20 +00:00
|
|
|
auto [wl_mem, err] = readfile(wl_name.c_str());
|
|
|
|
if (err < 0) {
|
2024-02-28 06:50:11 +00:00
|
|
|
fprintf(stderr, "No file %s found. No WLog extensions.\n", wl_name.c_str());
|
2024-03-01 12:09:20 +00:00
|
|
|
wl_mem.clear();
|
2020-08-26 10:37:33 +00:00
|
|
|
}
|
2024-03-01 12:09:20 +00:00
|
|
|
return datatrak_import(mem, wl_mem, log);
|
Import Datatrak/WLog files
Sequentially parses a file, expected to be a Datatrak/WLog divelog, and
converts the dive info into Subsurface's dive structure.
As my first DC, back in 90s, was an Aladin Air X, the obvious choice of log
software was DTrak (Win version). After using it for some time we moved to WLog
(shareware software more user friendly than Dtrak, printing capable, and still
better, it runs under wine, which, as linux user, was definitive for me). Then,
some years later, my last Aladin died and I moved to an OSTC, forcing me to
look for a software that support this DC.
I found JDivelog which was capable of import Dtrak logs and used it for some
time until discovered Subsurface existence and devoted to it.
The fact was that importing Dtrak dives in JDivelog and then re-importing them
in Subsurface caused a significant data loss (mainly in the profile events and
alarms) and weird location of some other info in the dive notes (mostly tag
items in the original Dtrak software). This situation can't actually be solved
with tools like divelogs.de which causes similar if no greater data loss.
Although this won't be a core feature for Subsurface, I expect it can be useful
for some other divers as has been for me.
Comments and issues:
Datatrak/Wlog files include a lot of diving data which are not directly
supported in Subsurface, in these cases we choose mostly to use "tags".
The lack of some important info in Datatrak archives (e.g. tank's initial
pressure) forces us to do some arbitrary assumptions (e.g. initial pressure =
200 bar).
There might be archives coming directly from old DOS days, as first versions
of Datatrak run on that OS; they were coded CP437 or CP850, while dive logs
coming from Win versions seems to be coded CP1252. Finally, Wlog seems to use a
mixed confusing style. Program directly converts some of the old encoded chars
to iso8859 but is expected there be some issues with non alphabetic chars, e.g.
"ª".
There are two text fields: "Other activities" and "Dive notes", both limited to
256 char size. We have merged them in Subsurface's "Dive Notes" although the
first one could be "tagged", but we're unsure that the user had filled it in
a tag friendly way.
WLog adds some information to the dive and lets the user write more than
256 chars notes. This is achieved, while keeping compatibility with DTrak
divelogs, by adding a complementary file named equally as the .log file and
with .add extension where all this info is stored. We have, still, not worked
with these complementary files.
This work is based on the paper referenced in bugtracker #194 which has some
errors (e.g. beginning of log and beginning of dive are changed) and a lot of
bytes of unknown meaning. Example.log shows, at least, one more byte than those
referred in the paper for the O2 Aladin computer, this could be a byte referred
to the use of SCR but the lack of an OC dive with O2 computer makes it
impossible for us to compare.
The only way we have figured out to distinguish a priori between SCR and non
SCR dives with O2 computers is that the dives are tagged with a "rebreather"
tag. Obviously this is not a very trusty way of doing things. In SCR dives,
the O2% in mix means, probably, the maximum O2% in the circuit, not the O2%
of the EAN mix in the tanks, which would be unknown in this case.
The list of DCs related in bug #194 paper seems incomplete, we have added
one or two from WLog and discarded those which are known to exist but whose
model is unknown, grouping them under the imaginative name of "unknown". The
list can easily be increased in the future if we ever know the models
identifiers.
BTW, in Example.log, 0x00 identifier is used for some DC dives and from my own
divelogs is inferred that 0x00 is used for manually entered dives, this could
easily be an error in Example.log coming from a preproduction DC model.
Example.log which is shipped in datatrak package is included in dives
directory for testing purposes.
[Dirk Hohndel: some small cleanups, merged with latest master, support
divesites, remove the pointless memset() before free() calls
add to cmake build]
Signed-off-by: Salvador Cuñat <salvador.cunat@gmail.com>
Signed-off-by: Dirk Hohndel <dirk@hohndel.org>
2014-11-05 18:38:27 +00:00
|
|
|
}
|
|
|
|
|
2015-04-03 23:07:59 +00:00
|
|
|
/* OSTCtools */
|
|
|
|
if (fmt && (!strcasecmp(fmt + 1, "DIVE"))) {
|
core: introduce divelog structure
The parser API was very annoying, as a number of tables
to-be-filled were passed in as pointers. The goal of this
commit is to collect all these tables in a single struct.
This should make it (more or less) clear what is actually
written into the divelog files.
Moreover, it should now be rather easy to search for
instances, where the global logfile is accessed (and it
turns out that there are many!).
The divelog struct does not contain the tables as substructs,
but only collects pointers. The idea is that the "divelog.h"
file can be included without all the other files describing
the numerous tables.
To make it easier to use from C++ parts of the code, the
struct implements a constructor and a destructor. Sadly,
we can't use smart pointers, since the pointers are accessed
from C code. Therefore the constructor and destructor are
quite complex.
The whole commit is large, but was mostly an automatic
conversion.
One oddity of note: the divelog structure also contains
the "autogroup" flag, since that is saved in the divelog.
This actually fixes a bug: Before, when importing dives
from a different log, the autogroup flag was overwritten.
This was probably not intended and does not happen anymore.
Signed-off-by: Berthold Stoeger <bstoeger@mail.tuwien.ac.at>
2022-11-08 20:31:08 +00:00
|
|
|
ostctools_import(filename, log);
|
2015-04-03 23:07:59 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2024-03-01 12:09:20 +00:00
|
|
|
return parse_file_buffer(filename, mem, log);
|
2012-01-26 21:00:45 +00:00
|
|
|
}
|