Merge pull request #1766 from pi-hole/new/teleporter_v5

Add support for legacy Teleporter archives
This commit is contained in:
Dominik 2024-01-06 16:42:04 +01:00 committed by GitHub
commit 977acf480c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 746 additions and 86 deletions

View File

@ -2,14 +2,19 @@
"name": "FTL x86_64 Build Env",
"image": "ghcr.io/pi-hole/ftl-build:v2.4.1",
"runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
"extensions": [
"jetmartin.bats",
"ms-vscode.cpptools",
"ms-vscode.cmake-tools",
"eamodio.gitlens"
],
"customizations": {
"vscode": {
"extensions": [
"jetmartin.bats",
"ms-vscode.cpptools",
"ms-vscode.cmake-tools",
"eamodio.gitlens"
]
}
},
"mounts": [
"type=bind,source=/home/${localEnv:USER}/.ssh,target=/root/.ssh,readonly"
"type=bind,source=/home/${localEnv:USER}/.ssh,target=/root/.ssh,readonly",
"type=bind,source=/var/www/html,target=/var/www/html,readonly"
]
}

View File

@ -768,7 +768,7 @@ int api_list(struct ftl_conn *api)
}
else if((api->item = startsWith("/api/domains/allow", api)) != NULL)
{
listtype = GRAVITY_DOMAINLIST_ALLOW_ALL;
listtype = GRAVITY_DOMAINLIST_ALLOW_ALL;
}
else if((api->item = startsWith("/api/domains/deny/exact", api)) != NULL)
{

View File

@ -15,8 +15,18 @@
#include "api/api.h"
// ERRBUF_SIZE
#include "config/dnsmasq_config.h"
// inflate_buffer()
#include "zip/gzip.h"
// find_file_in_tar()
#include "zip/tar.h"
// sqlite3_open_v2()
#include "database/sqlite3.h"
// dbquery()
#include "database/common.h"
// MAX_ROTATIONS
#include "files.h"
#define MAXZIPSIZE (50u*1024*1024)
#define MAXFILESIZE (50u*1024*1024)
static int api_teleporter_GET(struct ftl_conn *api)
{
@ -58,9 +68,9 @@ static int api_teleporter_GET(struct ftl_conn *api)
struct upload_data {
bool too_large;
char *sid;
char *zip_data;
char *zip_filename;
size_t zip_size;
uint8_t *data;
char *filename;
size_t filesize;
};
// Callback function for CivetWeb to determine which fields we want to receive
@ -79,7 +89,7 @@ static int field_found(const char *key,
is_sid = false;
if(strcasecmp(key, "file") == 0 && filename && *filename)
{
data->zip_filename = strdup(filename);
data->filename = strdup(filename);
is_file = true;
return MG_FORM_FIELD_STORAGE_GET;
}
@ -103,21 +113,21 @@ static int field_get(const char *key, const char *value, size_t valuelen, void *
if(is_file)
{
if(data->zip_size + valuelen > MAXZIPSIZE)
if(data->filesize + valuelen > MAXFILESIZE)
{
log_warn("Uploaded Teleporter ZIP archive is too large (limit is %u bytes)",
MAXZIPSIZE);
log_warn("Uploaded Teleporter file is too large (limit is %u bytes)",
MAXFILESIZE);
data->too_large = true;
return MG_FORM_FIELD_HANDLE_ABORT;
}
// Allocate memory for the raw ZIP archive data
data->zip_data = realloc(data->zip_data, data->zip_size + valuelen);
// Copy the raw ZIP archive data
memcpy(data->zip_data + data->zip_size, value, valuelen);
// Store the size of the ZIP archive raw data
data->zip_size += valuelen;
log_debug(DEBUG_API, "Received ZIP archive (%zu bytes, buffer is now %zu bytes)",
valuelen, data->zip_size);
// Allocate memory for the raw file data
data->data = realloc(data->data, data->filesize + valuelen);
// Copy the raw file data
memcpy(data->data + data->filesize, value, valuelen);
// Store the size of the file raw data
data->filesize += valuelen;
log_debug(DEBUG_API, "Received file (%zu bytes, buffer is now %zu bytes)",
valuelen, data->filesize);
}
else if(is_sid)
{
@ -143,24 +153,28 @@ static int field_stored(const char *path, long long file_size, void *user_data)
// Release all heap buffers attached to an upload_data struct.
// free(NULL) is a no-op, so no NULL guards are needed. Pointers are reset
// to NULL afterwards to protect against double-free/use-after-free should
// the struct be reused. Always returns 0 (convenience for callers that
// free-and-return in one statement).
static int free_upload_data(struct upload_data *data)
{
	free(data->filename);
	data->filename = NULL;

	free(data->sid);
	data->sid = NULL;

	free(data->data);
	data->data = NULL;

	return 0;
}
// Private function prototypes
static int process_received_zip(struct ftl_conn *api, struct upload_data *data);
static int process_received_tar_gz(struct ftl_conn *api, struct upload_data *data);
static int api_teleporter_POST(struct ftl_conn *api)
{
struct upload_data data;
@ -170,7 +184,7 @@ static int api_teleporter_POST(struct ftl_conn *api)
// Disallow large ZIP archives (> 50 MB) to prevent DoS attacks.
// Typically, the ZIP archive size should be around 30-100 kB.
if(req_info->content_length > MAXZIPSIZE)
if(req_info->content_length > MAXFILESIZE)
{
free_upload_data(&data);
return send_json_error(api, 400,
@ -191,7 +205,7 @@ static int api_teleporter_POST(struct ftl_conn *api)
}
// Check if we received something we consider being a file
if(data.zip_data == NULL || data.zip_size == 0)
if(data.data == NULL || data.filesize == 0)
{
free_upload_data(&data);
return send_json_error(api, 400,
@ -209,28 +223,46 @@ static int api_teleporter_POST(struct ftl_conn *api)
"ZIP archive too large",
NULL);
}
/*
// Set the payload to the SID we received (if available)
if(data.sid != NULL)
// Check if we received something that claims to be a ZIP archive
// - filename should end in ".zip"
// - the data itself
// - should be at least 40 bytes long
// - start with 0x04034b50 (local file header signature, see https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT)
if(strlen(data.filename) > 4 &&
strcmp(data.filename + strlen(data.filename) - 4, ".zip") == 0 &&
data.filesize >= 40 &&
memcmp(data.data, "\x50\x4b\x03\x04", 4) == 0)
{
const size_t bufsize = strlen(data.sid) + 5;
api->payload.raw = calloc(bufsize, sizeof(char));
strncpy(api->payload.raw, "sid=", 5);
strncat(api->payload.raw, data.sid, bufsize - 4);
return process_received_zip(api, &data);
}
// Check if we received something that claims to be a TAR.GZ archive
// - filename should end in ".tar.gz"
// - the data itself
// - should be at least 40 bytes long
// - start with the gzip magic bytes 0x1f 0x8b (see https://www.ietf.org/rfc/rfc1952.txt)
else if(strlen(data.filename) > 7 &&
strcmp(data.filename + strlen(data.filename) - 7, ".tar.gz") == 0 &&
data.filesize >= 40 &&
memcmp(data.data, "\x1f\x8b", 2) == 0)
{
return process_received_tar_gz(api, &data);
}
// Check if the client is authorized to use this API endpoint
if(check_client_auth(api) == API_AUTH_UNAUTHORIZED)
{
free_upload_data(&data);
return send_json_unauthorized(api);
}
*/
// Process what we received
// else: invalid file
free_upload_data(&data);
return send_json_error(api, 400,
"bad_request",
"Invalid file",
"The uploaded file does not appear to be a valid Pi-hole Teleporter archive");
}
static int process_received_zip(struct ftl_conn *api, struct upload_data *data)
{
char hint[ERRBUF_SIZE];
memset(hint, 0, sizeof(hint));
cJSON *json_files = JSON_NEW_ARRAY();
const char *error = read_teleporter_zip(data.zip_data, data.zip_size, hint, json_files);
const char *error = read_teleporter_zip(data->data, data->filesize, hint, json_files);
if(error != NULL)
{
const size_t msglen = strlen(error) + strlen(hint) + 4;
@ -242,7 +274,7 @@ static int api_teleporter_POST(struct ftl_conn *api)
strcat(msg, ": ");
strcat(msg, hint);
}
free_upload_data(&data);
free_upload_data(data);
return send_json_error_free(api, 400,
"bad_request",
"Invalid ZIP archive",
@ -250,7 +282,7 @@ static int api_teleporter_POST(struct ftl_conn *api)
}
// Free allocated memory
free_upload_data(&data);
free_upload_data(data);
// Send response
cJSON *json = JSON_NEW_OBJECT();
@ -258,6 +290,445 @@ static int api_teleporter_POST(struct ftl_conn *api)
JSON_SEND_OBJECT(json);
}
// Mapping of legacy Teleporter v5 (Pi-hole v5.x) JSON export files to
// gravity database tables. Each entry lists the exact columns expected in
// the JSON objects so an import can be validated before touching the
// database. listtype mirrors the GRAVITY_DOMAINLIST_* enum values and is
// only meaningful for the shared "domainlist" table (-1 = not applicable);
// v5 exported the domainlist split by type into separate files.
static struct teleporter_files {
const char *filename; // Filename of the file in the archive
const char *table_name; // Name of the table in the database
const int listtype; // Type of list (only used for domainlist table)
const size_t num_columns; // Number of columns in the table
const char *columns[10]; // List of columns in the table
} teleporter_v5_files[] = {
{
.filename = "adlist.json",
.table_name = "adlist",
.listtype = -1,
.num_columns = 10,
.columns = { "id", "address", "enabled", "date_added", "date_modified", "comment", "date_updated", "number", "invalid_domains", "status" } // abp_entries and type are not defined in Pi-hole v5.x
},{
.filename = "adlist_by_group.json",
.table_name = "adlist_by_group",
.listtype = -1,
.num_columns = 2,
.columns = { "group_id", "adlist_id" }
},{
.filename = "blacklist.exact.json",
.table_name = "domainlist",
.listtype = 1, // GRAVITY_DOMAINLIST_DENY_EXACT
.num_columns = 7,
.columns = { "id", "domain", "enabled", "date_added", "date_modified", "comment", "type" }
},{
.filename = "blacklist.regex.json",
.table_name = "domainlist",
.listtype = 3, // GRAVITY_DOMAINLIST_DENY_REGEX
.num_columns = 7,
.columns = { "id", "domain", "enabled", "date_added", "date_modified", "comment", "type" }
},{
.filename = "client.json",
.table_name = "client",
.listtype = -1,
.num_columns = 5,
.columns = { "id", "ip", "date_added", "date_modified", "comment" }
},{
.filename = "client_by_group.json",
.table_name = "client_by_group",
.listtype = -1,
.num_columns = 2,
.columns = { "group_id", "client_id" }
},{
.filename = "domainlist_by_group.json",
.table_name = "domainlist_by_group",
.listtype = -1,
.num_columns = 2,
.columns = { "group_id", "domainlist_id" }
},{
.filename = "group.json",
.table_name = "group",
.listtype = -1,
.num_columns = 6,
.columns = { "id", "enabled", "name", "date_added", "date_modified", "description" }
},{
.filename = "whitelist.exact.json",
.table_name = "domainlist",
.listtype = 0, // GRAVITY_DOMAINLIST_ALLOW_EXACT
.num_columns = 7,
.columns = { "id", "domain", "enabled", "date_added", "date_modified", "comment", "type" }
},{
.filename = "whitelist.regex.json",
.table_name = "domainlist",
.listtype = 2, // GRAVITY_DOMAINLIST_ALLOW_REGEX
.num_columns = 7,
.columns = { "id", "domain", "enabled", "date_added", "date_modified", "comment", "type" }
}
};
// Import one Teleporter v5 JSON array into its gravity database table.
//
// Validates that json is an array of objects, each containing every column
// expected for this table, then replaces the table's content (all rows, or
// only rows of the matching type for domainlist imports) inside a single
// transaction. Returns true on success, false on any validation or
// database error.
//
// Note on error handling: sqlite3_close() on a connection with an open
// transaction implicitly rolls it back, so all early error returns leave
// the database unmodified.
static bool import_json_table(cJSON *json, struct teleporter_files *file)
{
	// Check if the JSON object is an array
	if(!cJSON_IsArray(json))
	{
		log_err("import_json_table(%s): JSON object is not an array", file->filename);
		return false;
	}

	// Number of entries - only used for logging below; an empty array is
	// valid and still clears the target table (the export was empty)
	const int num_entries = cJSON_GetArraySize(json);

	// Check if all the JSON entries contain all the expected columns
	cJSON *json_object = NULL;
	cJSON_ArrayForEach(json_object, json)
	{
		if(!cJSON_IsObject(json_object))
		{
			log_err("import_json_table(%s): JSON array does not contain objects", file->filename);
			return false;
		}

		// If this is a record for the domainlist table, add type/kind
		// (v5 exported the domainlist split by type into separate files,
		// the unified v6 table carries the type as a column)
		if(strcmp(file->table_name, "domainlist") == 0)
		{
			// Add type/kind to the JSON object
			cJSON_AddNumberToObject(json_object, "type", file->listtype);
		}

		// Check if the JSON object contains the expected columns
		for(size_t i = 0; i < file->num_columns; i++)
		{
			if(cJSON_GetObjectItemCaseSensitive(json_object, file->columns[i]) == NULL)
			{
				log_err("import_json_table(%s): JSON object does not contain column \"%s\"", file->filename, file->columns[i]);
				return false;
			}
		}
	}

	log_info("import_json_table(%s): JSON array contains %d entr%s", file->filename, num_entries, num_entries == 1 ? "y" : "ies");

	// Open database connection
	sqlite3 *db = NULL;
	if(sqlite3_open_v2(config.files.gravity.v.s, &db, SQLITE_OPEN_READWRITE, NULL) != SQLITE_OK)
	{
		// Log the gravity database path - this is the file we tried to
		// open above (previously this message wrongly printed the
		// long-term database path)
		log_err("import_json_table(%s): Unable to open database file \"%s\": %s",
		        file->filename, config.files.gravity.v.s, sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Disable foreign key constraints for the duration of this import
	// (rows may reference each other across tables in arbitrary order)
	if(sqlite3_exec(db, "PRAGMA foreign_keys = OFF;", NULL, NULL, NULL) != SQLITE_OK)
	{
		log_err("import_json_table(%s): Unable to disable foreign key constraints: %s", file->filename, sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Start transaction
	if(sqlite3_exec(db, "BEGIN TRANSACTION;", NULL, NULL, NULL) != SQLITE_OK)
	{
		log_err("import_json_table(%s): Unable to start transaction: %s", file->filename, sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Clear existing table entries
	if(file->listtype < 0)
	{
		// Delete all entries in the table
		log_debug(DEBUG_API, "import_json_table(%s): Deleting all entries from table \"%s\"", file->filename, file->table_name);
		if(dbquery(db, "DELETE FROM \"%s\";", file->table_name) != SQLITE_OK)
		{
			log_err("import_json_table(%s): Unable to delete entries from table \"%s\": %s",
			        file->filename, file->table_name, sqlite3_errmsg(db));
			sqlite3_close(db);
			return false;
		}
	}
	else
	{
		// Delete all entries in the table of the same type
		log_debug(DEBUG_API, "import_json_table(%s): Deleting all entries from table \"%s\" of type %d", file->filename, file->table_name, file->listtype);
		if(dbquery(db, "DELETE FROM \"%s\" WHERE type = %d;", file->table_name, file->listtype) != SQLITE_OK)
		{
			log_err("import_json_table(%s): Unable to delete entries from table \"%s\": %s",
			        file->filename, file->table_name, sqlite3_errmsg(db));
			sqlite3_close(db);
			return false;
		}
	}

	// Build dynamic SQL insertion statement
	// "INSERT OR IGNORE INTO table (column1, column2, ...) VALUES (?, ?, ...);"
	// Table and column names come from the static teleporter_v5_files
	// table above, never from user input, so %s interpolation is safe here.
	char *sql = sqlite3_mprintf("INSERT OR IGNORE INTO \"%s\" (", file->table_name);
	for(size_t i = 0; i < file->num_columns; i++)
	{
		char *sql2 = sqlite3_mprintf("%s%s", sql, file->columns[i]);
		sqlite3_free(sql);
		sql = NULL;
		if(i < file->num_columns - 1)
		{
			sql = sqlite3_mprintf("%s, ", sql2);
			sqlite3_free(sql2);
			sql2 = NULL;
		}
		else
		{
			sql = sqlite3_mprintf("%s) VALUES (", sql2);
			sqlite3_free(sql2);
			sql2 = NULL;
		}
	}
	for(size_t i = 0; i < file->num_columns; i++)
	{
		char *sql2 = sqlite3_mprintf("%s?", sql);
		sqlite3_free(sql);
		sql = NULL;
		if(i < file->num_columns - 1)
		{
			sql = sqlite3_mprintf("%s, ", sql2);
			sqlite3_free(sql2);
			sql2 = NULL;
		}
		else
		{
			sql = sqlite3_mprintf("%s);", sql2);
			sqlite3_free(sql2);
			sql2 = NULL;
		}
	}

	// Prepare SQL statement
	sqlite3_stmt *stmt = NULL;
	if(sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK)
	{
		log_err("Unable to prepare SQL statement: %s", sqlite3_errmsg(db));
		sqlite3_free(sql);
		sqlite3_close(db);
		return false;
	}

	// Free allocated memory, the statement holds its own copy of the SQL
	sqlite3_free(sql);
	sql = NULL;

	// Iterate over all JSON objects, binding each object's values to the
	// prepared statement and executing it once per object
	cJSON_ArrayForEach(json_object, json)
	{
		// Bind values to SQL statement
		for(size_t i = 0; i < file->num_columns; i++)
		{
			cJSON *json_value = cJSON_GetObjectItemCaseSensitive(json_object, file->columns[i]);
			if(cJSON_IsString(json_value))
			{
				// Bind string value. SQLITE_STATIC is safe as the
				// cJSON tree outlives the statement execution
				if(sqlite3_bind_text(stmt, i + 1, json_value->valuestring, -1, SQLITE_STATIC) != SQLITE_OK)
				{
					log_err("Unable to bind text value to SQL statement: %s", sqlite3_errmsg(db));
					sqlite3_finalize(stmt);
					sqlite3_close(db);
					return false;
				}
			}
			else if(cJSON_IsNumber(json_value))
			{
				// Bind integer value
				if(sqlite3_bind_int(stmt, i + 1, json_value->valueint) != SQLITE_OK)
				{
					log_err("Unable to bind integer value to SQL statement: %s", sqlite3_errmsg(db));
					sqlite3_finalize(stmt);
					sqlite3_close(db);
					return false;
				}
			}
			else if(cJSON_IsNull(json_value))
			{
				// Bind NULL value
				if(sqlite3_bind_null(stmt, i + 1) != SQLITE_OK)
				{
					log_err("Unable to bind NULL value to SQL statement: %s", sqlite3_errmsg(db));
					sqlite3_finalize(stmt);
					sqlite3_close(db);
					return false;
				}
			}
			else
			{
				// Booleans, arrays and objects are not expected here
				log_err("Unable to bind value to SQL statement: type = %X", (unsigned int)json_value->type & 0xFF);
				sqlite3_finalize(stmt);
				sqlite3_close(db);
				return false;
			}
		}

		// Execute SQL statement
		if(sqlite3_step(stmt) != SQLITE_DONE)
		{
			log_err("Unable to execute SQL statement: %s", sqlite3_errmsg(db));
			sqlite3_finalize(stmt);
			sqlite3_close(db);
			return false;
		}

		// Reset SQL statement so it can be reused for the next row
		if(sqlite3_reset(stmt) != SQLITE_OK)
		{
			log_err("Unable to reset SQL statement: %s", sqlite3_errmsg(db));
			sqlite3_finalize(stmt);
			sqlite3_close(db);
			return false;
		}
	}

	// Finalize SQL statement
	if(sqlite3_finalize(stmt) != SQLITE_OK)
	{
		log_err("Unable to finalize SQL statement: %s", sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Commit transaction
	if(sqlite3_exec(db, "COMMIT;", NULL, NULL, NULL) != SQLITE_OK)
	{
		log_err("Unable to commit transaction: %s", sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Close database connection
	sqlite3_close(db);

	return true;
}
// Import a legacy (Pi-hole v5.x) Teleporter .tar.gz archive.
//
// Decompresses the received gzip data, imports the known v5 JSON tables
// into the gravity database, and extracts legacy config files to disk so
// they are migrated on the subsequent restart. Always consumes (frees) the
// uploaded data and replies with a JSON object listing the imported files.
// On success, schedules an FTL restart to trigger the re-import.
static int process_received_tar_gz(struct ftl_conn *api, struct upload_data *data)
{
	// Try to decompress the received data
	uint8_t *archive = NULL;
	mz_ulong archive_size = 0u;
	if(!inflate_buffer(data->data, data->filesize, &archive, &archive_size))
	{
		free_upload_data(data);
		return send_json_error(api, 400,
		                       "bad_request",
		                       "Invalid GZIP archive",
		                       "The uploaded file does not appear to be a valid gzip archive - decompression failed");
	}

	// Print all files in the TAR archive if in debug mode
	if(config.debug.api.v.b)
	{
		cJSON *json_files = list_files_in_tar(archive, archive_size);

		cJSON *file = NULL;
		cJSON_ArrayForEach(file, json_files)
		{
			const cJSON *name = cJSON_GetObjectItemCaseSensitive(file, "name");
			const cJSON *size = cJSON_GetObjectItemCaseSensitive(file, "size");
			if(name == NULL || size == NULL)
				continue;

			log_debug(DEBUG_API, "Found file in TAR archive: \"%s\" (%d bytes)",
			          name->valuestring, size->valueint);
		}

		// Free the listing again - it was only needed for logging
		// (previously leaked on every debug-mode import)
		cJSON_Delete(json_files);
	}

	// Parse JSON files in the TAR archive and import them into the
	// gravity database
	cJSON *imported_files = JSON_NEW_ARRAY();
	for(size_t i = 0; i < sizeof(teleporter_v5_files) / sizeof(struct teleporter_files); i++)
	{
		size_t fileSize = 0u;
		cJSON *json = NULL;
		const char *file = find_file_in_tar(archive, archive_size, teleporter_v5_files[i].filename, &fileSize);

		if(file != NULL && fileSize > 0u && (json = cJSON_ParseWithLength(file, fileSize)) != NULL)
		{
			if(import_json_table(json, &teleporter_v5_files[i]))
				JSON_COPY_STR_TO_ARRAY(imported_files, teleporter_v5_files[i].filename);

			// Free the parsed tree - the import copied everything it
			// needed into the database (previously leaked per file)
			cJSON_Delete(json);
		}
	}

	// Temporarily write further files to disk so we can import them on restart
	struct {
		const char *archive_name;
		const char *destination;
	} extract_files[] = {
		{
			.archive_name = "custom.list",
			.destination = DNSMASQ_CUSTOM_LIST_LEGACY
		},{
			.archive_name = "dhcp.leases",
			.destination = DHCPLEASESFILE
		},{
			.archive_name = "pihole-FTL.conf",
			.destination = GLOBALCONFFILE_LEGACY
		},{
			.archive_name = "setupVars.conf",
			.destination = config.files.setupVars.v.s
		}
	};

	for(size_t i = 0; i < sizeof(extract_files) / sizeof(*extract_files); i++)
	{
		size_t fileSize = 0u;
		const char *file = find_file_in_tar(archive, archive_size, extract_files[i].archive_name, &fileSize);
		if(file != NULL && fileSize > 0u)
		{
			// Write file to disk
			log_info("Writing file \"%s\" (%zu bytes) to \"%s\"",
			         extract_files[i].archive_name, fileSize, extract_files[i].destination);
			FILE *fp = fopen(extract_files[i].destination, "wb");
			if(fp == NULL)
			{
				log_err("Unable to open file \"%s\" for writing: %s", extract_files[i].destination, strerror(errno));
				continue;
			}
			if(fwrite(file, fileSize, 1, fp) != 1)
			{
				log_err("Unable to write file \"%s\": %s", extract_files[i].destination, strerror(errno));
				fclose(fp);
				continue;
			}
			fclose(fp);
			JSON_COPY_STR_TO_ARRAY(imported_files, extract_files[i].destination);
		}
	}

	// Append WEB_PORTS to setupVars.conf so the currently configured
	// webserver ports survive the legacy-config re-import on restart
	FILE *fp = fopen(config.files.setupVars.v.s, "a");
	if(fp == NULL)
		log_err("Unable to open file \"%s\" for appending: %s", config.files.setupVars.v.s, strerror(errno));
	else
	{
		fprintf(fp, "WEB_PORTS=%s\n", config.webserver.port.v.s);
		fclose(fp);
	}

	// Remove pihole.toml to prevent it from being imported on restart
	if(remove(GLOBALTOMLPATH) != 0)
		log_err("Unable to remove file \"%s\": %s", GLOBALTOMLPATH, strerror(errno));

	// Remove all rotated pihole.toml files to avoid automatic config
	// restore on restart. Path components are loop-invariant, compute
	// them once up front.
	const char *fname = GLOBALTOMLPATH;
	const char *filename = basename(fname);
	// extra 6 bytes is enough space for up to 999 rotations ("/", ".", "\0", "999")
	const size_t buflen = strlen(filename) + strlen(BACKUP_DIR) + 6;
	for(unsigned int i = MAX_ROTATIONS; i > 0; i--)
	{
		char *path = calloc(buflen, sizeof(char));
		if(path == NULL)
		{
			log_err("Unable to allocate memory for backup file path");
			break;
		}
		snprintf(path, buflen, BACKUP_DIR"/%s.%u", filename, i);

		// Remove file (if it exists)
		if(remove(path) != 0 && errno != ENOENT)
			log_err("Unable to remove file \"%s\": %s", path, strerror(errno));

		// Free the path buffer (previously leaked on every iteration)
		free(path);
	}

	// Free the inflated archive buffer allocated by inflate_buffer()
	// (no pointer into it is used beyond this point)
	free(archive);

	// Free allocated memory
	free_upload_data(data);

	// Signal FTL we want to restart for re-import
	api->ftl.restart = true;

	// Send response
	cJSON *json = JSON_NEW_OBJECT();
	JSON_ADD_ITEM_TO_OBJECT(json, "files", imported_files);
	JSON_SEND_OBJECT(json);
}
int api_teleporter(struct ftl_conn *api)
{
if(api->method == HTTP_GET)

View File

@ -1435,36 +1435,39 @@ bool readFTLconf(struct config *conf, const bool rewrite)
rename(GLOBALTOMLPATH, new_name);
}
// Determine default webserver ports
// Check if ports 80/TCP and 443/TCP are already in use
const in_port_t http_port = port_in_use(80) ? 8080 : 80;
const in_port_t https_port = port_in_use(443) ? 8443 : 443;
// Create a string with the default ports
// Allocate memory for the string
char *ports = calloc(32, sizeof(char));
if(ports == NULL)
// Determine default webserver ports if not imported from setupVars.conf
if(!(config.webserver.port.f & FLAG_CONF_IMPORTED))
{
log_err("Unable to allocate memory for default ports string");
return false;
// Check if ports 80/TCP and 443/TCP are already in use
const in_port_t http_port = port_in_use(80) ? 8080 : 80;
const in_port_t https_port = port_in_use(443) ? 8443 : 443;
// Create a string with the default ports
// Allocate memory for the string
char *ports = calloc(32, sizeof(char));
if(ports == NULL)
{
log_err("Unable to allocate memory for default ports string");
return false;
}
// Create the string
snprintf(ports, 32, "%d,%ds", http_port, https_port);
// Append IPv6 ports if IPv6 is enabled
const bool have_ipv6 = ipv6_enabled();
if(have_ipv6)
snprintf(ports + strlen(ports), 32 - strlen(ports),
",[::]:%d,[::]:%ds", http_port, https_port);
// Set default values for webserver ports
if(conf->webserver.port.t == CONF_STRING_ALLOCATED)
free(conf->webserver.port.v.s);
conf->webserver.port.v.s = ports;
conf->webserver.port.t = CONF_STRING_ALLOCATED;
log_info("Initialised webserver ports at %d (HTTP) and %d (HTTPS), IPv6 support is %s",
http_port, https_port, have_ipv6 ? "enabled" : "disabled");
}
// Create the string
snprintf(ports, 32, "%d,%ds", http_port, https_port);
// Append IPv6 ports if IPv6 is enabled
const bool have_ipv6 = ipv6_enabled();
if(have_ipv6)
snprintf(ports + strlen(ports), 32 - strlen(ports),
",[::]:%d,[::]:%ds", http_port, https_port);
// Set default values for webserver ports
if(conf->webserver.port.t == CONF_STRING_ALLOCATED)
free(conf->webserver.port.v.s);
conf->webserver.port.v.s = ports;
conf->webserver.port.t = CONF_STRING_ALLOCATED;
log_info("Initialised webserver ports at %d (HTTP) and %d (HTTPS), IPv6 support is %s",
http_port, https_port, have_ipv6 ? "enabled" : "disabled");
// Initialize the TOML config file
writeFTLtoml(true);

View File

@ -38,6 +38,9 @@
// characters will be replaced by their UTF-8 escape sequences (UCS-2)
#define TOML_UTF8
// Location of the legacy (pre-v6.0) config file
#define GLOBALCONFFILE_LEGACY "/etc/pihole/pihole-FTL.conf"
union conf_value {
bool b; // boolean value
int i; // integer value
@ -94,6 +97,7 @@ enum conf_type {
#define FLAG_INVALIDATE_SESSIONS (1 << 3)
#define FLAG_WRITE_ONLY (1 << 4)
#define FLAG_ENV_VAR (1 << 5)
#define FLAG_CONF_IMPORTED (1 << 6)
struct conf_item {
const char *k; // item Key

View File

@ -43,7 +43,7 @@ static FILE * __attribute__((nonnull(1), malloc, warn_unused_result)) openFTLcon
return fp;
// Local file not present, try system file
*path = "/etc/pihole/pihole-FTL.conf";
*path = GLOBALCONFFILE_LEGACY;
fp = fopen(*path, "r");
return fp;
@ -113,9 +113,12 @@ const char *readFTLlegacy(struct config *conf)
const char *path = NULL;
FILE *fp = openFTLconf(&path);
if(fp == NULL)
{
log_warn("No readable FTL config file found, using default settings");
return NULL;
}
log_notice("Reading legacy config file");
log_info("Reading legacy config files from %s", path);
// MAXDBDAYS
// defaults to: 365 days

View File

@ -42,6 +42,7 @@ static void get_conf_string_from_setupVars(const char *key, struct conf_item *co
free(conf_item->v.s);
conf_item->v.s = strdup(setupVarsValue);
conf_item->t = CONF_STRING_ALLOCATED;
conf_item->f |= FLAG_CONF_IMPORTED;
// Free memory, harmless to call if read_setupVarsconf() didn't return a result
clearSetupVarsArray();
@ -374,6 +375,8 @@ static void get_conf_listeningMode_from_setupVars(void)
void importsetupVarsConf(void)
{
log_info("Migrating config from %s", config.files.setupVars.v.s);
// Try to obtain password hash from setupVars.conf
get_conf_string_from_setupVars("WEBPASSWORD", &config.webserver.api.pwhash);
@ -443,7 +446,26 @@ void importsetupVarsConf(void)
get_conf_bool_from_setupVars("DHCP_RAPID_COMMIT", &config.dhcp.rapidCommit);
get_conf_bool_from_setupVars("queryLogging", &config.dns.queryLogging);
get_conf_string_from_setupVars("GRAVITY_TMPDIR", &config.files.gravity_tmp);
// Ports may be temporarily stored when importing a legacy Teleporter v5 file
get_conf_string_from_setupVars("WEB_PORTS", &config.webserver.port);
// Move the setupVars.conf file to setupVars.conf.old
char *old_setupVars = calloc(strlen(config.files.setupVars.v.s) + 5, sizeof(char));
if(old_setupVars == NULL)
{
log_warn("Could not allocate memory for old_setupVars");
return;
}
strcpy(old_setupVars, config.files.setupVars.v.s);
strcat(old_setupVars, ".old");
if(rename(config.files.setupVars.v.s, old_setupVars) != 0)
log_warn("Could not move %s to %s", config.files.setupVars.v.s, old_setupVars);
else
log_info("Moved %s to %s", config.files.setupVars.v.s, old_setupVars);
free(old_setupVars);
}
char* __attribute__((pure)) find_equals(char *s)

View File

@ -157,7 +157,7 @@ int main (int argc, char *argv[])
cleanup(exit_code);
if(exit_code == RESTART_FTL_CODE)
execv(argv[0], argv);
execvp(argv[0], argv);
return exit_code;
}

View File

@ -11,6 +11,8 @@
set(sources
gzip.c
gzip.h
tar.c
tar.h
teleporter.c
teleporter.h
)

View File

@ -14,7 +14,6 @@
#include <string.h>
// le32toh and friends
#include <endian.h>
#include "miniz/miniz.h"
#include "gzip.h"
#include "log.h"
@ -103,8 +102,8 @@ static bool deflate_buffer(const unsigned char *buffer_uncompressed, const mz_ul
return true;
}
static bool inflate_buffer(unsigned char *buffer_compressed, mz_ulong size_compressed,
unsigned char **buffer_uncompressed, mz_ulong *size_uncompressed)
bool inflate_buffer(unsigned char *buffer_compressed, mz_ulong size_compressed,
unsigned char **buffer_uncompressed, mz_ulong *size_uncompressed)
{
// Check GZIP header (magic byte 1F 8B and compression algorithm deflate 08)
if(buffer_compressed[0] != 0x1F || buffer_compressed[1] != 0x8B)

View File

@ -11,6 +11,10 @@
#define GZIP_H
#include <stdbool.h>
#include "miniz/miniz.h"
bool inflate_buffer(unsigned char *buffer_compressed, mz_ulong size_compressed,
unsigned char **buffer_uncompressed, mz_ulong *size_uncompressed);
bool deflate_file(const char *in, const char *out, bool verbose);
bool inflate_file(const char *infile, const char *outfile, bool verbose);

128
src/zip/tar.c Normal file
View File

@ -0,0 +1,128 @@
/* Pi-hole: A black hole for Internet advertisements
* (c) 2023 Pi-hole, LLC (https://pi-hole.net)
* Network-wide ad blocking via your own hardware.
*
* FTL Engine
* In-memory tar reading routines
*
* This file is copyright under the latest version of the EUPL.
* Please see LICENSE file for your rights under this license. */
#include "zip/tar.h"
#include "log.h"
// TAR offsets
#define TAR_NAME_OFFSET 0
#define TAR_SIZE_OFFSET 124
#define TAR_MAGIC_OFFSET 257
// TAR constants
#define TAR_BLOCK_SIZE 512
#define TAR_NAME_SIZE 100
#define TAR_SIZE_SIZE 12
#define TAR_MAGIC_SIZE 5
static const char MAGIC_CONST[] = "ustar"; // Modern GNU tar's magic const */
/**
* Find a file in a TAR archive
* @param tarData Pointer to the TAR archive in memory
* @param tarSize Size of the TAR archive in memory in bytes
* @param fileName Name of the file to find
* @param fileSize Pointer to a size_t variable to store the file size in
* @return Pointer to the file data or NULL if not found
*/
const char * __attribute__((nonnull (1,3,4))) find_file_in_tar(const uint8_t *tarData, const size_t tarSize,
const char *fileName, size_t *fileSize)
{
bool found = false;
size_t size, p = 0, newOffset = 0;
// Convert to char * to be able to do pointer arithmetic more easily
const char *tar = (const char *)tarData;
// Initialize fileSize to 0
*fileSize = 0;
// Loop through TAR file
do
{
// "Load" data from tar - just point to passed memory
const char *name = tar + TAR_NAME_OFFSET + p + newOffset;
const char *sz = tar + TAR_SIZE_OFFSET + p + newOffset; // size str
p += newOffset; // pointer to current file's data in TAR
// Check for supported TAR version or end of TAR
for (size_t i = 0; i < TAR_MAGIC_SIZE; i++)
if (tar[i + TAR_MAGIC_OFFSET + p] != MAGIC_CONST[i])
return NULL;
// Convert file size from string into integer
size = 0;
for (ssize_t i = TAR_SIZE_SIZE - 2, mul = 1; i >= 0; mul *= 8, i--) // Octal str to int
if ((sz[i] >= '1') && (sz[i] <= '9'))
size += (sz[i] - '0') * mul;
//Offset size in bytes. Depends on file size and TAR block size
newOffset = (1 + size / TAR_BLOCK_SIZE) * TAR_BLOCK_SIZE; //trim by block
if ((size % TAR_BLOCK_SIZE) > 0)
newOffset += TAR_BLOCK_SIZE;
found = strncmp(name, fileName, TAR_NAME_SIZE) == 0;
} while (!found && (p + newOffset + TAR_BLOCK_SIZE <= tarSize));
if (!found)
return NULL; // No file found in TAR - return NULL
// File found in TAR - return pointer to file data and set fileSize
*fileSize = size;
return tar + p + TAR_BLOCK_SIZE;
}
/**
* List all files in a TAR archive
* @param tarData Pointer to the TAR archive in memory
* @param tarSize Size of the TAR archive in memory in bytes
* @return Pointer to a cJSON array containing all file names with file size
*/
cJSON * __attribute__((nonnull (1))) list_files_in_tar(const uint8_t *tarData, const size_t tarSize)
{
cJSON *files = cJSON_CreateArray();
size_t size, p = 0, newOffset = 0;
// Convert to char * to be able to do pointer arithmetic more easily
const char *tar = (const char *)tarData;
// Loop through TAR file
do
{
// "Load" data from tar - just point to passed memory
const char *name = tar + TAR_NAME_OFFSET + p + newOffset;
const char *sz = tar + TAR_SIZE_OFFSET + p + newOffset; // size str
p += newOffset; // pointer to current file's data in TAR
// Check for supported TAR version or end of TAR
for (size_t i = 0; i < TAR_MAGIC_SIZE; i++)
if (tar[i + TAR_MAGIC_OFFSET + p] != MAGIC_CONST[i])
return files;
// Convert file size from string into integer
size = 0;
for (ssize_t i = TAR_SIZE_SIZE - 2, mul = 1; i >= 0; mul *= 8, i--) // Octal str to int
if ((sz[i] >= '1') && (sz[i] <= '9'))
size += (sz[i] - '0') * mul;
//Offset size in bytes. Depends on file size and TAR block size
newOffset = (1 + size / TAR_BLOCK_SIZE) * TAR_BLOCK_SIZE; //trim by block
if ((size % TAR_BLOCK_SIZE) > 0)
newOffset += TAR_BLOCK_SIZE;
// Add file name to cJSON array
cJSON *file = cJSON_CreateObject();
cJSON_AddItemToObject(file, "name", cJSON_CreateString(name));
cJSON_AddItemToObject(file, "size", cJSON_CreateNumber(size));
cJSON_AddItemToArray(files, file);
} while (p + newOffset + TAR_BLOCK_SIZE <= tarSize);
return files;
}

19
src/zip/tar.h Normal file
View File

@ -0,0 +1,19 @@
/* Pi-hole: A black hole for Internet advertisements
* (c) 2023 Pi-hole, LLC (https://pi-hole.net)
* Network-wide ad blocking via your own hardware.
*
* FTL Engine
* TAR reading routines
*
* This file is copyright under the latest version of the EUPL.
* Please see LICENSE file for your rights under this license. */
#ifndef TAR_H
#define TAR_H
#include "FTL.h"
#include "webserver/cJSON/cJSON.h"
const char *find_file_in_tar(const uint8_t *tar, const size_t tarSize, const char *fileName, size_t *fileSize) __attribute__((nonnull (1,3,4)));
cJSON *list_files_in_tar(const uint8_t *tarData, const size_t tarSize) __attribute__((nonnull (1)));
#endif // TAR_H

View File

@ -523,7 +523,7 @@ static const char *test_and_import_database(void *ptr, size_t size, const char *
return NULL;
}
const char *read_teleporter_zip(char *buffer, const size_t buflen, char * const hint, cJSON *imported_files)
const char *read_teleporter_zip(uint8_t *buffer, const size_t buflen, char * const hint, cJSON *imported_files)
{
// Initialize ZIP archive
mz_zip_archive zip = { 0 };

View File

@ -15,7 +15,7 @@
const char *generate_teleporter_zip(mz_zip_archive *zip, char filename[128], void **ptr, size_t *size);
bool free_teleporter_zip(mz_zip_archive *zip);
const char *read_teleporter_zip(char *buffer, const size_t buflen, char *hint, cJSON *json_files);
const char *read_teleporter_zip(uint8_t *buffer, const size_t buflen, char *hint, cJSON *json_files);
bool write_teleporter_zip_to_disk(void);
bool read_teleporter_zip_from_disk(const char *filename);