Merge branch 'development-v6' into new/migrate_dnsmasq_conf

This commit is contained in:
DL6ER 2024-02-19 21:28:02 +01:00
commit 47f48781ac
No known key found for this signature in database
GPG Key ID: 00135ACBD90B28DD
194 changed files with 17350 additions and 7789 deletions

View File

@ -1,15 +1,20 @@
{
"name": "FTL x86_64 Build Env",
"image": "ghcr.io/pi-hole/ftl-build:v2.3-alpine",
"image": "ghcr.io/pi-hole/ftl-build:v2.5",
"runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
"extensions": [
"jetmartin.bats",
"ms-vscode.cpptools",
"ms-vscode.cmake-tools",
"eamodio.gitlens"
],
"customizations": {
"vscode": {
"extensions": [
"jetmartin.bats",
"ms-vscode.cpptools",
"ms-vscode.cmake-tools",
"eamodio.gitlens"
]
}
},
"mounts": [
"type=bind,source=/home/${localEnv:USER}/.ssh,target=/root/.ssh,readonly"
"type=bind,source=/home/${localEnv:USER}/.ssh,target=/root/.ssh,readonly",
"type=bind,source=/var/www/html,target=/var/www/html,readonly"
]
}

View File

@ -6,3 +6,4 @@ doubleclick
requestor
requestors
punycode
bitap

3
.github/Dockerfile vendored
View File

@ -1,5 +1,4 @@
ARG BUILDER="alpine"
FROM ghcr.io/pi-hole/ftl-build:v2.3-${BUILDER} AS builder
FROM ghcr.io/pi-hole/ftl-build:v2.5 AS builder
WORKDIR /app

View File

@ -87,7 +87,7 @@ jobs:
run: ls -l
-
name: Build and test FTL in ftl-build container (QEMU)
uses: Wandalen/wretry.action@v1.3.0
uses: Wandalen/wretry.action@v1.4.4
with:
attempt_limit: 3
action: docker/build-push-action@v5.0.0
@ -119,9 +119,9 @@ jobs:
-
name: Store binary artifacts for later deployoment
if: github.event_name != 'pull_request'
uses: actions/upload-artifact@v3.1.3
uses: actions/upload-artifact@v4.3.1
with:
name: tmp-storage
name: ${{ matrix.bin_name }}-binary
path: '${{ matrix.bin_name }}*'
-
name: Extract documentation files from container
@ -131,9 +131,9 @@ jobs:
-
name: Upload documentation artifacts for deployoment
if: github.event_name != 'pull_request' && matrix.platform == 'linux/amd64'
uses: actions/upload-artifact@v3.1.3
uses: actions/upload-artifact@v4.3.1
with:
name: tmp-storage
name: pihole-api-docs
path: 'api-docs.tar.gz'
deploy:
@ -146,15 +146,17 @@ jobs:
uses: actions/checkout@v4.1.1
-
name: Get Binaries and documentation built in previous jobs
uses: actions/download-artifact@v3.0.2
uses: actions/download-artifact@v4.1.2
id: download
with:
name: tmp-storage
path: ftl-builds/
path: ftl_builds/
pattern: pihole-*
merge-multiple: true
-
name: Display structure of downloaded files
run: ls -R
working-directory: ${{steps.download.outputs.download-path}}
-
name: Install SSH Key
uses: benoitchantre/setup-ssh-authentication-action@1.0.1
@ -163,14 +165,14 @@ jobs:
known-hosts: ${{ secrets.KNOWN_HOSTS }}
-
name: Untar documentation files
working-directory: ${{steps.download.outputs.download-path}}
working-directory: ftl_builds/
run: |
mkdir docs/
tar xzvf api-docs.tar.gz -C docs/
-
name: Display structure of files ready for upload
run: ls -R
working-directory: ${{steps.download.outputs.download-path}}
working-directory: ftl_builds/
-
name: Transfer Builds to Pi-hole server for pihole checkout
if: github.actor != 'dependabot[bot]'
@ -178,7 +180,7 @@ jobs:
USER: ${{ secrets.SSH_USER }}
HOST: ${{ secrets.SSH_HOST }}
TARGET_DIR: ${{ needs.smoke-tests.outputs.OUTPUT_DIR }}
SOURCE_DIR: ${{ steps.download.outputs.download-path }}
SOURCE_DIR: ftl_builds/
run: |
bash ./deploy.sh
-
@ -187,4 +189,4 @@ jobs:
uses: softprops/action-gh-release@v1
with:
files: |
${{ steps.download.outputs.download-path }}/*
ftl_builds/*

View File

@ -17,7 +17,7 @@ jobs:
- name: Set Node.js version
uses: actions/setup-node@v4
with:
node-version: "18"
node-version: "20"
- name: Install npm dependencies
run: npm ci

View File

@ -17,7 +17,7 @@ jobs:
issues: write
steps:
- uses: actions/stale@v8.0.0
- uses: actions/stale@v9.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
days-before-stale: 30

View File

@ -17,7 +17,7 @@ jobs:
pull-requests: write
steps:
- uses: actions/stale@v8.0.0
- uses: actions/stale@v9.0.0
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
# Do not automatically mark PR/issue as stale

10
.gitignore vendored
View File

@ -7,6 +7,7 @@ version~
# CMake files generated during compilation
/cmake/
/cmake_ci/
/cmake-build-debug/
/cmake-build-release/
@ -18,6 +19,15 @@ version~
/.vscode/
/build/
# __pycache__ files (API tests)
__pycache__/
# When patch fails to apply a patch segment to the original file, it saves the
# temporary original file copy out durably as *.orig, dumps the rejected segment
# to *.rej, and continues trying to apply patch segments.
*.orig
*.rej
# MAC->Vendor database files
tools/manuf.data
tools/macvendor.db

View File

@ -12,6 +12,6 @@ cmake_minimum_required(VERSION 2.8.12)
project(PIHOLE_FTL C)
set(DNSMASQ_VERSION pi-hole-v2.89-e1de9c2)
set(DNSMASQ_VERSION pi-hole-v2.90)
add_subdirectory(src)

View File

@ -7,7 +7,6 @@ patch -p1 < patch/civetweb/0001-Always-Kepler-syntax-for-Lua-server-pages.patch
patch -p1 < patch/civetweb/0001-Add-FTL-URI-rewriting-changes-to-CivetWeb.patch
patch -p1 < patch/civetweb/0001-Add-mbedTLS-debug-logging-hook.patch
patch -p1 < patch/civetweb/0001-Register-CSRF-token-in-conn-request_info.patch
patch -p1 < patch/civetweb/0001-Do-not-try-to-guess-server-hostname-in-Civetweb-when.patch
patch -p1 < patch/civetweb/0001-Log-debug-messages-to-webserver.log-when-debug.webse.patch
patch -p1 < patch/civetweb/0001-Allow-extended-ASCII-characters-in-URIs.patch

View File

@ -1,29 +0,0 @@
From e41d902b5b01896360e1235aebabd3eb352158aa Mon Sep 17 00:00:00 2001
From: DL6ER <dl6er@dl6er.de>
Date: Sun, 8 Oct 2023 14:31:20 +0200
Subject: [PATCH] Do not try to guess server hostname in Civetweb when
redirecting directory URIs to end with a slash
Signed-off-by: DL6ER <dl6er@dl6er.de>
---
src/webserver/civetweb/civetweb.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/webserver/civetweb/civetweb.c b/src/webserver/civetweb/civetweb.c
index f44b17ba..3df8eab9 100644
--- a/src/webserver/civetweb/civetweb.c
+++ b/src/webserver/civetweb/civetweb.c
@@ -15306,7 +15306,9 @@ handle_request(struct mg_connection *conn)
if (!new_path) {
mg_send_http_error(conn, 500, "out or memory");
} else {
- mg_get_request_link(conn, new_path, buflen - 1);
+ /* Pi-hole modification */
+ //mg_get_request_link(conn, new_path, buflen - 1);
+ strcpy(new_path, ri->local_uri_raw);
strcat(new_path, "/");
if (ri->query_string) {
/* Append ? and query string */
--
2.34.1

View File

@ -25,6 +25,6 @@ index 6280ebf6..a5e82f70 100644
char *zHistory;
int nHistory;
+ print_FTL_version();
#if SHELL_WIN_UTF8_OPT
switch( console_utf8_in+2*console_utf8_out ){
default: case 0: break;
#if CIO_WIN_WC_XLATE
# define SHELL_CIO_CHAR_SET (stdout_is_console? " (UTF-16 console I/O)" : "")
#else

View File

@ -24,14 +24,20 @@ set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR})
# SQLITE_DEFAULT_MEMSTATUS=0: This setting causes the sqlite3_status() interfaces that track memory usage to be disabled. This helps the sqlite3_malloc() routines run much faster, and since SQLite uses sqlite3_malloc() internally, this helps to make the entire library faster.
# SQLITE_OMIT_DEPRECATED: Omitting deprecated interfaces and features will not help SQLite to run any faster. It will reduce the library footprint, however. And it is the right thing to do.
# SQLITE_OMIT_PROGRESS_CALLBACK: The progress handler callback counter must be checked in the inner loop of the bytecode engine. By omitting this interface, a single conditional is removed from the inner loop of the bytecode engine, helping SQL statements to run slightly faster.
# SQLITE_OMIT_SHARED_CACHE: This option builds SQLite without support for shared cache mode. The sqlite3_enable_shared_cache() is omitted along with a fair amount of logic within the B-Tree subsystem associated with shared cache management. This compile-time option is recommended most applications as it results in improved performance and reduced library footprint.
# SQLITE_DEFAULT_FOREIGN_KEYS=1: This macro determines whether enforcement of foreign key constraints is enabled or disabled by default for new database connections.
# SQLITE_DQS=0: This setting disables the double-quoted string literal misfeature.
# SQLITE_ENABLE_DBPAGE_VTAB: Enables the SQLITE_DBPAGE virtual table. Warning: writing to the SQLITE_DBPAGE virtual table can very easily cause unrecoverably database corruption.
# SQLITE_TEMP_STORE=2: Store temporary tables in memory for reduced IO and higher performance (can be overwritten by the user at runtime).
# SQLITE_USE_URI=1: The advantage of using a URI filename is that query parameters on the URI can be used to control details of the newly created database connection.
# HAVE_READLINE: Enable readline support to allow easy editing, history and auto-completion
# SQLITE_DEFAULT_CACHE_SIZE=-16384: Allow up to 16 MiB of cache to be used by SQLite3 (default is 2000 kiB)
set(SQLITE_DEFINES "-DSQLITE_OMIT_LOAD_EXTENSION -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_OMIT_DEPRECATED -DSQLITE_OMIT_PROGRESS_CALLBACK -DSQLITE_DEFAULT_FOREIGN_KEYS=1 -DSQLITE_DQS=0 -DSQLITE_ENABLE_DBPAGE_VTAB -DSQLITE_TEMP_STORE=2 -DSQLITE_USE_URI=1 -DHAVE_READLINE -DSQLITE_DEFAULT_CACHE_SIZE=16384")
# SQLITE_DEFAULT_SYNCHRONOUS=1: Use normal synchronous mode (default is 2)
# SQLITE_LIKE_DOESNT_MATCH_BLOBS: This option causes the LIKE operator to only match BLOB values against BLOB values and TEXT values against TEXT values. This compile-time option makes SQLite run more efficiently when processing queries that use the LIKE operator.
# HAVE_MALLOC_USABLE_SIZE: This option causes SQLite to try to use the malloc_usable_size() function to obtain the actual size of memory allocations from the underlying malloc() system interface. Applications are encouraged to use HAVE_MALLOC_USABLE_SIZE whenever possible.
# HAVE_FDATASYNC: This option causes SQLite to try to use the fdatasync() system call to sync the database file to disk when committing a transaction. Syncing using fdatasync() is faster than syncing using fsync() as fdatasync() does not wait for the file metadata to be written to disk.
# SQLITE_DEFAULT_WORKER_THREADS=4: This option sets the default number of worker threads to use when doing parallel sorting and indexing. The default is 0 which means to use a single thread. The default for SQLITE_MAX_WORKER_THREADS is 8.
# SQLITE_MAX_PREPARE_RETRY=200: This option sets the maximum number of automatic re-preparation attempts that can occur after encountering a schema change. This can be caused by running ANALYZE which is done periodically by FTL.
set(SQLITE_DEFINES "-DSQLITE_OMIT_LOAD_EXTENSION -DSQLITE_DEFAULT_MEMSTATUS=0 -DSQLITE_OMIT_DEPRECATED -DSQLITE_OMIT_PROGRESS_CALLBACK -DSQLITE_OMIT_SHARED_CACHE -DSQLITE_DEFAULT_FOREIGN_KEYS=1 -DSQLITE_DQS=0 -DSQLITE_ENABLE_DBPAGE_VTAB -DSQLITE_TEMP_STORE=2 -DHAVE_READLINE -DSQLITE_DEFAULT_CACHE_SIZE=16384 -DSQLITE_DEFAULT_SYNCHRONOUS=1 -DSQLITE_LIKE_DOESNT_MATCH_BLOBS -DHAVE_MALLOC_USABLE_SIZE -DHAVE_FDATASYNC -DSQLITE_DEFAULT_WORKER_THREADS=4 -DSQLITE_MAX_PREPARE_RETRY=200")
# Code hardening and debugging improvements
# -fstack-protector-strong: The program will be resistant to having its stack overflowed
@ -154,6 +160,11 @@ set(EXTRAWARN "${EXTRAWARN_GCC6} \
${EXTRAWARN_GCC8} \
${EXTRAWARN_GCC12} \
${EXTRAWARN_GCC13}")
# Remove extra spaces from EXTRAWARN
string(REGEX REPLACE " +" " " EXTRAWARN "${EXTRAWARN}")
# Separate EXTRAWARN into a list of arguments
separate_arguments(EXTRAWARN)
# -Wxor-used-as-pow
@ -219,8 +230,6 @@ set(sources
regex_r.h
resolve.c
resolve.h
setupVars.c
setupVars.h
shmem.c
shmem.h
signals.c
@ -283,9 +292,12 @@ find_package(Threads REQUIRED)
find_library(LIBHOGWEED NAMES libhogweed${CMAKE_STATIC_LIBRARY_SUFFIX} hogweed HINTS /usr/local/lib64)
find_library(LIBGMP NAMES libgmp${CMAKE_STATIC_LIBRARY_SUFFIX} gmp)
find_library(LIBNETTLE NAMES libnettle${CMAKE_STATIC_LIBRARY_SUFFIX} nettle HINTS /usr/local/lib64)
find_library(LIBIDN NAMES libidn${CMAKE_STATIC_LIBRARY_SUFFIX} idn)
target_link_libraries(pihole-FTL rt Threads::Threads ${LIBHOGWEED} ${LIBGMP} ${LIBNETTLE} ${LIBIDN})
# for IDN2 we need the idn2 library which in turn depends on the unistring library
find_library(LIBIDN2 NAMES libidn2${CMAKE_STATIC_LIBRARY_SUFFIX} idn2)
find_library(LIBUNISTRING NAMES libunistring${CMAKE_STATIC_LIBRARY_SUFFIX} unistring)
target_link_libraries(pihole-FTL rt Threads::Threads ${LIBHOGWEED} ${LIBGMP} ${LIBNETTLE} ${LIBIDN2} ${LIBUNISTRING})
if(LUA_DL STREQUAL "true")
find_library(LIBDL dl)

View File

@ -50,6 +50,7 @@
// Number of elements in an array
#define ArraySize(X) (sizeof(X)/sizeof(X[0]))
// Constant socket buffer length
#define SOCKETBUFFERLEN 1024
// How often do we garbage collect (to ensure we only have data fitting to the MAXLOGAGE defined above)? [seconds]
@ -57,7 +58,7 @@
#define GCinterval 600
// Delay applied to the garbage collecting [seconds]
// Default: -60 (one minute before a full hour)
// Default: -60 (one minute before the end of the interval set above)
#define GCdelay (-60)
// How many client connection do we accept at once?
@ -133,6 +134,14 @@
// Special exit code used to signal that FTL wants to restart
#define RESTART_FTL_CODE 22
// How often should the database be analyzed?
// Default: 604800 (once per week)
#define DATABASE_ANALYZE_INTERVAL 604800
// How often should we update client vendor's from the MAC vendor database?
// Default: 2592000 (once per month)
#define DATABASE_MACVENDOR_INTERVAL 2592000
// Use out own syscalls handling functions that will detect possible errors
// and report accordingly in the log. This will make debugging FTL crash
// caused by insufficient memory or by code bugs (not properly dealing
@ -161,6 +170,22 @@
#define pthread_mutex_lock(mutex) FTLpthread_mutex_lock(mutex, __FILE__, __FUNCTION__, __LINE__)
#define fopen(pathname, mode) FTLfopen(pathname, mode, __FILE__, __FUNCTION__, __LINE__)
#define ftlallocate(fd, offset, len) FTLfallocate(fd, offset, len, __FILE__, __FUNCTION__, __LINE__)
#define strlen(str) FTLstrlen(str, __FILE__, __FUNCTION__, __LINE__)
#define strnlen(str, maxlen) FTLstrnlen(str, maxlen, __FILE__, __FUNCTION__, __LINE__)
#define strcpy(dest, src) FTLstrcpy(dest, src, __FILE__, __FUNCTION__, __LINE__)
#define strncpy(dest, src, n) FTLstrncpy(dest, src, n, __FILE__, __FUNCTION__, __LINE__)
#define memset(s, c, n) FTLmemset(s, c, n, __FILE__, __FUNCTION__, __LINE__)
#define memcpy(dest, src, n) FTLmemcpy(dest, src, n, __FILE__, __FUNCTION__, __LINE__)
#define memmove(dest, src, n) FTLmemmove(dest, src, n, __FILE__, __FUNCTION__, __LINE__)
#define strstr(haystack, needle) FTLstrstr(haystack, needle, __FILE__, __FUNCTION__, __LINE__)
#define strcmp(s1, s2) FTLstrcmp(s1, s2, __FILE__, __FUNCTION__, __LINE__)
#define strncmp(s1, s2, n) FTLstrncmp(s1, s2, n, __FILE__, __FUNCTION__, __LINE__)
#define strcasecmp(s1, s2) FTLstrcasecmp(s1, s2, __FILE__, __FUNCTION__, __LINE__)
#define strncasecmp(s1, s2, n) FTLstrncasecmp(s1, s2, n, __FILE__, __FUNCTION__, __LINE__)
#define strcat(dest, src) FTLstrcat(dest, src, __FILE__, __FUNCTION__, __LINE__)
#define strncat(dest, src, n) FTLstrncat(dest, src, n, __FILE__, __FUNCTION__, __LINE__)
#define memcmp(s1, s2, n) FTLmemcmp(s1, s2, n, __FILE__, __FUNCTION__, __LINE__)
#define memmem(haystack, haystacklen, needle, needlelen) FTLmemmem(haystack, haystacklen, needle, needlelen, __FILE__, __FUNCTION__, __LINE__)
#include "syscalls/syscalls.h"
// Preprocessor help functions

View File

@ -30,74 +30,78 @@ static struct {
bool require_auth;
enum http_method methods;
} api_request[] = {
// URI ARGUMENTS FUNCTION OPTIONS AUTH ALLOWED METHODS
// domains json fifo
// URI ARGUMENTS FUNCTION OPTIONS AUTH ALLOWED METHODS
// flags fifo ID
// Note: The order of appearance matters here, more specific URIs have to
// appear *before* less specific URIs: 1. "/a/b/c", 2. "/a/b", 3. "/a"
{ "/api/auth/sessions", "", api_auth_sessions, { false, true, 0 }, true, HTTP_GET },
{ "/api/auth/session", "/{id}", api_auth_session_delete, { false, true, 0 }, true, HTTP_DELETE },
{ "/api/auth/app", "", generateAppPw, { false, true, 0 }, true, HTTP_GET },
{ "/api/auth/totp", "", generateTOTP, { false, true, 0 }, true, HTTP_GET },
{ "/api/auth", "", api_auth, { false, true, 0 }, false, HTTP_GET | HTTP_POST | HTTP_DELETE },
{ "/api/dns/blocking", "", api_dns_blocking, { false, true, 0 }, true, HTTP_GET | HTTP_POST },
{ "/api/clients/_suggestions", "", api_client_suggestions, { false, true, 0 }, true, HTTP_GET },
{ "/api/clients", "/{client}", api_list, { false, true, 0 }, true, HTTP_GET | HTTP_PUT | HTTP_DELETE },
{ "/api/clients", "", api_list, { false, true, 0 }, true, HTTP_POST },
{ "/api/domains", "/{type}/{kind}/{domain}", api_list, { false, true, 0 }, true, HTTP_GET | HTTP_PUT | HTTP_DELETE },
{ "/api/domains", "/{type}/{kind}", api_list, { false, true, 0 }, true, HTTP_POST },
{ "/api/search", "/{domain}", api_search, { false, true, 0 }, true, HTTP_GET },
{ "/api/groups", "/{name}", api_list, { false, true, 0 }, true, HTTP_GET | HTTP_PUT | HTTP_DELETE },
{ "/api/groups", "", api_list, { false, true, 0 }, true, HTTP_POST },
{ "/api/lists", "/{list}", api_list, { false, true, 0 }, true, HTTP_GET | HTTP_PUT | HTTP_DELETE },
{ "/api/lists", "", api_list, { false, true, 0 }, true, HTTP_POST },
{ "/api/info/client", "", api_info_client, { false, true, 0 }, false, HTTP_GET },
{ "/api/info/login", "", api_info_login, { false, true, 0 }, false, HTTP_GET },
{ "/api/info/system", "", api_info_system, { false, true, 0 }, true, HTTP_GET },
{ "/api/info/database", "", api_info_database, { false, true, 0 }, true, HTTP_GET },
{ "/api/info/sensors", "", api_info_sensors, { false, true, 0 }, true, HTTP_GET },
{ "/api/info/host", "", api_info_host, { false, true, 0 }, true, HTTP_GET },
{ "/api/info/ftl", "", api_info_ftl, { false, true, 0 }, true, HTTP_GET },
{ "/api/info/version", "", api_info_version, { false, true, 0 }, true, HTTP_GET },
{ "/api/info/messages/count", "", api_info_messages_count, { false, true, 0 }, true, HTTP_GET },
{ "/api/info/messages", "/{message_id}", api_info_messages, { false, true, 0 }, true, HTTP_DELETE },
{ "/api/info/messages", "", api_info_messages, { false, true, 0 }, true, HTTP_GET },
{ "/api/info/metrics", "", api_info_metrics, { false, true, 0 }, true, HTTP_GET },
{ "/api/logs/dnsmasq", "", api_logs, { false, true, FIFO_DNSMASQ }, true, HTTP_GET },
{ "/api/logs/ftl", "", api_logs, { false, true, FIFO_FTL }, true, HTTP_GET },
{ "/api/logs/webserver", "", api_logs, { false, true, FIFO_WEBSERVER }, true, HTTP_GET },
{ "/api/history/clients", "", api_history_clients, { false, true, 0 }, true, HTTP_GET },
{ "/api/history/database/clients", "", api_history_database_clients, { false, true, 0 }, true, HTTP_GET },
{ "/api/history/database", "", api_history_database, { false, true, 0 }, true, HTTP_GET },
{ "/api/history", "", api_history, { false, true, 0 }, true, HTTP_GET },
{ "/api/queries/suggestions", "", api_queries_suggestions, { false, true, 0 }, true, HTTP_GET },
{ "/api/queries", "", api_queries, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/summary", "", api_stats_summary, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/query_types", "", api_stats_query_types, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/upstreams", "", api_stats_upstreams, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/top_domains", "", api_stats_top_domains, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/top_clients", "", api_stats_top_clients, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/recent_blocked", "", api_stats_recentblocked, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/database/top_domains", "", api_stats_database_top_items, { true, true, 0 }, true, HTTP_GET },
{ "/api/stats/database/top_clients", "", api_stats_database_top_items, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/database/summary", "", api_stats_database_summary, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/database/query_types", "", api_stats_database_query_types, { false, true, 0 }, true, HTTP_GET },
{ "/api/stats/database/upstreams", "", api_stats_database_upstreams, { false, true, 0 }, true, HTTP_GET },
{ "/api/config", "", api_config, { false, true, 0 }, true, HTTP_GET | HTTP_PATCH },
{ "/api/config", "/{element}", api_config, { false, true, 0 }, true, HTTP_GET },
{ "/api/config", "/{element}/{value}", api_config, { false, true, 0 }, true, HTTP_DELETE | HTTP_PUT },
{ "/api/network/gateway", "", api_network_gateway, { false, true, 0 }, true, HTTP_GET },
{ "/api/network/interfaces", "", api_network_interfaces, { false, true, 0 }, true, HTTP_GET },
{ "/api/network/devices", "", api_network_devices, { false, true, 0 }, true, HTTP_GET },
{ "/api/network/devices", "/{device_id}", api_network_devices, { false, true, 0 }, true, HTTP_DELETE },
{ "/api/endpoints", "", api_endpoints, { false, true, 0 }, true, HTTP_GET },
{ "/api/teleporter", "", api_teleporter, { false, false, 0 }, true, HTTP_GET | HTTP_POST },
{ "/api/dhcp/leases", "", api_dhcp_leases_GET, { false, true, 0 }, true, HTTP_GET },
{ "/api/dhcp/leases", "/{ip}", api_dhcp_leases_DELETE, { false, true, 0 }, true, HTTP_DELETE },
{ "/api/action/gravity", "", api_action_gravity, { false, true, 0 }, true, HTTP_POST },
{ "/api/action/restartdns", "", api_action_restartDNS, { false, true, 0 }, true, HTTP_POST },
{ "/api/action/flush/logs", "", api_action_flush_logs, { false, true, 0 }, true, HTTP_POST },
{ "/api/action/flush/arp", "", api_action_flush_arp, { false, true, 0 }, true, HTTP_POST },
{ "/api/docs", "", api_docs, { false, true, 0 }, false, HTTP_GET },
{ "/api/auth/sessions", "", api_auth_sessions, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/auth/session", "/{id}", api_auth_session_delete, { API_PARSE_JSON, 0 }, true, HTTP_DELETE },
{ "/api/auth/app", "", generateAppPw, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/auth/totp", "", generateTOTP, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/auth", "", api_auth, { API_PARSE_JSON, 0 }, false, HTTP_GET | HTTP_POST | HTTP_DELETE },
{ "/api/dns/blocking", "", api_dns_blocking, { API_PARSE_JSON, 0 }, true, HTTP_GET | HTTP_POST },
{ "/api/clients/_suggestions", "", api_client_suggestions, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/clients", "/{client}", api_list, { API_PARSE_JSON, 0 }, true, HTTP_GET | HTTP_PUT | HTTP_DELETE },
{ "/api/clients", "", api_list, { API_PARSE_JSON, 0 }, true, HTTP_POST },
{ "/api/clients:batchDelete", "", api_list, { API_PARSE_JSON | API_BATCHDELETE, 0 }, true, HTTP_POST },
{ "/api/domains", "/{type}/{kind}/{domain}", api_list, { API_PARSE_JSON, 0 }, true, HTTP_GET | HTTP_PUT | HTTP_DELETE },
{ "/api/domains", "/{type}/{kind}", api_list, { API_PARSE_JSON, 0 }, true, HTTP_POST },
{ "/api/domains:batchDelete", "", api_list, { API_PARSE_JSON | API_BATCHDELETE, 0 }, true, HTTP_POST },
{ "/api/search", "/{domain}", api_search, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/groups", "/{name}", api_list, { API_PARSE_JSON, 0 }, true, HTTP_GET | HTTP_PUT | HTTP_DELETE },
{ "/api/groups", "", api_list, { API_PARSE_JSON, 0 }, true, HTTP_POST },
{ "/api/groups:batchDelete", "", api_list, { API_PARSE_JSON | API_BATCHDELETE, 0 }, true, HTTP_POST },
{ "/api/lists", "/{list}", api_list, { API_PARSE_JSON, 0 }, true, HTTP_GET | HTTP_PUT | HTTP_DELETE },
{ "/api/lists", "", api_list, { API_PARSE_JSON, 0 }, true, HTTP_POST },
{ "/api/lists:batchDelete", "", api_list, { API_PARSE_JSON | API_BATCHDELETE, 0 }, true, HTTP_POST },
{ "/api/info/client", "", api_info_client, { API_PARSE_JSON, 0 }, false, HTTP_GET },
{ "/api/info/login", "", api_info_login, { API_PARSE_JSON, 0 }, false, HTTP_GET },
{ "/api/info/system", "", api_info_system, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/info/database", "", api_info_database, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/info/sensors", "", api_info_sensors, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/info/host", "", api_info_host, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/info/ftl", "", api_info_ftl, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/info/version", "", api_info_version, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/info/messages/count", "", api_info_messages_count, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/info/messages", "/{message_id}", api_info_messages, { API_PARSE_JSON, 0 }, true, HTTP_DELETE },
{ "/api/info/messages", "", api_info_messages, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/info/metrics", "", api_info_metrics, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/logs/dnsmasq", "", api_logs, { API_PARSE_JSON, FIFO_DNSMASQ }, true, HTTP_GET },
{ "/api/logs/ftl", "", api_logs, { API_PARSE_JSON, FIFO_FTL }, true, HTTP_GET },
{ "/api/logs/webserver", "", api_logs, { API_PARSE_JSON, FIFO_WEBSERVER }, true, HTTP_GET },
{ "/api/history/clients", "", api_history_clients, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/history/database/clients", "", api_history_database_clients, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/history/database", "", api_history_database, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/history", "", api_history, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/queries/suggestions", "", api_queries_suggestions, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/queries", "", api_queries, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/summary", "", api_stats_summary, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/query_types", "", api_stats_query_types, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/upstreams", "", api_stats_upstreams, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/top_domains", "", api_stats_top_domains, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/top_clients", "", api_stats_top_clients, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/recent_blocked", "", api_stats_recentblocked, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/database/top_domains", "", api_stats_database_top_items, { API_DOMAINS | API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/database/top_clients", "", api_stats_database_top_items, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/database/summary", "", api_stats_database_summary, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/database/query_types", "", api_stats_database_query_types, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/stats/database/upstreams", "", api_stats_database_upstreams, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/config", "", api_config, { API_PARSE_JSON, 0 }, true, HTTP_GET | HTTP_PATCH },
{ "/api/config", "/{element}", api_config, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/config", "/{element}/{value}", api_config, { API_PARSE_JSON, 0 }, true, HTTP_DELETE | HTTP_PUT },
{ "/api/network/gateway", "", api_network_gateway, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/network/interfaces", "", api_network_interfaces, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/network/devices", "", api_network_devices, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/network/devices", "/{device_id}", api_network_devices, { API_PARSE_JSON, 0 }, true, HTTP_DELETE },
{ "/api/endpoints", "", api_endpoints, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/teleporter", "", api_teleporter, { API_FLAG_NONE, 0 }, true, HTTP_GET | HTTP_POST },
{ "/api/dhcp/leases", "", api_dhcp_leases_GET, { API_PARSE_JSON, 0 }, true, HTTP_GET },
{ "/api/dhcp/leases", "/{ip}", api_dhcp_leases_DELETE, { API_PARSE_JSON, 0 }, true, HTTP_DELETE },
{ "/api/action/gravity", "", api_action_gravity, { API_PARSE_JSON, 0 }, true, HTTP_POST },
{ "/api/action/restartdns", "", api_action_restartDNS, { API_PARSE_JSON, 0 }, true, HTTP_POST },
{ "/api/action/flush/logs", "", api_action_flush_logs, { API_PARSE_JSON, 0 }, true, HTTP_POST },
{ "/api/action/flush/arp", "", api_action_flush_arp, { API_PARSE_JSON, 0 }, true, HTTP_POST },
{ "/api/docs", "", api_docs, { API_PARSE_JSON, 0 }, false, HTTP_GET },
};
int api_handler(struct mg_connection *conn, void *ignored)
@ -113,7 +117,7 @@ int api_handler(struct mg_connection *conn, void *ignored)
double_time(),
{ false, NULL, NULL, NULL, 0u },
{ false },
{ false, false, 0 }
{ API_FLAG_NONE, 0 }
};
log_debug(DEBUG_API, "Requested API URI: %s -> %s %s ? %s (Content-Type %s)",
@ -149,7 +153,7 @@ int api_handler(struct mg_connection *conn, void *ignored)
continue;
}
if(api_request[i].opts.parse_json)
if(api_request[i].opts.flags & API_PARSE_JSON)
{
// Allocate memory for the payload
api.payload.raw = calloc(MAX_PAYLOAD_BYTES, sizeof(char));

View File

@ -15,6 +15,8 @@
// type cJSON
#include "webserver/cJSON/cJSON.h"
#include "webserver/http-common.h"
// regex_t
#include "regex_r.h"
// Common definitions
#define LOCALHOSTv4 "127.0.0.1"
@ -24,6 +26,7 @@
int api_handler(struct mg_connection *conn, void *ignored);
// Statistic methods
int __attribute__((pure)) cmpdesc(const void *a, const void *b);
int api_stats_summary(struct ftl_conn *api);
int api_stats_query_types(struct ftl_conn *api);
int api_stats_upstreams(struct ftl_conn *api);
@ -42,6 +45,7 @@ int api_history_database_clients(struct ftl_conn *api);
// Query methods
int api_queries(struct ftl_conn *api);
int api_queries_suggestions(struct ftl_conn *api);
bool compile_filter_regex(struct ftl_conn *api, const char *path, cJSON *json, regex_t **regex, unsigned int *N_regex);
// Statistics methods (database)
int api_stats_database_top_items(struct ftl_conn *api);

View File

@ -16,7 +16,7 @@
#include "log.h"
#include "config/config.h"
// get_password_hash()
#include "setupVars.h"
#include "config/setupVars.h"
// (un)lock_shm()
#include "shmem.h"
// getrandom()
@ -56,6 +56,9 @@ void init_api(void)
void free_api(void)
{
if(auth_data == NULL)
return;
// Store sessions in database
backup_db_sessions(auth_data, max_sessions);
max_sessions = 0;
@ -151,6 +154,7 @@ int check_client_auth(struct ftl_conn *api, const bool is_api)
}
}
// If not, does the client provide a session ID via COOKIE?
bool cookie_auth = false;
if(!sid_avail)
{
@ -162,7 +166,22 @@ int check_client_auth(struct ftl_conn *api, const bool is_api)
// Mark SID as available
sid_avail = true;
}
}
// If not, does the client provide a session ID via URI?
if(!sid_avail && api->request->query_string && GET_VAR("sid", sid, api->request->query_string) > 0)
{
// "+" may have been replaced by " ", undo this here
for(unsigned int i = 0; i < SID_SIZE; i++)
if(sid[i] == ' ')
sid[i] = '+';
// Zero terminate SID string
sid[SID_SIZE-1] = '\0';
// Mention source of SID
sid_source = "URI";
// Mark SID as available
sid_avail = true;
}
if(!sid_avail)
@ -202,7 +221,6 @@ int check_client_auth(struct ftl_conn *api, const bool is_api)
{
if(auth_data[i].used &&
auth_data[i].valid_until >= now &&
strcmp(auth_data[i].remote_addr, api->request->remote_addr) == 0 &&
strcmp(auth_data[i].sid, sid) == 0)
{
if(need_csrf && strcmp(auth_data[i].csrf, csrf) != 0)
@ -217,10 +235,7 @@ int check_client_auth(struct ftl_conn *api, const bool is_api)
}
if(user_id > API_AUTH_UNAUTHORIZED)
{
// Authentication successful:
// - We know this client
// - The session is (still) valid
// - The IP matches the one we know for this SID
// Authentication successful: valid session
// Update timestamp of this client to extend
// the validity of their API authentication
@ -245,8 +260,8 @@ int check_client_auth(struct ftl_conn *api, const bool is_api)
{
char timestr[128];
get_timestr(timestr, auth_data[user_id].valid_until, false, false);
log_debug(DEBUG_API, "Recognized known user: user_id %i, valid_until: %s, remote_addr %s",
user_id, timestr, auth_data[user_id].remote_addr);
log_debug(DEBUG_API, "Recognized known user: user_id %i, valid_until: %s, remote_addr %s (%s at login)",
user_id, timestr, api->request->remote_addr, auth_data[user_id].remote_addr);
}
}
else
@ -324,14 +339,18 @@ static int get_session_object(struct ftl_conn *api, cJSON *json, const int user_
return 0;
}
static void delete_session(const int user_id)
static bool delete_session(const int user_id)
{
// Skip if nothing to be done here
if(user_id < 0 || user_id >= max_sessions)
return;
return false;
const bool was_valid = auth_data[user_id].used;
// Zero out this session (also sets valid to false == 0)
memset(&auth_data[user_id], 0, sizeof(auth_data[user_id]));
return was_valid;
}
void delete_all_sessions(void)
@ -342,24 +361,6 @@ void delete_all_sessions(void)
static int send_api_auth_status(struct ftl_conn *api, const int user_id, const time_t now)
{
if(user_id == API_AUTH_LOCALHOST)
{
log_debug(DEBUG_API, "API Auth status: OK (localhost does not need auth)");
cJSON *json = JSON_NEW_OBJECT();
get_session_object(api, json, user_id, now);
JSON_SEND_OBJECT(json);
}
if(user_id == API_AUTH_EMPTYPASS)
{
log_debug(DEBUG_API, "API Auth status: OK (empty password)");
cJSON *json = JSON_NEW_OBJECT();
get_session_object(api, json, user_id, now);
JSON_SEND_OBJECT(json);
}
if(user_id > API_AUTH_UNAUTHORIZED && (api->method == HTTP_GET || api->method == HTTP_POST))
{
log_debug(DEBUG_API, "API Auth status: OK");
@ -376,17 +377,45 @@ static int send_api_auth_status(struct ftl_conn *api, const int user_id, const t
get_session_object(api, json, user_id, now);
JSON_SEND_OBJECT(json);
}
else if(user_id > API_AUTH_UNAUTHORIZED && api->method == HTTP_DELETE)
else if(api->method == HTTP_DELETE)
{
log_debug(DEBUG_API, "API Auth status: Logout, asking to delete cookie");
if(user_id > API_AUTH_UNAUTHORIZED)
{
log_debug(DEBUG_API, "API Auth status: Logout, asking to delete cookie");
// Revoke client authentication. This slot can be used by a new client afterwards.
delete_session(user_id);
strncpy(pi_hole_extra_headers, FTL_DELETE_COOKIE, sizeof(pi_hole_extra_headers));
// Revoke client authentication. This slot can be used by a new client afterwards.
const int code = delete_session(user_id) ? 204 : 404;
// Send empty reply with appropriate HTTP status code
send_http_code(api, "application/json; charset=utf-8", code, "");
return code;
}
else
{
log_debug(DEBUG_API, "API Auth status: Logout, but not authenticated");
cJSON *json = JSON_NEW_OBJECT();
get_session_object(api, json, user_id, now);
JSON_SEND_OBJECT_CODE(json, 401); // 401 Unauthorized
}
}
else if(user_id == API_AUTH_LOCALHOST)
{
log_debug(DEBUG_API, "API Auth status: OK (localhost does not need auth)");
strncpy(pi_hole_extra_headers, FTL_DELETE_COOKIE, sizeof(pi_hole_extra_headers));
cJSON *json = JSON_NEW_OBJECT();
get_session_object(api, json, user_id, now);
JSON_SEND_OBJECT_CODE(json, 410); // 410 Gone
JSON_SEND_OBJECT(json);
}
else if(user_id == API_AUTH_EMPTYPASS)
{
log_debug(DEBUG_API, "API Auth status: OK (empty password)");
cJSON *json = JSON_NEW_OBJECT();
get_session_object(api, json, user_id, now);
JSON_SEND_OBJECT(json);
}
else
{
@ -551,7 +580,7 @@ int api_auth(struct ftl_conn *api)
{
// Expired slow, mark as unused
if(auth_data[i].used &&
auth_data[i].valid_until < now)
auth_data[i].valid_until < now)
{
log_debug(DEBUG_API, "API: Session of client %u (%s) expired, freeing...",
i, auth_data[i].remote_addr);
@ -622,6 +651,11 @@ int api_auth(struct ftl_conn *api)
"Rate-limiting login attempts",
NULL);
}
else if(result == NO_PASSWORD_SET)
{
// No password set
log_debug(DEBUG_API, "API: Trying to auth with password but none set: '%s'", password);
}
else
{
log_debug(DEBUG_API, "API: Password incorrect: '%s'", password);
@ -655,9 +689,9 @@ int api_auth_session_delete(struct ftl_conn *api)
return send_json_error(api, 400, "bad_request", "Session ID not in use", NULL);
// Delete session
delete_session(uid);
const int code = delete_session(uid) ? 204 : 404;
// Send empty reply with code 204 No Content
send_http_code(api, "application/json; charset=utf-8", 204, "");
return 204;
// Send empty reply with appropriate HTTP status code
send_http_code(api, "application/json; charset=utf-8", code, "");
return code;
}

View File

@ -130,12 +130,22 @@ static cJSON *addJSONvalue(const enum conf_type conf_type, union conf_value *val
return cJSON_CreateStringReference(get_temp_unit_str(val->temp_unit));
case CONF_STRUCT_IN_ADDR:
{
// Special case 0.0.0.0 -> return empty string
if(val->in_addr.s_addr == INADDR_ANY)
return cJSON_CreateStringReference("");
// else: normal address
char addr4[INET_ADDRSTRLEN] = { 0 };
inet_ntop(AF_INET, &val->in_addr, addr4, INET_ADDRSTRLEN);
return cJSON_CreateString(addr4); // Performs a copy
}
case CONF_STRUCT_IN6_ADDR:
{
// Special case :: -> return empty string
if(memcmp(&val->in6_addr, &in6addr_any, sizeof(in6addr_any)) == 0)
return cJSON_CreateStringReference("");
// else: normal address
char addr6[INET6_ADDRSTRLEN] = { 0 };
inet_ntop(AF_INET6, &val->in6_addr, addr6, INET6_ADDRSTRLEN);
return cJSON_CreateString(addr6); // Performs a copy
@ -284,7 +294,7 @@ static const char *getJSONvalue(struct conf_item *conf_item, cJSON *elem, struct
}
if(!set_and_check_password(conf_item, elem->valuestring))
return "Failed to create password hash (verification failed), password remains unchanged";
return "password hash verification failed";
break;
}
@ -402,11 +412,19 @@ static const char *getJSONvalue(struct conf_item *conf_item, cJSON *elem, struct
struct in_addr addr4 = { 0 };
if(!cJSON_IsString(elem))
return "not of type string";
if(!inet_pton(AF_INET, elem->valuestring, &addr4))
if(strlen(elem->valuestring) == 0)
{
// Special case: empty string -> 0.0.0.0
conf_item->v.in_addr.s_addr = INADDR_ANY;
}
else if(inet_pton(AF_INET, elem->valuestring, &addr4))
{
// Set item
memcpy(&conf_item->v.in_addr, &addr4, sizeof(addr4));
}
else
return "not a valid IPv4 address";
// Set item
memcpy(&conf_item->v.in_addr, &addr4, sizeof(addr4));
log_debug(DEBUG_CONFIG, "%s = %s", conf_item->k, elem->valuestring);
log_debug(DEBUG_CONFIG, "%s = \"%s\"", conf_item->k, elem->valuestring);
break;
}
case CONF_STRUCT_IN6_ADDR:
@ -414,11 +432,16 @@ static const char *getJSONvalue(struct conf_item *conf_item, cJSON *elem, struct
struct in6_addr addr6 = { 0 };
if(!cJSON_IsString(elem))
return "not of type string";
if(!inet_pton(AF_INET6, elem->valuestring, &addr6))
if(strlen(elem->valuestring) == 0)
{
// Special case: empty string -> ::
memcpy(&conf_item->v.in6_addr, &in6addr_any, sizeof(in6addr_any));
}
else if(!inet_pton(AF_INET6, elem->valuestring, &addr6))
return "not a valid IPv6 address";
// Set item
memcpy(&conf_item->v.in6_addr, &addr6, sizeof(addr6));
log_debug(DEBUG_CONFIG, "%s = %s", conf_item->k, elem->valuestring);
log_debug(DEBUG_CONFIG, "%s = \"%s\"", conf_item->k, elem->valuestring);
break;
}
case CONF_JSON_STRING_ARRAY:
@ -582,8 +605,7 @@ static int api_config_get(struct ftl_conn *api)
}
// Release allocated memory
if(requested_path != NULL)
free_config_path(requested_path);
free_config_path(requested_path);
cJSON *json = JSON_NEW_OBJECT();
@ -696,8 +718,23 @@ static int api_config_patch(struct ftl_conn *api)
const char *response = getJSONvalue(new_item, elem, &newconf);
if(response != NULL)
{
log_err("/api/config: %s invalid: %s", new_item->k, response);
continue;
char *hint = calloc(strlen(new_item->k) + strlen(response) + 3, sizeof(char));
if(hint == NULL)
{
free_config(&newconf);
return send_json_error(api, 500,
"internal_error",
"Failed to allocate memory for hint",
NULL);
}
strcpy(hint, new_item->k);
strcat(hint, ": ");
strcat(hint, response);
free_config(&newconf);
return send_json_error_free(api, 400,
"bad_request",
"Config item is invalid",
hint, true);
}
// Get pointer to memory location of this conf_item (global)
@ -814,8 +851,7 @@ static int api_config_put_delete(struct ftl_conn *api)
if(min_level < 2)
{
// Release allocated memory
if(requested_path != NULL)
free_config_path(requested_path);
free_config_path(requested_path);
return send_json_error(api, 400,
"bad_request",
@ -861,15 +897,14 @@ static int api_config_put_delete(struct ftl_conn *api)
{
char *key = strdup(new_item->k);
free_config(&newconf);
if(requested_path != NULL)
free_config_path(requested_path);
free_config_path(requested_path);
return send_json_error_free(api, 400,
"bad_request",
"Config items set via environment variables cannot be changed via the API",
key, true);
}
// Check if this entry does already exist in the array
// Check if this entry exists in the array
int idx = 0;
for(; idx < cJSON_GetArraySize(new_item->v.json); idx++)
{
@ -903,13 +938,12 @@ static int api_config_put_delete(struct ftl_conn *api)
if(found)
{
// Remove item from array
found = true;
cJSON_DeleteItemFromArray(new_item->v.json, idx);
}
else
{
// Item not found
message = "Item not found";
hint = "Can only delete existing items";
break;
}
}
@ -927,16 +961,18 @@ static int api_config_put_delete(struct ftl_conn *api)
}
// Release allocated memory
if(requested_path != NULL)
free_config_path(requested_path);
free_config_path(requested_path);
// Error 404 if not found
if(!found || message != NULL)
// Error 404 if config element not found
if(!found)
{
cJSON *json = JSON_NEW_OBJECT();
JSON_SEND_OBJECT_CODE(json, 404);
}
// Error 400 if unique item already present
if(message != NULL)
{
// For any other error, a more specific message will have been added
// above
if(!message)
message = "No item specified";
return send_json_error(api, 400,
"bad_request",
message,

View File

@ -73,7 +73,7 @@ int api_dhcp_leases_GET(struct ftl_conn *api)
}
// defined in dnsmasq_interface.c
extern bool FTL_unlink_DHCP_lease(const char *ipaddr);
extern bool FTL_unlink_DHCP_lease(const char *ipaddr, const char **hint);
// Delete DHCP leases
int api_dhcp_leases_DELETE(struct ftl_conn *api)
@ -85,16 +85,29 @@ int api_dhcp_leases_DELETE(struct ftl_conn *api)
// Send empty reply with code 204 No Content
return send_json_error(api,
400,
"bad_request",
"bad_request",
"The provided IPv4 address is invalid",
api->item);
api->item);
}
// Delete lease
log_debug(DEBUG_API, "Deleting DHCP lease for address %s", api->item);
FTL_unlink_DHCP_lease(api->item);
// Send empty reply with code 204 No Content
const char *hint = NULL;
const bool found = FTL_unlink_DHCP_lease(api->item, &hint);
if(!found && hint != NULL)
{
// Send error when something went wrong (hint is not NULL)
return send_json_error(api,
400,
"bad_request",
"Failed to delete DHCP lease",
hint);
}
// Send empty reply with codes:
// - 204 No Content (if a lease was deleted)
// - 404 Not Found (if no lease was found)
cJSON *json = JSON_NEW_OBJECT();
JSON_SEND_OBJECT_CODE(json, 204);
JSON_SEND_OBJECT_CODE(json, found ? 204 : 404);
}

View File

@ -13,7 +13,7 @@
#include "webserver/json_macros.h"
#include "api.h"
// {s,g}et_blockingstatus()
#include "setupVars.h"
#include "config/setupVars.h"
// set_blockingmode_timer()
#include "timers.h"
#include "shmem.h"

View File

@ -44,7 +44,11 @@ components:
operationId: "add_auth"
security: []
description: |
Login with a password. The password is not stored in the session, and neither when to generating the session token.
Authenticate using a password. The password isn't stored in the session nor used to create the session token. Instead, the session token is produced using a cryptographically secure random number generator. A CSRF token is utilized to guard against CSRF attacks and is necessary when using Cookie-based authentication. However, it's not needed with other authentication methods.
Both the Session ID (SID) and CSRF token remain valid for the session's duration. The session can be extended before its expiration by performing any authenticated action. By default, the session lasts for 5 minutes. It can be invalidated by either logging out or deleting the session. Additionally, the session becomes invalid when the password is altered or a new application password is created.
If two-factor authentication (2FA) is activated, the Time-based One-Time Password (TOTP) token must be included in the request body. Be aware that the TOTP token, generated by your authenticator app, is only valid for 30 seconds. If the TOTP token is missing, invalid, or has been used previously, the login attempt will be unsuccessful.
requestBody:
description: Callback payload
content:
@ -114,21 +118,27 @@ components:
- Authentication
operationId: "delete_groups"
description: |
A logout attempt without a valid session will result in a `401 Unauthorized` error.
This endpoint can be used to delete the current session. It will
invalidate the session token and the CSRF token. The session can be
extended before its expiration by performing any authenticated action.
By default, the session lasts for 5 minutes. It can be invalidated by
either logging out or deleting the session. Additionally, the session
becomes invalid when the password is altered or a new application
password is created.
A session that was not created due to a login cannot be deleted (e.g., empty API password).
You can also delete a session by its ID using the `DELETE /auth/session/{id}` endpoint.
Note that you cannot delete the current session if you have not
authenticated (e.g., no password has been set on your Pi-hole).
responses:
'200':
description: OK (session not deletable)
'204':
description: No Content (deleted)
'404':
description: Not Found (no session active)
content:
application/json:
schema:
allOf:
- $ref: 'auth.yaml#/components/schemas/session'
- $ref: 'common.yaml#/components/schemas/took'
examples:
no_login_required:
$ref: 'auth.yaml#/components/examples/no_login_required'
$ref: 'common.yaml#/components/schemas/took'
'401':
description: Unauthorized
content:
@ -137,17 +147,6 @@ components:
allOf:
- $ref: 'common.yaml#/components/errors/unauthorized'
- $ref: 'common.yaml#/components/schemas/took'
'410':
description: Gone
content:
application/json:
schema:
allOf:
- $ref: 'auth.yaml#/components/schemas/session'
- $ref: 'common.yaml#/components/schemas/took'
examples:
login_failed:
$ref: 'auth.yaml#/components/examples/login_failed'
session_list:
get:
summary: List of all current sessions
@ -209,6 +208,12 @@ components:
responses:
'204':
description: No Content (deleted)
'404':
description: Not Found (session not found)
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad Request
content:
@ -239,7 +244,10 @@ components:
- Authentication
operationId: "add_app"
description: |
Create a new application password. The generated password is shown only once and cannot be retrieved later so make sure to store it in a safe place. The application password can be used to authenticate against the API instead of the regular password. It does not require 2FA verification. Generating a new application password will invalidate all currently active sessions.
Create a new application password. The generated password is shown only once and cannot be retrieved later - make sure to store it in a safe place. The application password can be used to authenticate against the API instead of the regular password.
It does not require 2FA verification. Generating a new application password will invalidate all currently active sessions.
Note that this endpoint only generates an application password accompanied by its hash. To make this new password effective, the returned `hash` has to be set as `webserver.api.app_password` in the Pi-hole configuration in a follow-up step. This can be done in various ways, e.g. via the API (`PATCH /api/config/webserver/api/app_pwhash`), the graphical web interface (Settings -> All Settings) or by editing the configuration file directly.
responses:
'200':
description: OK

View File

@ -95,6 +95,12 @@ components:
responses:
'204':
description: Item deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
@ -149,7 +155,7 @@ components:
Creates a new client in the `clients` object. The `{client}` itself is specified in the request body (POST JSON).
Clients may be described either by their IP addresses (IPv4 and IPv6 are supported),
IP subnets (CIDR notation, like `192.168.2.0/24`), their MAC addresses (like `12:34:56:78:9A:BC`), by their hostnames (like `localhost`), or by the interface they are connected to (prefaced with a colon, like `:eth0`).</p>
IP subnets (CIDR notation, like `192.168.2.0/24`), their MAC addresses (like `12:34:56:78:9A:BC`), by their hostnames (like `localhost`), or by the interface they are connected to (prefaced with a colon, like `:eth0`).
Note that client recognition by IP addresses (incl. subnet ranges) is preferred over MAC address, host name or interface recognition as the two latter will only be available after some time.
Furthermore, MAC address recognition only works for devices at most one networking hop away from your Pi-hole.
@ -199,6 +205,65 @@ components:
allOf:
- $ref: 'common.yaml#/components/schemas/took'
- $ref: 'common.yaml#/components/errors/unauthorized'
batchDelete:
post:
summary: Delete multiple clients
tags:
- "Client management"
operationId: "batchDelete_clients"
description: |
Deletes multiple clients in the `clients` object. The `{client}`s themselves are specified in the request body (POST JSON).
Clients may be described either by their IP addresses (IPv4 and IPv6 are supported),
IP subnets (CIDR notation, like `192.168.2.0/24`), their MAC addresses (like `12:34:56:78:9A:BC`), by their hostnames (like `localhost`), or by the interface they are connected to (prefaced with a colon, like `:eth0`).
*Note:* There will be no content on success.
requestBody:
description: Callback payload
content:
application/json:
schema:
type: array
items:
type: object
properties:
item:
type: string
description: client IP / MAC / hostname / interface
example:
- "item": "192.168.2.5"
- "item": "::1"
- "item": "12:34:56:78:9A:BC"
- "item": "localhost"
- "item": ":eth0"
responses:
'204':
description: Items deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/bad_request'
- $ref: 'common.yaml#/components/schemas/took'
examples:
no_payload:
$ref: 'clients.yaml#/components/examples/errors/bad_request/no_payload'
'401':
description: Unauthorized
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/unauthorized'
- $ref: 'common.yaml#/components/schemas/took'
schemas:
clients:
get:
@ -209,7 +274,7 @@ components:
description: Array of clients
items:
allOf:
- $ref: 'clients.yaml#/components/schemas/client'
- $ref: 'clients.yaml#/components/schemas/client_object'
- $ref: 'clients.yaml#/components/schemas/comment'
- $ref: 'clients.yaml#/components/schemas/groups'
- $ref: 'clients.yaml#/components/schemas/readonly'
@ -252,25 +317,27 @@ components:
description: Comma-separated list of hostnames (if available)
example: "localhost,ip6-localhost"
client:
type: object
properties:
client:
description: client IP / MAC / hostname / interface
type: string
example: 127.0.0.1
description: client IP / MAC / hostname / interface
type: string
example: 127.0.0.1
client_array:
description: array of client IPs / MACs / hostnames / interfaces
type: array
items:
type: string
example: ["127.0.0.1", "192.168.2.12"]
client_maybe_array:
type: object
properties:
client:
description: array of client IPs / MACs / hostnames / interfaces
type: array
items:
type: string
example: ["127.0.0.1", "192.168.2.12"]
client_maybe_array:
oneOf:
- $ref: 'clients.yaml#/components/schemas/client'
- $ref: 'clients.yaml#/components/schemas/client_array'
oneOf:
- $ref: 'clients.yaml#/components/schemas/client'
- $ref: 'clients.yaml#/components/schemas/client_array'
client_object:
type: object
properties:
client:
$ref: 'clients.yaml#/components/schemas/client'
comment:
type: object
properties:

View File

@ -121,7 +121,7 @@ components:
examples:
invalid_path_depth:
$ref: 'config.yaml#/components/examples/errors/bad_request/invalid_path_depth'
item_not_found:
item_already_present:
$ref: 'config.yaml#/components/examples/errors/bad_request/item_already_present'
'401':
description: Unauthorized
@ -144,6 +144,12 @@ components:
responses:
'204':
description: Item deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
@ -155,8 +161,8 @@ components:
examples:
invalid_path_depth:
$ref: 'config.yaml#/components/examples/errors/bad_request/invalid_path_depth'
item_not_found:
$ref: 'config.yaml#/components/examples/errors/bad_request/item_not_found'
item_already_present:
$ref: 'config.yaml#/components/examples/errors/bad_request/item_already_present'
'401':
description: Unauthorized
content:
@ -233,17 +239,10 @@ components:
type: integer
optimizer:
type: integer
revServer:
type: object
properties:
active:
type: boolean
cidr:
type: string
target:
type: string
domain:
type: string
revServers:
type: array
items:
type: string
blocking:
type: object
properties:
@ -270,8 +269,10 @@ components:
type: boolean
IPv4:
type: string
x-format: ipv4
IPv6:
type: string
x-format: ipv6
blocking:
type: object
properties:
@ -281,8 +282,10 @@ components:
type: boolean
IPv4:
type: string
x-format: ipv4
IPv6:
type: string
x-format: ipv6
rateLimit:
type: object
properties:
@ -297,14 +300,16 @@ components:
type: boolean
start:
type: string
x-format: ipv4
end:
type: string
x-format: ipv4
router:
type: string
domain:
x-format: ipv4
netmask:
type: string
description: |
*Note:* This setting is deprecated and will be removed in a future release. Use dns.domain instead.
x-format: ipv4
leaseTime:
type: string
ipv6:
@ -414,6 +419,8 @@ components:
type: string
maxHistory:
type: integer
maxClients:
type: integer
allow_destructive:
type: boolean
temp:
@ -432,6 +439,8 @@ components:
type: string
gravity:
type: string
gravity_tmp:
type: string
macvendor:
type: string
setupVars:
@ -464,6 +473,8 @@ components:
type: array
items:
type: string
extraLogging:
type: boolean
check:
type: object
properties:
@ -605,11 +616,8 @@ components:
cache:
size: 10000
optimizer: 3600
revServer:
active: false
cidr: "192.168.0.0/24"
target: "192.168.0.1"
domain: "lan"
revServers:
- "true,192.168.0.0/24,192.168.0.1,lan"
blocking:
active: true
mode: 'NULL'
@ -635,7 +643,7 @@ components:
start: "192.168.0.10"
end: "192.168.0.250"
router: "192.168.0.1"
domain: "lan"
netmask: "0.0.0.0"
leaseTime: "24h"
ipv6: true
rapidCommit: true
@ -681,9 +689,10 @@ components:
pwhash: ''
totp_secret: ''
app_pwhash: ''
excludeClients: [ '1.2.3.4', 'localhost', 'fe80::345' ]
excludeDomains: [ 'google.de', 'pi-hole.net' ]
excludeClients: [ '1\.2\.3\.4', 'localhost', 'fe80::345' ]
excludeDomains: [ 'google\.de', 'pi-hole\.net' ]
maxHistory: 86400
maxClients: 10
allow_destructive: true
temp:
limit: 60.0
@ -692,6 +701,7 @@ components:
pid: "/run/pihole-FTL.pid"
database: "/etc/pihole/pihole-FTL.db"
gravity: "/etc/pihole/gravity.db"
gravity_tmp: "/tmp"
macvendor: "/etc/pihole/macvendor.db"
setupVars: "/etc/pihole/setupVars.conf"
pcap: ""
@ -706,6 +716,7 @@ components:
privacylevel: 0
etc_dnsmasq_d: false
dnsmasq_lines: [ ]
extraLogging: false
check:
load: true
shmem: 90
@ -783,13 +794,6 @@ components:
key: "bad_request"
message: "Invalid path depth"
hint: "Use, e.g., DELETE /config/dnsmasq/upstreams/127.0.0.1 to remove \"127.0.0.1\" from config.dns.upstreams"
item_not_found:
summary: Item to be deleted does not exist
value:
error:
key: "bad_request"
message: "Item not found"
hint: "Can only delete existing items"
item_already_present:
summary: Item to be added exists already
value:

View File

@ -36,10 +36,17 @@ components:
operationId: "delete_dhcp"
description: |
This API hook removes a currently active DHCP lease.
Managing DHCP leases is only possible when the DHCP server is enabled.
*Note:* There will be no content on success.
responses:
'204':
description: Item deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
@ -69,7 +76,7 @@ components:
properties:
expires:
type: integer
description: Expiration time
description: Expiration time (0 = infinite lease, never expires)
example: 1675671991
name:
type: string
@ -105,4 +112,4 @@ components:
type: string
required: true
description: IP address of lease to be modified
example: 192.168.2.222
example: 192.168.2.222

View File

@ -29,7 +29,7 @@ components:
description: |
Change the current blocking mode by setting `blocking` to the desired value.
The optional `timer` object may used to set a timer. Once this timer elapsed, the opposite blocking mode is automatically set.
For instance, you can request `{blocking: true, timer: 60}` to disable Pi-hole for one minute.
For instance, you can request `{blocking: false, timer: 60}` to disable Pi-hole for one minute.
Blocking will be automatically resumed afterwards.
You can terminate a possibly running timer by setting `timer` to `null` (the set mode becomes permanent).
@ -39,9 +39,8 @@ components:
'application/json':
schema:
allOf:
- $ref: 'dns.yaml#/components/schemas/blocking'
- $ref: 'dns.yaml#/components/schemas/blocking_bool'
- $ref: 'dns.yaml#/components/schemas/timer'
- $ref: 'common.yaml#/components/schemas/took'
responses:
'200':
description: OK
@ -83,6 +82,14 @@ components:
- "failed"
- "unknown"
example: "enabled"
blocking_bool:
type: object
properties:
blocking:
type: boolean
description: Blocking status
default: true
example: true
timer:
type: object
properties:

View File

@ -128,6 +128,12 @@ components:
responses:
'204':
description: Item deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
@ -212,6 +218,70 @@ components:
allOf:
- $ref: 'common.yaml#/components/errors/unauthorized'
- $ref: 'common.yaml#/components/schemas/took'
batchDelete:
summary: Delete multiple domains
post:
summary: Delete multiple domains
tags:
- "Domain management"
operationId: "batchDelete_domains"
description: |
*Note:* There will be no content on success.
requestBody:
description: Callback payload
content:
application/json:
schema:
type: array
items:
type: object
properties:
item:
type: string
description: Domain to delete
example: "example.com"
type:
type: string
description: Type of domain to delete
enum:
- "allow"
- "deny"
example: "allow"
kind:
type: string
description: Kind of domain to delete
enum:
- "exact"
- "regex"
example: "exact"
responses:
'204':
description: Items deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/bad_request'
- $ref: 'common.yaml#/components/schemas/took'
examples:
no_payload:
$ref: 'domains.yaml#/components/examples/errors/bad_request/no_payload'
'401':
description: Unauthorized
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/unauthorized'
- $ref: 'common.yaml#/components/schemas/took'
schemas:
domains:
get:
@ -222,7 +292,8 @@ components:
description: Array of domains
items:
allOf:
- $ref: 'domains.yaml#/components/schemas/domain'
- $ref: 'domains.yaml#/components/schemas/domain_object'
- $ref: 'domains.yaml#/components/schemas/unicode'
- $ref: 'domains.yaml#/components/schemas/type'
- $ref: 'domains.yaml#/components/schemas/kind'
- $ref: 'domains.yaml#/components/schemas/comment'
@ -243,25 +314,34 @@ components:
- $ref: 'domains.yaml#/components/schemas/groups'
- $ref: 'domains.yaml#/components/schemas/enabled'
domain:
description: Domain
type: string
example: testdomain.com
unicode:
type: object
properties:
domain:
description: Domain
unicode:
description: Unicode domain (may be different from `domain` if punycode-encoding is used)
type: string
example: testdomain.com
example: "äbc.com"
domain_array:
description: array of domains
type: array
items:
type: string
example: ["testdomain.com", "otherdomain.de"]
domain_maybe_array:
type: object
properties:
domain:
description: array of domains
type: array
items:
type: string
example: ["testdomain.com", "otherdomain.de"]
domain_maybe_array:
oneOf:
- $ref: 'domains.yaml#/components/schemas/domain'
- $ref: 'domains.yaml#/components/schemas/domain_array'
oneOf:
- $ref: 'domains.yaml#/components/schemas/domain'
- $ref: 'domains.yaml#/components/schemas/domain_array'
domain_object:
type: object
properties:
domain:
$ref: 'domains.yaml#/components/schemas/domain'
type:
type: object
properties:
@ -368,6 +448,7 @@ components:
value:
domains:
- domain: "allowed.com"
unicode: "allowed.com"
type: allow
kind: exact
comment: null
@ -377,7 +458,8 @@ components:
id: 299
date_added: 1611239095
date_modified: 1612163756
- domain: "allowed2.comm"
- domain: "xn--4ca.com"
unicode: "ä.com"
type: allow
kind: regex
comment: "Some text"

View File

@ -63,6 +63,9 @@ components:
- $ref: 'groups.yaml#/components/schemas/groups/get' # identical to GET
- $ref: 'groups.yaml#/components/schemas/lists_processed'
- $ref: 'common.yaml#/components/schemas/took'
headers:
Location:
$ref: 'common.yaml#/components/headers/Location'
'400':
description: Bad request
content:
@ -94,6 +97,12 @@ components:
responses:
'204':
description: Item deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
@ -165,6 +174,63 @@ components:
allOf:
- $ref: 'common.yaml#/components/errors/unauthorized'
- $ref: 'common.yaml#/components/schemas/took'
batchDelete:
post:
summary: Delete multiple groups
tags:
- "Group management"
operationId: "batchDelete_groups"
description: |
Deletes multiple groups in the `groups` object. The `{groups}` themselves are specified in the request body (POST JSON).
*Note:* There will be no content on success.
requestBody:
description: Callback payload
content:
application/json:
schema:
type: array
items:
type: object
properties:
item:
type: string
description: group name
example:
- "item": "test1"
- "item": "test2"
responses:
'204':
description: Items deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/bad_request'
- $ref: 'common.yaml#/components/schemas/took'
examples:
no_payload:
$ref: 'groups.yaml#/components/examples/errors/bad_request/no_payload'
duplicate:
$ref: 'groups.yaml#/components/examples/errors/database_error/duplicate'
'401':
description: Unauthorized
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/unauthorized'
- $ref: 'common.yaml#/components/schemas/took'
schemas:
groups:
get:
@ -174,13 +240,14 @@ components:
type: array
items:
allOf:
- $ref: 'groups.yaml#/components/schemas/name'
- $ref: 'groups.yaml#/components/schemas/name_object'
- $ref: 'groups.yaml#/components/schemas/comment'
- $ref: 'groups.yaml#/components/schemas/enabled'
- $ref: 'groups.yaml#/components/schemas/readonly'
put:
allOf:
- $ref: 'groups.yaml#/components/schemas/name'
# Can rename group
- $ref: 'groups.yaml#/components/schemas/name_object'
- $ref: 'groups.yaml#/components/schemas/comment'
- $ref: 'groups.yaml#/components/schemas/enabled'
post:
@ -189,25 +256,27 @@ components:
- $ref: 'groups.yaml#/components/schemas/comment'
- $ref: 'groups.yaml#/components/schemas/enabled'
name:
type: object
properties:
name:
description: Group name
type: string
example: test_group
description: Group name
type: string
example: test_group
name_array:
description: array of group names
type: array
items:
type: string
example: ["test1", "test2", "test3"]
name_maybe_array:
type: object
properties:
name:
description: array of group names
type: array
items:
type: string
example: ["test1", "test2", "test3"]
name_maybe_array:
oneOf:
- $ref: 'groups.yaml#/components/schemas/name'
- $ref: 'groups.yaml#/components/schemas/name_array'
oneOf:
- $ref: 'groups.yaml#/components/schemas/name'
- $ref: 'groups.yaml#/components/schemas/name_array'
name_object:
type: object
properties:
name:
$ref: 'groups.yaml#/components/schemas/name'
comment:
type: object
properties:

View File

@ -62,7 +62,15 @@ components:
- Metrics
operationId: "get_client_metrics"
description: |
Request data needed to generate the \"Client activity over last 24 hours\" graph
Request data needed to generate the \"Client activity over last 24 hours\" graph.
This endpoint returns the top N clients, sorted by total number of queries within 24 hours. If N is set to 0, all clients will be returned.
The client name is only available if the client's IP address can be resolved to a hostname.
The last client returned is a special client that contains the total number of queries that were not sent by any of the other shown clients, i.e., queries that were sent by clients that are not in the top N. This client is always present, even if it has 0 queries, and can be identified by the special name "other clients" (note the space in the name) and the IP address "0.0.0.0".
Note that, due to privacy settings, the returned data may also be empty.
parameters:
- $ref: 'history.yaml#/components/parameters/clients/N'
responses:
'200':
description: OK
@ -142,6 +150,38 @@ components:
client_history:
type: object
properties:
clients:
type: array
description: Data array
items:
type: object
properties:
name:
type: string
nullable: true
description: Client name
ip:
type: string
description: Client IP address
total:
type: integer
description: Total number of queries
example:
- name: localhost
ip: "127.0.0.1"
total: 13428
- name: ip6-localnet
ip: "::1"
total: 2100
- name: null
ip: "192.168.1.1"
total: 254
- name: "pi.hole"
ip: "::"
total: 29
- name: "other clients"
ip: "0.0.0.0"
total: 14
history:
type: array
description: Data array
@ -162,28 +202,22 @@ components:
- 12
- 65
- 67
- 9
- 5
- timestamp: 1511820500.583821
data:
- 1
- 35
- 63
clients:
type: array
description: Data array
items:
type: object
properties:
name:
type: string
nullable: true
description: Client name
ip:
type: string
description: Client IP address
example:
- name: localhost
ip: "127.0.0.1"
- name: ip6-localnet
ip: "::1"
- name: null
ip: "192.168.1.1"
- 20
- 9
parameters:
clients:
N:
in: query
description: Maximum number of clients to return; setting this to 0 will return all clients
name: N
schema:
type: integer
required: false
example: 20

View File

@ -218,10 +218,16 @@ components:
parameters:
- $ref: 'info.yaml#/components/parameters/message_id'
description: |
*Note:* There will be no content on success. You may specify multiple IDs to delete multiple messages at once (comma-separated in the path like `1,2,3`)
You may specify multiple IDs to delete multiple messages at once (comma-separated in the path like `1,2,3`)
responses:
'204':
description: Item deleted
'404':
description: Not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
@ -235,6 +241,14 @@ components:
$ref: 'info.yaml#/components/examples/errors/messages/uri_error'
bad_request:
$ref: 'info.yaml#/components/examples/errors/messages/bad_request'
'401':
description: Unauthorized
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/unauthorized'
- $ref: 'common.yaml#/components/schemas/took'
messages_count:
get:
summary: Get count of Pi-hole diagnosis messages
@ -732,12 +746,121 @@ components:
example: 0.1
"%cpu":
type: number
description: Percentage of total CPU used by FTL
description: Percentage of total CPU used by FTL (ten seconds average)
example: 1.2
allow_destructive:
type: boolean
description: Whether or not FTL is allowed to perform destructive actions
example: true
dnsmasq:
type: object
description: Metrics from the embedded dnsmasq resolver
properties:
dns_cache_inserted:
type: integer
description: Number of inserted entries in DNS cache
example: 8
dns_cache_live_freed:
type: integer
description: Number of freed live entries in DNS cache
example: 0
dns_queries_forwarded:
type: integer
description: Number of forwarded DNS queries
example: 2
dns_auth_answered:
type: integer
description: Number of DNS queries for authoritative zones
example: 0
dns_local_answered:
type: integer
description: Number of DNS queries answered from local cache
example: 74
dns_stale_answered:
type: integer
description: Number of DNS queries answered from local cache (stale entries)
example: 0
dns_unanswered:
type: integer
description: Number of unanswered DNS queries
example: 0
bootp:
type: integer
description: Number of BOOTP requests
example: 0
pxe:
type: integer
description: Number of PXE requests
example: 0
dhcp_ack:
type: integer
description: Number of DHCP ACK
example: 0
dhcp_decline:
type: integer
description: Number of DHCP DECLINE
example: 0
dhcp_discover:
type: integer
description: Number of DHCP DISCOVER
example: 0
dhcp_inform:
type: integer
description: Number of DHCP INFORM
example: 0
dhcp_nak:
type: integer
description: Number of DHCP NAK
example: 0
dhcp_offer:
type: integer
description: Number of DHCP OFFER
example: 0
dhcp_release:
type: integer
description: Number of DHCP RELEASE
example: 0
dhcp_request:
type: integer
description: Number of DHCP REQUEST
example: 0
noanswer:
type: integer
description: Number of DHCP requests without answer (rapid commit)
example: 0
leases_allocated_4:
type: integer
description: Number of allocated IPv4 leases
example: 0
leases_pruned_4:
type: integer
description: Number of pruned IPv4 leases
example: 0
leases_allocated_6:
type: integer
description: Number of allocated IPv6 leases
example: 0
leases_pruned_6:
type: integer
description: Number of pruned IPv6 leases
example: 0
tcp_connections:
type: integer
description: Number of dedicated TCP workers
example: 0
dnssec_max_crypto_use:
type: integer
description: DNSSEC per-query crypto work HWM
example: 0
dnssec_max_sig_fail:
type: integer
description: DNSSEC per-RRSet signature fails HWM
example: 0
dnssec_max_work:
type: integer
description: DNSSEC per-query subqueries HWM
example: 0
database:
type: object
properties:
@ -835,7 +958,7 @@ components:
version:
type: string
nullable: true
description: Remote (Github) Pi-hole Core version
description: Remote (Github) Pi-hole Core version (null if on custom branch)
example: "v6.1"
hash:
type: string
@ -869,7 +992,7 @@ components:
version:
type: string
nullable: true
description: Remote (Github) Pi-hole Web version
description: Remote (Github) Pi-hole Web version (null if on custom branch)
example: "v6.1"
hash:
type: string
@ -908,7 +1031,7 @@ components:
version:
type: string
nullable: true
description: Remote (Github) Pi-hole FTL version
description: Remote (Github) Pi-hole FTL version (null if on custom branch)
example: "v6.1"
hash:
type: string

View File

@ -93,6 +93,12 @@ components:
responses:
'204':
description: Item deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
@ -170,6 +176,52 @@ components:
allOf:
- $ref: 'common.yaml#/components/errors/unauthorized'
- $ref: 'common.yaml#/components/schemas/took'
batchDelete:
post:
summary: Delete lists
tags:
- "List management"
operationId: "batchDelete_lists"
description: |
Deletes multiple lists at once. The `{list}`s to remove are specified in the request body (POST JSON).
On success, the specified lists are removed and a `204 No Content` response is returned.
If none of the specified lists exist, a `404 Not Found` response is returned instead.
requestBody:
description: Callback payload
content:
application/json:
schema:
$ref: 'lists.yaml#/components/schemas/lists/post'
responses:
'204':
description: Items deleted
'404':
description: Item not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/bad_request'
- $ref: 'common.yaml#/components/schemas/took'
examples:
no_payload:
$ref: 'lists.yaml#/components/examples/errors/bad_request/no_payload'
'401':
description: Unauthorized
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/unauthorized'
- $ref: 'common.yaml#/components/schemas/took'
schemas:
lists:
get:
@ -180,7 +232,7 @@ components:
description: Array of lists
items:
allOf:
- $ref: 'lists.yaml#/components/schemas/list'
- $ref: 'lists.yaml#/components/schemas/address_object'
- $ref: 'lists.yaml#/components/schemas/type'
- $ref: 'lists.yaml#/components/schemas/comment'
- $ref: 'lists.yaml#/components/schemas/groups'
@ -194,31 +246,33 @@ components:
- $ref: 'lists.yaml#/components/schemas/enabled'
post:
allOf:
- $ref: 'lists.yaml#/components/schemas/list_maybe_array'
- $ref: 'lists.yaml#/components/schemas/address_maybe_array'
- $ref: 'lists.yaml#/components/schemas/type'
- $ref: 'lists.yaml#/components/schemas/comment'
- $ref: 'lists.yaml#/components/schemas/groups'
- $ref: 'lists.yaml#/components/schemas/enabled'
list:
address:
description: Address of the list
type: string
example: https://hosts-file.net/ad_servers.txt
address_array:
description: array of list addresses
type: array
items:
type: string
example: ["https://hosts-file.net/ad_servers.txt"]
address_maybe_array:
type: object
properties:
address:
description: Address of the list
type: string
example: https://hosts-file.net/ad_servers.txt
list_array:
oneOf:
- $ref: 'lists.yaml#/components/schemas/address'
- $ref: 'lists.yaml#/components/schemas/address_array'
address_object:
type: object
properties:
list:
description: array of list addresses
type: array
items:
type: string
example: ["https://hosts-file.net/ad_servers.txt"]
list_maybe_array:
oneOf:
- $ref: 'lists.yaml#/components/schemas/list'
- $ref: 'lists.yaml#/components/schemas/list_array'
address:
$ref: 'lists.yaml#/components/schemas/address'
type:
type: object
properties:

View File

@ -119,6 +119,10 @@ components:
message:
type: string
description: Log line content
prio:
type: string
nullable: true
description: Log line priority (if available)
example:
- timestamp: 1611729969.0
message: "started, version pi-hole-2.84 cachesize 10000"
@ -132,6 +136,10 @@ components:
type: integer
description: Next ID to query if checking for new log lines
example: 229
pid:
type: integer
description: Process ID of FTL. When this changes, FTL was restarted and nextID should be reset to 0.
example: 2258
file:
type: string
description: Path to respective log file on disk

View File

@ -142,18 +142,27 @@ paths:
/domains/{type}/{kind}:
$ref: 'domains.yaml#/components/paths/type_kind'
/domains:batchDelete:
$ref: 'domains.yaml#/components/paths/batchDelete'
/groups/{name}:
$ref: 'groups.yaml#/components/paths/name'
/groups:
$ref: 'groups.yaml#/components/paths/direct'
/groups:batchDelete:
$ref: 'groups.yaml#/components/paths/batchDelete'
/clients/{client}:
$ref: 'clients.yaml#/components/paths/client'
/clients:
$ref: 'clients.yaml#/components/paths/direct'
/clients:batchDelete:
$ref: 'clients.yaml#/components/paths/batchDelete'
/clients/_suggestions:
$ref: 'clients.yaml#/components/paths/suggestions'
@ -163,6 +172,9 @@ paths:
/lists:
$ref: 'lists.yaml#/components/paths/direct'
/lists:batchDelete:
$ref: 'lists.yaml#/components/paths/batchDelete'
/info/client:
$ref: 'info.yaml#/components/paths/client'

View File

@ -93,6 +93,20 @@ components:
responses:
'204':
description: No Content (deleted)
'404':
description: Not found
content:
application/json:
schema:
$ref: 'common.yaml#/components/schemas/took'
'400':
description: Bad request
content:
application/json:
schema:
allOf:
- $ref: 'common.yaml#/components/errors/bad_request'
- $ref: 'common.yaml#/components/schemas/took'
'401':
description: Unauthorized
content:

View File

@ -216,9 +216,9 @@ components:
time:
type: number
description: Time until the response was received (ms, negative if N/A)
regex_id:
list_id:
type: integer
description: ID of regex (`NULL` if N/A)
description: ID of the corresponding database table entry (adlist for anti-/gravity, otherwise domainlist); `NULL` if N/A
nullable: true
upstream:
type: string
@ -237,7 +237,7 @@ components:
reply:
type: "IP"
time: 19
regex_id: NULL
list_id: NULL
upstream: "localhost#5353"
dbid: 112421354
- time: 1581907871.583821
@ -252,7 +252,7 @@ components:
reply:
type: "IP"
time: 12.3
regex_id: NULL
list_id: NULL
upstream: "localhost#5353"
dbid: 112421355
cursor:

View File

@ -406,6 +406,82 @@ components:
type: integer
description: Queries of remaining types
example: 845
status:
type: object
description: Number of individual queries (by status)
properties:
UNKNOWN:
type: integer
description: Type UNKNOWN queries
example: 3
GRAVITY:
type: integer
description: Type GRAVITY queries
example: 72
FORWARDED:
type: integer
description: Type FORWARDED queries
example: 533
CACHE:
type: integer
description: Type CACHE queries
example: 32
REGEX:
type: integer
description: Type REGEX queries
example: 84
DENYLIST:
type: integer
description: Type DENYLIST queries
example: 31
EXTERNAL_BLOCKED_IP:
type: integer
description: Type EXTERNAL_BLOCKED_IP queries
example: 0
EXTERNAL_BLOCKED_NULL:
type: integer
description: Type EXTERNAL_BLOCKED_NULL queries
example: 0
EXTERNAL_BLOCKED_NXRA:
type: integer
description: Type EXTERNAL_BLOCKED_NXRA queries
example: 0
GRAVITY_CNAME:
type: integer
description: Type GRAVITY_CNAME queries
example: 0
REGEX_CNAME:
type: integer
description: Type REGEX_CNAME queries
example: 0
DENYLIST_CNAME:
type: integer
description: Type DENYLIST_CNAME queries
example: 0
RETRIED:
type: integer
description: Type RETRIED queries
example: 0
RETRIED_DNSSEC:
type: integer
description: Type RETRIED_DNSSEC queries
example: 0
IN_PROGRESS:
type: integer
description: Type IN_PROGRESS queries
example: 0
DBBUSY:
type: integer
description: Type DBBUSY queries
example: 0
SPECIAL_DOMAIN:
type: integer
description: Type SPECIAL_DOMAIN queries
example: 0
CACHE_STALE:
type: integer
description: Type CACHE_STALE queries
example: 0
replies:
type: object
description: Number of individual replies

View File

@ -19,7 +19,7 @@
// config struct
#include "../config/config.h"
// read_setupVarsconf()
#include "../setupVars.h"
#include "../config/setupVars.h"
// get_aliasclient_list()
#include "../database/aliasclients.h"
@ -66,46 +66,71 @@ int api_history_clients(struct ftl_conn *api)
JSON_SEND_OBJECT_UNLOCK(json);
}
// Get number of clients to return
unsigned int Nc = min(counters->clients, config.webserver.api.maxClients.v.u16);
if(api->request->query_string != NULL)
{
// Does the user request a non-default number of clients
get_uint_var(api->request->query_string, "N", &Nc);
// Limit the number of clients to return to the number of
// clients to avoid possible overflows for very large N
// Also allow N=0 to return all clients
if((int)Nc > counters->clients || Nc == 0)
Nc = counters->clients;
}
// Lock shared memory
lock_shm();
// Get clients which the user doesn't want to see
// if skipclient[i] == true then this client should be hidden from
// returned data. We initialize it with false
bool *skipclient = calloc(counters->clients, sizeof(bool));
unsigned int excludeClients = cJSON_GetArraySize(config.webserver.api.excludeClients.v.json);
if(excludeClients > 0)
int *temparray = calloc(2*counters->clients, sizeof(int));
if(skipclient == NULL || temparray == NULL)
{
for(int clientID = 0; clientID < counters->clients; clientID++)
{
// Get client pointer
const clientsData* client = getClient(clientID, true);
if(client == NULL)
continue;
// Check if this client should be skipped
for(unsigned int i = 0; i < excludeClients; i++)
{
cJSON *item = cJSON_GetArrayItem(config.webserver.api.excludeClients.v.json, i);
if(strcmp(getstr(client->ippos), item->valuestring) == 0 ||
strcmp(getstr(client->namepos), item->valuestring) == 0)
skipclient[clientID] = true;
}
}
unlock_shm();
return send_json_error(api, 500,
"internal_error",
"Failed to allocate memory for skipclient array",
NULL);
}
// Also skip clients included in others (in alias-clients)
// Skip clients included in others (in alias-clients)
for(int clientID = 0; clientID < counters->clients; clientID++)
{
// Get client pointer
const clientsData* client = getClient(clientID, true);
if(client == NULL)
continue;
// Check if this client should be skipped
if(!client->flags.aliasclient && client->aliasclient_id > -1)
skipclient[clientID] = true;
}
// Get MAX_CLIENTS clients with the highest number of queries
for(int clientID = 0; clientID < counters->clients; clientID++)
{
// Get client pointer
const clientsData* client = getClient(clientID, true);
// Skip invalid clients
if(client == NULL)
continue;
// Store clientID and number of queries in temporary array
temparray[2*clientID + 0] = clientID;
temparray[2*clientID + 1] = client->count;
}
// Sort temporary array
qsort(temparray, counters->clients, sizeof(int[2]), cmpdesc);
// Main return loop
cJSON *history = JSON_NEW_ARRAY();
int others_total = 0;
for(unsigned int slot = 0; slot < OVERTIME_SLOTS; slot++)
{
cJSON *item = JSON_NEW_OBJECT();
@ -113,22 +138,31 @@ int api_history_clients(struct ftl_conn *api)
// Loop over clients to generate output to be sent to the client
cJSON *data = JSON_NEW_ARRAY();
for(int clientID = 0; clientID < counters->clients; clientID++)
int others = 0;
for(int id = 0; id < counters->clients; id++)
{
if(skipclient[clientID])
continue;
// Get client pointer
const int clientID = temparray[2*id + 0];
const clientsData* client = getClient(clientID, true);
// Skip invalid clients and also those managed by alias clients
if(client == NULL || client->aliasclient_id >= 0)
// Skip invalid (recycled) clients
if(client == NULL)
continue;
const int thisclient = client->overTime[slot];
// Skip clients which should be hidden and add them to the "others" counter.
// Also skip clients when we reached the maximum number of clients to return
if(skipclient[clientID] || id >= (int)Nc)
{
others += client->overTime[slot];
continue;
}
JSON_ADD_NUMBER_TO_ARRAY(data, thisclient);
JSON_ADD_NUMBER_TO_ARRAY(data, client->overTime[slot]);
}
// Add others as last element in the array
others_total += others;
JSON_ADD_NUMBER_TO_ARRAY(data, others);
JSON_ADD_ITEM_TO_OBJECT(item, "data", data);
JSON_ADD_ITEM_TO_ARRAY(history, item);
}
@ -137,25 +171,40 @@ int api_history_clients(struct ftl_conn *api)
// Loop over clients to generate output to be sent to the client
cJSON *clients = JSON_NEW_ARRAY();
for(int clientID = 0; clientID < counters->clients; clientID++)
for(int id = 0; id < counters->clients; id++)
{
if(skipclient[clientID])
continue;
// Get client pointer
const int clientID = temparray[2*id + 0];
const clientsData* client = getClient(clientID, true);
// Skip invalid (recycled) clients
if(client == NULL)
continue;
// Skip clients which should be hidden. Also skip clients when
// we reached the maximum number of clients to return
if(skipclient[clientID] || id >= (int)Nc)
continue;
// Get client name and IP address
const char *client_ip = getstr(client->ippos);
const char *client_name = client->namepos != 0 ? getstr(client->namepos) : NULL;
// Create JSON object for this client
cJSON *item = JSON_NEW_OBJECT();
JSON_REF_STR_IN_OBJECT(item, "name", client_name);
JSON_REF_STR_IN_OBJECT(item, "ip", client_ip);
JSON_ADD_NUMBER_TO_OBJECT(item, "total", client->count);
JSON_ADD_ITEM_TO_ARRAY(clients, item);
}
// Add "others" client
cJSON *item = JSON_NEW_OBJECT();
JSON_REF_STR_IN_OBJECT(item, "name", "other clients");
JSON_REF_STR_IN_OBJECT(item, "ip", "0.0.0.0");
JSON_ADD_NUMBER_TO_OBJECT(item, "total", others_total);
JSON_ADD_ITEM_TO_ARRAY(clients, item);
// Unlock already here to avoid keeping the lock during JSON generation
// This is safe because we don't access any shared memory after this
// point and all strings in the JSON are references to idempotent shared
@ -164,6 +213,7 @@ int api_history_clients(struct ftl_conn *api)
// Free memory
free(skipclient);
free(temparray);
JSON_ADD_ITEM_TO_OBJECT(json, "clients", clients);
JSON_SEND_OBJECT(json);

View File

@ -15,7 +15,7 @@
// sysinfo()
#include <sys/sysinfo.h>
// get_blockingstatus()
#include "setupVars.h"
#include "config/setupVars.h"
// counters
#include "shmem.h"
// get_FTL_db_filesize()
@ -147,7 +147,7 @@ int api_info_database(struct ftl_conn *api)
JSON_ADD_ITEM_TO_OBJECT(json, "owner", owner);
// Add number of queries in on-disk database
const int queries_in_database = get_number_of_queries_in_DB(NULL, "query_storage", true);
const int queries_in_database = get_number_of_queries_in_DB(NULL, "query_storage");
JSON_ADD_NUMBER_TO_OBJECT(json, "queries", queries_in_database);
// Add SQLite library version
@ -590,6 +590,11 @@ static int get_ftl_obj(struct ftl_conn *api, cJSON *ftl)
JSON_ADD_BOOL_TO_OBJECT(ftl, "allow_destructive", config.webserver.api.allow_destructive.v.b);
// dnsmasq struct
cJSON *dnsmasq = JSON_NEW_OBJECT();
get_dnsmasq_metrics_obj(dnsmasq);
JSON_ADD_ITEM_TO_OBJECT(ftl, "dnsmasq", dnsmasq);
// All okay
return 0;
}
@ -664,9 +669,11 @@ int api_info_sensors(struct ftl_conn *api)
// 1. AMD CPU temperature sensor
// 2. Intel CPU temperature sensor
// 3. General CPU temperature sensor
// 4. General SoC temperature sensor (https://discourse.pi-hole.net/t/temperature-value-not-shown/66883)
if(strcmp(name->valuestring, "k10temp") == 0 ||
strcmp(name->valuestring, "coretemp") == 0 ||
strcmp(name->valuestring, "cpu_thermal") == 0)
strcmp(name->valuestring, "cpu_thermal") == 0 ||
strcmp(name->valuestring, "soc_thermal") == 0)
{
cpu_temp_sensor = i;
break;
@ -748,11 +755,26 @@ int api_info_version(struct ftl_conn *api)
//else if(strcmp(key, "FTL_VERSION") == 0)
// JSON_COPY_STR_TO_OBJECT(ftl_local, "version", value);
else if(strcmp(key, "GITHUB_CORE_VERSION") == 0)
JSON_COPY_STR_TO_OBJECT(core_remote, "version", value);
{
if(strcmp(value, "null") == 0)
JSON_ADD_NULL_TO_OBJECT(core_remote, "version");
else
JSON_COPY_STR_TO_OBJECT(core_remote, "version", value);
}
else if(strcmp(key, "GITHUB_WEB_VERSION") == 0)
JSON_COPY_STR_TO_OBJECT(web_remote, "version", value);
{
if(strcmp(value, "null") == 0)
JSON_ADD_NULL_TO_OBJECT(web_remote, "version");
else
JSON_COPY_STR_TO_OBJECT(web_remote, "version", value);
}
else if(strcmp(key, "GITHUB_FTL_VERSION") == 0)
JSON_COPY_STR_TO_OBJECT(ftl_remote, "version", value);
{
if(strcmp(value, "null") == 0)
JSON_ADD_NULL_TO_OBJECT(ftl_remote, "version");
else
JSON_COPY_STR_TO_OBJECT(ftl_remote, "version", value);
}
else if(strcmp(key, "CORE_HASH") == 0)
JSON_COPY_STR_TO_OBJECT(core_local, "hash", value);
else if(strcmp(key, "WEB_HASH") == 0)
@ -938,15 +960,18 @@ static int api_info_messages_DELETE(struct ftl_conn *api)
}
// Delete message with this ID from the database
delete_message(ids);
int deleted = 0;
delete_message(ids, &deleted);
// Free memory
free(id);
cJSON_free(ids);
// Send empty reply with code 204 No Content
// Send empty reply with codes:
// - 204 No Content (if any items were deleted)
// - 404 Not Found (if no items were deleted)
cJSON *json = JSON_NEW_OBJECT();
JSON_SEND_OBJECT_CODE(json, 204);
JSON_SEND_OBJECT_CODE(json, deleted > 0 ? 204 : 404);
}
int api_info_messages(struct ftl_conn *api)

View File

@ -17,6 +17,9 @@
#include "shmem.h"
// getNameFromIP()
#include "database/network-table.h"
// valid_domain()
#include "tools/gravity-parseList.h"
#include <idn2.h>
static int api_list_read(struct ftl_conn *api,
const int code,
@ -55,9 +58,11 @@ static int api_list_read(struct ftl_conn *api,
char *name = NULL;
if(table.client != NULL)
{
// Try to obtain hostname if this is a valid IP address
// Try to obtain hostname
if(isValidIPv4(table.client) || isValidIPv6(table.client))
name = getNameFromIP(NULL, table.client);
else if(isMAC(table.client))
name = getNameFromMAC(table.client);
}
JSON_COPY_STR_TO_OBJECT(row, "client", table.client);
@ -70,10 +75,18 @@ static int api_list_read(struct ftl_conn *api,
}
else // domainlists
{
char *unicode = NULL;
const int rc = idn2_to_unicode_lzlz(table.domain, &unicode, IDN2_NONTRANSITIONAL);
JSON_COPY_STR_TO_OBJECT(row, "domain", table.domain);
if(rc == IDN2_OK)
JSON_COPY_STR_TO_OBJECT(row, "unicode", unicode);
else
JSON_COPY_STR_TO_OBJECT(row, "unicode", table.domain);
JSON_REF_STR_IN_OBJECT(row, "type", table.type);
JSON_REF_STR_IN_OBJECT(row, "kind", table.kind);
JSON_COPY_STR_TO_OBJECT(row, "comment", table.comment);
if(unicode != NULL)
free(unicode);
}
// Groups don't have the groups property
@ -393,11 +406,50 @@ static int api_list_write(struct ftl_conn *api,
strchr(it->valuestring, '\t') != NULL ||
strchr(it->valuestring, '\n') != NULL)
{
cJSON_free(row.items);
if(allocated_json)
cJSON_free(row.items);
return send_json_error(api, 400, // 400 Bad Request
"bad_request",
"Spaces, newlines and tabs are not allowed in domains and URLs",
it->valuestring);
}
if(listtype == GRAVITY_DOMAINLIST_ALLOW_EXACT ||
listtype == GRAVITY_DOMAINLIST_DENY_EXACT)
{
char *punycode = NULL;
const int rc = idn2_to_ascii_lz(it->valuestring, &punycode, IDN2_NFC_INPUT | IDN2_NONTRANSITIONAL);
if (rc != IDN2_OK)
{
// Invalid domain name
return send_json_error(api, 400,
"bad_request",
"Invalid request: Invalid domain name",
idn2_strerror(rc));
}
// Convert punycode domain to lowercase
for(unsigned int i = 0u; i < strlen(punycode); i++)
punycode[i] = tolower(punycode[i]);
// Validate punycode domain
// This will reject domains like äöü{{{.com
// which convert to xn--{{{-pla4gpb.com
if(!valid_domain(punycode, strlen(punycode), false))
{
if(allocated_json)
cJSON_free(row.items);
return send_json_error(api, 400, // 400 Bad Request
"bad_request",
"Spaces, newlines and tabs are not allowed in domains and URLs",
"Invalid domain",
it->valuestring);
}
// Replace domain with punycode version
if(!(it->type & cJSON_IsReference))
free(it->valuestring);
it->valuestring = punycode;
// Remove reference flag
it->type &= ~cJSON_IsReference;
}
}
}
@ -406,11 +458,12 @@ static int api_list_write(struct ftl_conn *api,
if(!okay)
{
// Send error reply
cJSON_free(row.items);
return send_json_error(api, 400, // 400 Bad Request
"regex_error",
"Regex validation failed",
regex_msg);
if(allocated_json)
cJSON_free(row.items);
return send_json_error_free(api, 400, // 400 Bad Request
"regex_error",
"Regex validation failed",
regex_msg, true);
}
// Try to add item(s) to table
@ -450,13 +503,27 @@ static int api_list_write(struct ftl_conn *api,
cJSON_AddItemToArray(okay ? success : errors, details);
}
// Inform the resolver that it needs to reload the domainlists
// Inform the resolver that it needs to reload gravity
set_event(RELOAD_GRAVITY);
int response_code = 201; // 201 - Created
if(api->method == HTTP_PUT)
response_code = 200; // 200 - OK
// Add "Location" header to response
if(snprintf(pi_hole_extra_headers, sizeof(pi_hole_extra_headers), "Location: %s/%s", api->action_path, row.item) >= (int)sizeof(pi_hole_extra_headers))
{
// This may happen for *extremely* long URLs but is not an issue in
// itself. Merely add a warning to the log file
log_warn("Could not add Location header to response: URL too long");
// Truncate location by replacing the last characters with "...\0"
pi_hole_extra_headers[sizeof(pi_hole_extra_headers)-4] = '.';
pi_hole_extra_headers[sizeof(pi_hole_extra_headers)-3] = '.';
pi_hole_extra_headers[sizeof(pi_hole_extra_headers)-2] = '.';
pi_hole_extra_headers[sizeof(pi_hole_extra_headers)-1] = '\0';
}
// Send GET style reply
const int ret = api_list_read(api, response_code, listtype, row.item, processed);
@ -472,21 +539,198 @@ static int api_list_remove(struct ftl_conn *api,
const char *item)
{
const char *sql_msg = NULL;
if(gravityDB_delFromTable(listtype, item, &sql_msg))
cJSON *array = api->payload.json;
bool allocated_json = false;
// If this is not a :batchDelete call, then the item is specified in the
// URI, not in the payload. Create a JSON array with the item and use
// that instead
const bool isBatchDelete = api->opts.flags & API_BATCHDELETE;
// If this is a domain callback, we need to translate type/kind into an
// integer for use in the database
if(listtype == GRAVITY_DOMAINLIST_ALLOW_EXACT ||
listtype == GRAVITY_DOMAINLIST_DENY_EXACT ||
listtype == GRAVITY_DOMAINLIST_ALLOW_REGEX ||
listtype == GRAVITY_DOMAINLIST_DENY_REGEX)
{
// Inform the resolver that it needs to reload the domainlists
int type = -1;
switch (listtype)
{
case GRAVITY_DOMAINLIST_ALLOW_EXACT:
type = 0;
break;
case GRAVITY_DOMAINLIST_DENY_EXACT:
type = 1;
break;
case GRAVITY_DOMAINLIST_ALLOW_REGEX:
type = 2;
break;
case GRAVITY_DOMAINLIST_DENY_REGEX:
type = 3;
case GRAVITY_GROUPS:
case GRAVITY_ADLISTS:
case GRAVITY_CLIENTS:
// No type required for these tables
break;
// Aggregate types cannot be handled by this routine
case GRAVITY_GRAVITY:
case GRAVITY_ANTIGRAVITY:
case GRAVITY_DOMAINLIST_ALLOW_ALL:
case GRAVITY_DOMAINLIST_DENY_ALL:
case GRAVITY_DOMAINLIST_ALL_EXACT:
case GRAVITY_DOMAINLIST_ALL_REGEX:
case GRAVITY_DOMAINLIST_ALL_ALL:
default:
return false;
}
// Create new JSON array with the item and type:
// array = [{"item": "example.com", "type": 0}]
array = cJSON_CreateArray();
cJSON *obj = cJSON_CreateObject();
cJSON_AddItemToObject(obj, "item", cJSON_CreateStringReference(item));
cJSON_AddItemToObject(obj, "type", cJSON_CreateNumber(type));
cJSON_AddItemToArray(array, obj);
allocated_json = true;
}
else if(isBatchDelete && listtype == GRAVITY_DOMAINLIST_ALL_ALL)
{
// Loop over all items and parse type/kind for each item
cJSON *it = NULL;
cJSON_ArrayForEach(it, array)
{
if(!cJSON_IsObject(it))
{
return send_json_error(api, 400,
"bad_request",
"Invalid request: Batch delete requires an array of objects",
NULL);
}
// Check if item is a string
cJSON *json_item = cJSON_GetObjectItemCaseSensitive(it, "item");
if(!cJSON_IsString(json_item))
{
return send_json_error(api, 400,
"bad_request",
"Invalid request: Batch delete requires an array of objects with \"item\" as string",
NULL);
}
// Check if type and kind are both present and strings
cJSON *json_type = cJSON_GetObjectItemCaseSensitive(it, "type");
cJSON *json_kind = cJSON_GetObjectItemCaseSensitive(it, "kind");
if(!cJSON_IsString(json_type) || !cJSON_IsString(json_kind))
{
return send_json_error(api, 400,
"bad_request",
"Invalid request: Batch delete requires an array of objects with \"type\" and \"kind\" as string",
NULL);
}
// Parse type and kind
// 0 = allow exact
// 1 = deny exact
// 2 = allow regex
// 3 = deny regex
int type = -1;
if(strcasecmp(json_type->valuestring, "allow") == 0)
{
if(strcasecmp(json_kind->valuestring, "exact") == 0)
type = 0;
else if(strcasecmp(json_kind->valuestring, "regex") == 0)
type = 2;
}
else if(strcasecmp(json_type->valuestring, "deny") == 0)
{
if(strcasecmp(json_kind->valuestring, "exact") == 0)
type = 1;
else if(strcasecmp(json_kind->valuestring, "regex") == 0)
type = 3;
}
// Check if type/kind combination is valid
if(type == -1)
{
return send_json_error(api, 400,
"bad_request",
"Invalid request: Batch delete requires an valid combination of \"type\" and \"kind\" for each object",
NULL);
}
// Replace type/kind with integer type
// array = [{"item": "example.com", "type": 0}]
cJSON_DeleteItemFromObject(it, "type");
cJSON_DeleteItemFromObject(it, "kind");
cJSON_AddNumberToObject(it, "type", type);
}
}
else if(!isBatchDelete)
{
// Create array with object (used for clients, groups, lists)
// array = [{"item": <item>}]
array = cJSON_CreateArray();
cJSON *obj = cJSON_CreateObject();
cJSON_AddItemToObject(obj, "item", cJSON_CreateStringReference(item));
cJSON_AddItemToArray(array, obj);
allocated_json = true;
}
// Verify that the payload is an array of objects each containing an
// item
if(isBatchDelete)
{
cJSON *it = NULL;
cJSON_ArrayForEach(it, array)
{
if(!cJSON_IsObject(it))
{
return send_json_error(api, 400,
"bad_request",
"Invalid request: Batch delete requires an array of objects",
NULL);
}
// Check if item is a string
cJSON *json_item = cJSON_GetObjectItemCaseSensitive(it, "item");
if(!cJSON_IsString(json_item))
{
return send_json_error(api, 400,
"bad_request",
"Invalid request: Batch delete requires an array of objects with \"item\" as string",
NULL);
}
}
}
// From here on, we can assume the JSON payload is valid
unsigned int deleted = 0u;
if(gravityDB_delFromTable(listtype, array, &deleted, &sql_msg))
{
// Inform the resolver that it needs to reload gravity
set_event(RELOAD_GRAVITY);
// Send empty reply with code 204 No Content
// Free memory allocated above
if(allocated_json)
cJSON_free(array);
// Send empty reply with codes:
// - 204 No Content (if any items were deleted)
// - 404 Not Found (if no items were deleted)
cJSON *json = JSON_NEW_OBJECT();
JSON_SEND_OBJECT_CODE(json, 204);
JSON_SEND_OBJECT_CODE(json, deleted > 0u ? 204 : 404);
}
else
{
// Free memory allocated above
if(allocated_json)
cJSON_free(array);
// Send error reply
return send_json_error(api, 400,
"database_error",
"Could not remove domain from database table",
"Could not remove entries from table",
sql_msg);
}
}
@ -495,21 +739,40 @@ int api_list(struct ftl_conn *api)
{
enum gravity_list_type listtype;
bool can_modify = false;
bool batchDelete = false;
if((api->item = startsWith("/api/groups", api)) != NULL)
{
listtype = GRAVITY_GROUPS;
can_modify = true;
}
else if((api->item = startsWith("/api/groups:batchDelete", api)) != NULL)
{
listtype = GRAVITY_GROUPS;
can_modify = true;
batchDelete = true;
}
else if((api->item = startsWith("/api/lists", api)) != NULL)
{
listtype = GRAVITY_ADLISTS;
can_modify = true;
}
else if((api->item = startsWith("/api/lists:batchDelete", api)) != NULL)
{
listtype = GRAVITY_ADLISTS;
can_modify = true;
batchDelete = true;
}
else if((api->item = startsWith("/api/clients", api)) != NULL)
{
listtype = GRAVITY_CLIENTS;
can_modify = true;
}
else if((api->item = startsWith("/api/clients:batchDelete", api)) != NULL)
{
listtype = GRAVITY_CLIENTS;
can_modify = true;
batchDelete = true;
}
else if((api->item = startsWith("/api/domains/allow/exact", api)) != NULL)
{
listtype = GRAVITY_DOMAINLIST_ALLOW_EXACT;
@ -522,7 +785,7 @@ int api_list(struct ftl_conn *api)
}
else if((api->item = startsWith("/api/domains/allow", api)) != NULL)
{
listtype = GRAVITY_DOMAINLIST_ALLOW_ALL;
listtype = GRAVITY_DOMAINLIST_ALLOW_ALL;
}
else if((api->item = startsWith("/api/domains/deny/exact", api)) != NULL)
{
@ -550,6 +813,12 @@ int api_list(struct ftl_conn *api)
{
listtype = GRAVITY_DOMAINLIST_ALL_ALL;
}
else if((api->item = startsWith("/api/domains:batchDelete", api)) != NULL)
{
listtype = GRAVITY_DOMAINLIST_ALL_ALL;
can_modify = true;
batchDelete = true;
}
else
{
return send_json_error(api, 400,
@ -590,7 +859,7 @@ int api_list(struct ftl_conn *api)
return ret;
}
}
else if(can_modify && api->method == HTTP_POST)
else if(can_modify && api->method == HTTP_POST && !batchDelete)
{
// Add item to list identified by payload
if(api->item != NULL && strlen(api->item) != 0)
@ -598,7 +867,7 @@ int api_list(struct ftl_conn *api)
return send_json_error(api, 400,
"uri_error",
"Invalid request: Specify item in payload, not as URI parameter",
NULL);
api->item);
}
else
{
@ -611,7 +880,7 @@ int api_list(struct ftl_conn *api)
return ret;
}
}
else if(can_modify && api->method == HTTP_DELETE)
else if(can_modify && (api->method == HTTP_DELETE || (api->method == HTTP_POST && batchDelete)))
{
// Delete item from list
// We would not actually need the SHM lock here, however, we do

View File

@ -15,6 +15,8 @@
// struct fifologData
#include "log.h"
#include "config/config.h"
// main_pid()
#include "signals.h"
// fifologData is allocated in shared memory for cross-fork compatibility
int api_logs(struct ftl_conn *api)
@ -66,10 +68,12 @@ int api_logs(struct ftl_conn *api)
cJSON *entry = JSON_NEW_OBJECT();
JSON_ADD_NUMBER_TO_OBJECT(entry, "timestamp", fifo_log->logs[api->opts.which].timestamp[i]);
JSON_REF_STR_IN_OBJECT(entry, "message", fifo_log->logs[api->opts.which].message[i]);
JSON_REF_STR_IN_OBJECT(entry, "prio", fifo_log->logs[api->opts.which].prio[i]);
JSON_ADD_ITEM_TO_ARRAY(log, entry);
}
JSON_ADD_ITEM_TO_OBJECT(json, "log", log);
JSON_ADD_NUMBER_TO_OBJECT(json, "nextID", fifo_log->logs[api->opts.which].next_id);
JSON_ADD_NUMBER_TO_OBJECT(json, "pid", main_pid());
// Add file name
const char *logfile = NULL;

View File

@ -31,16 +31,16 @@ static bool getDefaultInterface(char iface[IF_NAMESIZE], in_addr_t *gw)
unsigned long dest_r = 0, gw_r = 0;
unsigned int flags = 0u;
int metric = 0, minmetric = __INT_MAX__;
char iface_r[IF_NAMESIZE] = { 0 };
char buf[1024] = { 0 };
FILE *file;
if((file = fopen("/proc/net/route", "r")))
{
// Parse /proc/net/route - the kernel's IPv4 routing table
char buf[1024] = { 0 };
while(fgets(buf, sizeof(buf), file))
{
if(sscanf(buf, "%s %lx %lx %x %*i %*i %i", iface_r, &dest_r, &gw_r, &flags, &metric) != 5)
char iface_r[IF_NAMESIZE] = { 0 };
if(sscanf(buf, "%15s %lx %lx %x %*i %*i %i", iface_r, &dest_r, &gw_r, &flags, &metric) != 5)
continue;
// Only analyze routes which are UP and whose
@ -440,7 +440,8 @@ static int api_network_devices_DELETE(struct ftl_conn *api)
// Delete row from network table by ID
const char *sql_msg = NULL;
if(!networkTable_deleteDevice(db, device_id, &sql_msg))
int deleted = 0;
if(!networkTable_deleteDevice(db, device_id, &deleted, &sql_msg))
{
// Add SQL message (may be NULL = not available)
return send_json_error(api, 500,
@ -452,9 +453,11 @@ static int api_network_devices_DELETE(struct ftl_conn *api)
// Close database
dbclose(&db);
// Send empty reply with code 204 No Content
// Send empty reply with codes:
// - 204 No Content (if any items were deleted)
// - 404 Not Found (if no items were deleted)
cJSON *json = JSON_NEW_OBJECT();
JSON_SEND_OBJECT_CODE(json, 204);
JSON_SEND_OBJECT_CODE(json, deleted > 0 ? 204 : 404);
}
int api_network_devices(struct ftl_conn *api)

View File

@ -19,7 +19,6 @@
#include "database/aliasclients.h"
// get_memdb()
#include "database/query-table.h"
// dbopen(false, ), dbclose()
#include "database/common.h"
@ -34,8 +33,8 @@ static int add_strings_to_array(struct ftl_conn *api, cJSON *array, const char *
"Could not read from in-memory database",
NULL);
}
sqlite3_stmt *stmt;
sqlite3_stmt *stmt = NULL;
int rc = sqlite3_prepare_v2(memdb, querystr, -1, &stmt, NULL);
if( rc != SQLITE_OK )
{
@ -174,7 +173,7 @@ int api_queries_suggestions(struct ftl_conn *api)
JSON_SEND_OBJECT(json);
}
#define QUERYSTR "SELECT q.id,timestamp,q.type,status,d.domain,f.forward,additional_info,reply_type,reply_time,dnssec,c.ip,c.name,a.content,regex_id"
#define QUERYSTR "SELECT q.id,timestamp,q.type,status,d.domain,f.forward,additional_info,reply_type,reply_time,dnssec,c.ip,c.name,a.content,list_id"
// JOIN: Only return rows where there is a match in BOTH tables
// LEFT JOIN: Return all rows from the left table, and the matched rows from the right table
#define JOINSTR "JOIN client_by_id c ON q.client = c.id JOIN domain_by_id d ON q.domain = d.id LEFT JOIN forward_by_id f ON q.forward = f.id LEFT JOIN addinfo_by_id a ON a.id = q.additional_info"
@ -215,8 +214,8 @@ static void querystr_finish(char *querystr, const char *sort_col, const char *so
sort_col_sql = "q.reply_time";
else if(strcasecmp(sort_col, "dnssec") == 0)
sort_col_sql = "q.dnssec";
else if(strcasecmp(sort_col, "regex.id") == 0)
sort_col_sql = "regex_id";
else if(strcasecmp(sort_col, "list_id") == 0)
sort_col_sql = "list_id";
// ... and the sort direction
if(strcasecmp(sort_dir, "asc") == 0 || strcasecmp(sort_dir, "ascending") == 0)
@ -279,7 +278,7 @@ int api_queries(struct ftl_conn *api)
// Start building database query string
char querystr[QUERYSTRBUFFERLEN] = { 0 };
sprintf(querystr, "%s FROM %s q %s", QUERYSTR, disk ? "disk.query_storage" : "query_storage", JOINSTR);
snprintf(querystr, QUERYSTRBUFFERLEN, "%s FROM %s q %s", QUERYSTR, disk ? "disk.query_storage" : "query_storage", JOINSTR);
int draw = 0;
char domainname[512] = { 0 };
@ -438,39 +437,50 @@ int api_queries(struct ftl_conn *api)
}
}
// Get connection to in-memory database
sqlite3 *db = get_memdb();
// We use this boolean to memorize if we are filtering at all. It is used
// later to decide if we can short-circuit the query counting for
// performance reasons.
bool filtering = false;
// Regex filtering?
regex_t *regex_domains = NULL;
unsigned int N_regex_domains = 0;
if(compile_filter_regex(api, "webserver.api.excludeDomains",
config.webserver.api.excludeDomains.v.json,
&regex_domains, &N_regex_domains))
filtering = true;
regex_t *regex_clients = NULL;
unsigned int N_regex_clients = 0;
if(compile_filter_regex(api, "webserver.api.excludeClients",
config.webserver.api.excludeClients.v.json,
&regex_clients, &N_regex_clients))
filtering = true;
// Finish preparing query string
querystr_finish(querystr, sort_col, sort_dir);
// Attach disk database if necessary
const char *message = "";
if(disk && !attach_disk_database(&message))
// Get connection to in-memory database
sqlite3 *memdb = get_memdb();
if(memdb == NULL)
{
return send_json_error(api, 500,
"internal_error",
"Internal server error, cannot attach disk database",
message);
return send_json_error(api, 500, // 500 Internal error
"database_error",
"Could not read from in-memory database",
NULL);
}
// Prepare SQLite3 statement
sqlite3_stmt *read_stmt = NULL;
int rc = sqlite3_prepare_v2(db, querystr, -1, &read_stmt, NULL);
int rc = sqlite3_prepare_v2(memdb, querystr, -1, &read_stmt, NULL);
if( rc != SQLITE_OK )
{
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to prepare read SQL query",
sqlite3_errstr(rc));
}
// We use this boolean to memorize if we are filtering at all. It is used
// later to decide if we can short-circuit the query counting for
// performance reasons.
bool filtering = false;
// Bind items to prepared statement
if(api->request->query_string != NULL)
{
@ -484,8 +494,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind timestamp:from to SQL query",
@ -501,8 +509,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind timestamp:until to SQL query",
@ -518,8 +524,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind domain to SQL query",
@ -535,8 +539,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind cip to SQL query",
@ -552,8 +554,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind client to SQL query",
@ -569,8 +569,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind upstream to SQL query",
@ -595,8 +593,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind type to SQL query",
@ -605,8 +601,6 @@ int api_queries(struct ftl_conn *api)
}
else
{
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 400,
"bad_request",
"Requested type is invalid",
@ -631,8 +625,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind status to SQL query",
@ -641,8 +633,6 @@ int api_queries(struct ftl_conn *api)
}
else
{
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 400,
"bad_request",
"Requested status is invalid",
@ -667,8 +657,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind reply to SQL query",
@ -677,8 +665,6 @@ int api_queries(struct ftl_conn *api)
}
else
{
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 400,
"bad_request",
"Requested reply is invalid",
@ -703,8 +689,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind dnssec to SQL query",
@ -713,8 +697,6 @@ int api_queries(struct ftl_conn *api)
}
else
{
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 400,
"bad_request",
"Requested dnssec is invalid",
@ -731,8 +713,6 @@ int api_queries(struct ftl_conn *api)
{
sqlite3_reset(read_stmt);
sqlite3_finalize(read_stmt);
if(disk)
detach_disk_database(NULL);
return send_json_error(api, 500,
"internal_error",
"Internal server error, failed to bind count to SQL query",
@ -746,19 +726,81 @@ int api_queries(struct ftl_conn *api)
log_debug(DEBUG_API, " with cursor: %lu, start: %u, length: %d", cursor, start, length);
cJSON *queries = JSON_NEW_ARRAY();
unsigned int added = 0, recordsCounted = 0;
unsigned int added = 0, recordsCounted = 0, regex_skipped = 0;
bool skipTheRest = false;
while((rc = sqlite3_step(read_stmt)) == SQLITE_ROW)
{
// Increase number of records from the database
recordsCounted++;
// Apply possible domain regex filters to Query Log
const char *domain = (const char*)sqlite3_column_text(read_stmt, 4); // d.domain
if(N_regex_domains > 0)
{
bool match = false;
// Iterate over all regex filters
for(unsigned int i = 0; i < N_regex_domains; i++)
{
// Check if the domain matches the regex
if(regexec(&regex_domains[i], domain, 0, NULL, 0) == 0)
{
// Domain matches
match = true;
break;
}
}
if(match)
{
// Domain matches, we skip it and adjust the
// counter
recordsCounted--;
regex_skipped++;
continue;
}
}
// Apply possible client regex filters to Query Log
const char *client_ip = (const char*)sqlite3_column_text(read_stmt, 10); // c.ip
const char *client_name = NULL;
if(sqlite3_column_type(read_stmt, 11) == SQLITE_TEXT && sqlite3_column_bytes(read_stmt, 11) > 0)
client_name = (const char*)sqlite3_column_text(read_stmt, 11); // c.name
if(N_regex_clients > 0)
{
bool match = false;
// Iterate over all regex filters
for(unsigned int i = 0; i < N_regex_clients; i++)
{
				// Check if the client IP matches the regex
if(regexec(&regex_clients[i], client_ip, 0, NULL, 0) == 0)
{
// Client IP matches
match = true;
break;
}
else if(client_name != NULL && regexec(&regex_clients[i], client_name, 0, NULL, 0) == 0)
{
// Client name matches
match = true;
break;
}
}
if(match)
{
// Domain matches, we skip it and adjust the
// counter
recordsCounted--;
regex_skipped++;
continue;
}
}
// Skip all records once we have enough (but still count them)
if(skipTheRest)
continue;
// Check if we have reached the limit
if(added >= (unsigned int)length)
// Length may be set to -1 to indicate we want everything.
if(length > 0 && added >= (unsigned int)length)
{
if(filtering)
{
@ -785,11 +827,29 @@ int api_queries(struct ftl_conn *api)
}
else if(length > 0 && added >= (unsigned int)length)
{
// Length may be set to -1 to indicate we want
// everything.
// Skip everything AFTER we added the requested number
// of queries if length is > 0.
break;
continue;
}
// Check if we have reached the limit
if(added >= (unsigned int)length)
{
if(filtering)
{
// We are filtering, so we have to continue to
// step over the remaining rows to get the
// correct number of total records
skipTheRest = true;
continue;
}
else
{
// We are not filtering, so we can stop here
// The total number of records is the number
// of records in the database
break;
}
}
// Build item object
@ -806,7 +866,7 @@ int api_queries(struct ftl_conn *api)
JSON_COPY_STR_TO_OBJECT(item, "type", get_query_type_str(query.type, &query, buffer));
JSON_REF_STR_IN_OBJECT(item, "status", get_query_status_str(query.status));
JSON_REF_STR_IN_OBJECT(item, "dnssec", get_query_dnssec_str(query.dnssec));
JSON_COPY_STR_TO_OBJECT(item, "domain", sqlite3_column_text(read_stmt, 4)); // d.domain
JSON_COPY_STR_TO_OBJECT(item, "domain", domain);
if(sqlite3_column_type(read_stmt, 5) == SQLITE_TEXT &&
sqlite3_column_bytes(read_stmt, 5) > 0)
@ -820,20 +880,18 @@ int api_queries(struct ftl_conn *api)
JSON_ADD_ITEM_TO_OBJECT(item, "reply", reply);
cJSON *client = JSON_NEW_OBJECT();
JSON_COPY_STR_TO_OBJECT(client, "ip", sqlite3_column_text(read_stmt, 10)); // c.ip
if(sqlite3_column_type(read_stmt, 11) == SQLITE_TEXT &&
sqlite3_column_bytes(read_stmt, 11) > 0)
JSON_COPY_STR_TO_OBJECT(client, "name", sqlite3_column_text(read_stmt, 11)); // c.name
JSON_COPY_STR_TO_OBJECT(client, "ip", client_ip);
if(client_name != NULL)
JSON_COPY_STR_TO_OBJECT(client, "name", client_name);
else
JSON_ADD_NULL_TO_OBJECT(client, "name");
JSON_ADD_ITEM_TO_OBJECT(item, "client", client);
// Add regex_id if it exists
// Add list_id if it exists
if(sqlite3_column_type(read_stmt, 13) == SQLITE_INTEGER)
JSON_ADD_NUMBER_TO_OBJECT(item, "regex_id", sqlite3_column_int(read_stmt, 13)); // regex_id
JSON_ADD_NUMBER_TO_OBJECT(item, "list_id", sqlite3_column_int(read_stmt, 13)); // list_id
else
JSON_ADD_NULL_TO_OBJECT(item, "regex_id");
JSON_ADD_NULL_TO_OBJECT(item, "list_id");
const unsigned char *cname = NULL;
switch(query.status)
@ -872,8 +930,8 @@ int api_queries(struct ftl_conn *api)
added++;
}
log_debug(DEBUG_API, "Sending %u of %lu in memory and %lu on disk queries (counted %u)",
added, mem_dbnum, disk_dbnum, recordsCounted);
log_debug(DEBUG_API, "Sending %u of %lu in memory and %lu on disk queries (counted %u, skipped %u)",
added, mem_dbnum, disk_dbnum, recordsCounted, regex_skipped);
cJSON *json = JSON_NEW_OBJECT();
JSON_ADD_ITEM_TO_OBJECT(json, "queries", queries);
@ -902,13 +960,80 @@ int api_queries(struct ftl_conn *api)
// Finalize statements
sqlite3_finalize(read_stmt);
if(disk && !detach_disk_database(&message))
// Free regex memory if allocated
if(N_regex_domains > 0)
{
return send_json_error(api, 500,
"internal_error",
"Internal server error, cannot detach disk database",
message);
// Free individual regexes
for(unsigned int i = 0; i < N_regex_domains; i++)
regfree(&regex_domains[i]);
// Free array of regex pointers
free(regex_domains);
}
if(N_regex_clients > 0)
{
// Free individual regexes
for(unsigned int i = 0; i < N_regex_clients; i++)
regfree(&regex_clients[i]);
		// Free array of regex pointers
free(regex_clients);
}
JSON_SEND_OBJECT(json);
}
bool compile_filter_regex(struct ftl_conn *api, const char *path, cJSON *json, regex_t **regex, unsigned int *N_regex)
{
	// Compile the array of regex filter strings in <json> (read from the
	// config item at <path>, used only for log messages) into a
	// heap-allocated array stored in <*regex>, with the number of
	// successfully compiled regexes stored in <*N_regex>.
	// Returns true if at least one regex was compiled (= filtering is
	// active), false if the array is empty or nothing could be compiled.
	// On error, an error reply is sent via <api> and everything allocated
	// here is released so callers never see partially valid state.
	const int N = cJSON_GetArraySize(json);
	if(N < 1)
		return false;

	// Allocate memory for regex array
	*regex = calloc(N, sizeof(regex_t));
	if(*regex == NULL)
	{
		return send_json_error(api, 500,
		                       "internal_error",
		                       "Internal server error, failed to allocate memory for regex array",
		                       NULL);
	}

	// Compile regexes
	unsigned int i = 0;
	cJSON *filter = NULL;
	cJSON_ArrayForEach(filter, json)
	{
		// Skip non-string, invalid and empty values
		if(!cJSON_IsString(filter) || filter->valuestring == NULL || strlen(filter->valuestring) == 0)
		{
			log_warn("Skipping invalid regex at %s.%u", path, i);
			continue;
		}

		// Compile regex
		int rc = regcomp(&(*regex)[i], filter->valuestring, REG_EXTENDED);
		if(rc != 0)
		{
			// Failed to compile regex
			char errbuf[1024] = { 0 };
			regerror(rc, &(*regex)[i], errbuf, sizeof(errbuf));
			log_err("Failed to compile regex \"%s\": %s",
			        filter->valuestring, errbuf);

			// Free the regexes compiled so far (slot i failed to
			// compile, so only slots [0, i) hold valid regexes)
			// and reset the out-parameters so callers ignoring the
			// return value cannot use a half-initialized array
			for(unsigned int j = 0; j < i; j++)
				regfree(&(*regex)[j]);
			free(*regex);
			*regex = NULL;
			*N_regex = 0;

			return send_json_error(api, 400,
			                       "bad_request",
			                       "Failed to compile regex",
			                       filter->valuestring);
		}
		i++;
	}

	// Report only the number of successfully compiled regexes: skipped
	// (invalid) entries would otherwise leave zeroed regex_t slots that
	// callers would hand to regexec()/regfree() (undefined behavior)
	*N_regex = i;

	// We are filtering only if at least one regex was actually compiled
	return i > 0;
}

View File

@ -15,7 +15,7 @@
#include "database/gravity-db.h"
// match_regex()
#include "regex_r.h"
#include <idna.h>
#include <idn2.h>
#define MAX_SEARCH_RESULTS 10000u
@ -182,18 +182,21 @@ int api_search(struct ftl_conn *api)
// use characters drawn from a large repertoire (Unicode), but IDNA
// allows the non-ASCII characters to be represented using only the
// ASCII characters already allowed in so-called host names today.
// idna_to_ascii_lz() convert domain name in the locales encoding to an
// idn2_to_ascii_lz() convert domain name in the locales encoding to an
// ASCII string. The domain name may contain several labels, separated
// by dots. The output buffer must be deallocated by the caller.
// Used flags:
// - IDN2_NFC_INPUT: Input is in Unicode Normalization Form C (NFC)
// - IDN2_NONTRANSITIONAL: Use Unicode TR46 non-transitional processing
char *punycode = NULL;
const Idna_rc rc = idna_to_ascii_lz(domain, &punycode, 0);
if (rc != IDNA_SUCCESS)
const int rc = idn2_to_ascii_lz(domain, &punycode, IDN2_NFC_INPUT | IDN2_NONTRANSITIONAL);
if (rc != IDN2_OK)
{
// Invalid domain name
return send_json_error(api, 400,
"bad_request",
"Invalid request: Invalid domain name",
idna_strerror(rc));
idn2_strerror(rc));
}
// Convert punycode domain to lowercase

View File

@ -8,24 +8,22 @@
* This file is copyright under the latest version of the EUPL.
* Please see LICENSE file for your rights under this license. */
#include "../FTL.h"
#include "../webserver/http-common.h"
#include "../webserver/json_macros.h"
#include "api.h"
#include "../shmem.h"
#include "../datastructure.h"
#include "FTL.h"
#include "webserver/http-common.h"
#include "webserver/json_macros.h"
#include "api/api.h"
#include "shmem.h"
#include "datastructure.h"
// read_setupVarsconf()
#include "../setupVars.h"
#include "config/setupVars.h"
// logging routines
#include "../log.h"
#include "log.h"
// config struct
#include "../config/config.h"
// in_auditlist()
#include "../database/gravity-db.h"
#include "config/config.h"
// overTime data
#include "../overTime.h"
#include "overTime.h"
// enum REGEX
#include "../regex_r.h"
#include "regex_r.h"
// sqrt()
#include <math.h>
@ -44,7 +42,7 @@ static int __attribute__((pure)) cmpasc(const void *a, const void *b)
} */
// qsort subroutine, sort DESC
static int __attribute__((pure)) cmpdesc(const void *a, const void *b)
int __attribute__((pure)) cmpdesc(const void *a, const void *b)
{
const int *elem1 = (int*)a;
const int *elem2 = (int*)b;
@ -100,6 +98,10 @@ int api_stats_summary(struct ftl_conn *api)
return ret;
JSON_ADD_ITEM_TO_OBJECT(queries, "types", types);
cJSON *statuses = JSON_NEW_OBJECT();
for(enum query_status status = 0; status < QUERY_STATUS_MAX; status++)
JSON_ADD_NUMBER_TO_OBJECT(statuses, get_query_status_str(status), counters->status[status]);
JSON_ADD_ITEM_TO_OBJECT(queries, "status", statuses);
cJSON *replies = JSON_NEW_OBJECT();
for(enum reply_type reply = 0; reply <QUERY_REPLY_MAX; reply++)
@ -135,15 +137,6 @@ int api_stats_summary(struct ftl_conn *api)
int api_stats_top_domains(struct ftl_conn *api)
{
int count = 10;
bool audit = false;
int *temparray = calloc(2*counters->domains, sizeof(int*));
if(temparray == NULL)
{
log_err("Memory allocation failed in %s()", __FUNCTION__);
return 0;
}
// Exit before processing any data if requested via config setting
if(config.misc.privacylevel.v.privacy_level >= PRIVACY_HIDE_DOMAINS)
{
@ -155,29 +148,37 @@ int api_stats_top_domains(struct ftl_conn *api)
cJSON *json = JSON_NEW_OBJECT();
cJSON *top_domains = JSON_NEW_ARRAY();
JSON_ADD_ITEM_TO_OBJECT(json, "top_domains", top_domains);
free(temparray);
JSON_SEND_OBJECT(json);
}
bool blocked = false; // Can be overwritten by query string
// /api/stats/top_domains?blocked=true
if(api->request->query_string != NULL)
{
// Should blocked clients be shown?
get_bool_var(api->request->query_string, "blocked", &blocked);
// Does the user request a non-default number of replies?
// Note: We do not accept zero query requests here
get_int_var(api->request->query_string, "count", &count);
// Apply Audit Log filtering?
get_bool_var(api->request->query_string, "audit", &audit);
}
// Lock shared memory
lock_shm();
for(int domainID=0; domainID < counters->domains; domainID++)
// Allocate memory
const int domains = counters->domains;
int *temparray = calloc(2*domains, sizeof(int));
if(temparray == NULL)
{
log_err("Memory allocation failed in %s()", __FUNCTION__);
return 0;
}
bool blocked = false; // Can be overwritten by query string
int count = 10;
// /api/stats/top_domains?blocked=true
if(api->request->query_string != NULL)
{
// Should blocked domains be shown?
get_bool_var(api->request->query_string, "blocked", &blocked);
// Does the user request a non-default number of replies?
// Note: We do not accept zero query requests here
get_int_var(api->request->query_string, "count", &count);
}
unsigned int added_domains = 0u;
for(int domainID = 0; domainID < domains; domainID++)
{
// Get domain pointer
const domainsData* domain = getDomain(domainID, true);
@ -190,21 +191,23 @@ int api_stats_top_domains(struct ftl_conn *api)
else
// Count only permitted queries
temparray[2*domainID + 1] = (domain->count - domain->blockedcount);
added_domains++;
}
// Sort temporary array
qsort(temparray, counters->domains, sizeof(int[2]), cmpdesc);
qsort(temparray, added_domains, sizeof(int[2]), cmpdesc);
// Get filter
const char* filter = read_setupVarsconf("API_QUERY_LOG_SHOW");
const char* log_show = read_setupVarsconf("API_QUERY_LOG_SHOW");
bool showpermitted = true, showblocked = true;
if(filter != NULL)
if(log_show != NULL)
{
if((strcmp(filter, "permittedonly")) == 0)
if((strcmp(log_show, "permittedonly")) == 0)
showblocked = false;
else if((strcmp(filter, "blockedonly")) == 0)
else if((strcmp(log_show, "blockedonly")) == 0)
showpermitted = false;
else if((strcmp(filter, "nothing")) == 0)
else if((strcmp(log_show, "nothing")) == 0)
{
showpermitted = false;
showblocked = false;
@ -213,11 +216,15 @@ int api_stats_top_domains(struct ftl_conn *api)
clearSetupVarsArray();
// Get domains which the user doesn't want to see
unsigned int excludeDomains = cJSON_GetArraySize(config.webserver.api.excludeDomains.v.json);
regex_t *regex_domains = NULL;
unsigned int N_regex_domains = 0;
compile_filter_regex(api, "webserver.api.excludeDomains",
config.webserver.api.excludeDomains.v.json,
&regex_domains, &N_regex_domains);
int n = 0;
cJSON *top_domains = JSON_NEW_ARRAY();
for(int i = 0; i < counters->domains; i++)
for(unsigned int i = 0; i < added_domains; i++)
{
// Get sorted index
const int domainID = temparray[2*i + 0];
@ -226,33 +233,31 @@ int api_stats_top_domains(struct ftl_conn *api)
if(domain == NULL)
continue;
// Skip this domain if there is a filter on it (but only if not in audit mode)
if(!audit)
// Get domain name
const char *domain_name = getstr(domain->domainpos);
// Hidden domain, probably due to privacy level. Skip this in the top lists
if(strcmp(domain_name, HIDDEN_DOMAIN) == 0)
continue;
// Skip this client if there is a filter on it
bool skip_domain = false;
if(N_regex_domains > 0)
{
// Check if this client should be skipped
bool skip_domain = false;
for(unsigned int j = 0; j < excludeDomains; j++)
// Iterate over all regex filters
for(unsigned int j = 0; j < N_regex_domains; j++)
{
cJSON *item = cJSON_GetArrayItem(config.webserver.api.excludeDomains.v.json, j);
if(strcmp(getstr(domain->domainpos), item->valuestring) == 0)
// Check if the domain matches the regex
if(regexec(&regex_domains[j], domain_name, 0, NULL, 0) == 0)
{
// Domain matches
skip_domain = true;
break;
}
}
if(skip_domain)
continue;
}
// Skip this domain if already audited
if(audit && in_auditlist(getstr(domain->domainpos)) > 0)
{
log_debug(DEBUG_API, "API: %s has been audited.", getstr(domain->domainpos));
continue;
}
// Hidden domain, probably due to privacy level. Skip this in the top lists
if(strcmp(getstr(domain->domainpos), HIDDEN_DOMAIN) == 0)
if(skip_domain)
continue;
int domain_count = -1;
@ -269,7 +274,7 @@ int api_stats_top_domains(struct ftl_conn *api)
if(domain_count > -1)
{
cJSON *domain_item = JSON_NEW_OBJECT();
JSON_REF_STR_IN_OBJECT(domain_item, "domain", getstr(domain->domainpos));
JSON_REF_STR_IN_OBJECT(domain_item, "domain", domain_name);
JSON_ADD_NUMBER_TO_OBJECT(domain_item, "count", domain_count);
JSON_ADD_ITEM_TO_ARRAY(top_domains, domain_item);
}
@ -280,12 +285,23 @@ int api_stats_top_domains(struct ftl_conn *api)
}
free(temparray);
// Free regexes
if(N_regex_domains > 0)
{
// Free individual regexes
for(unsigned int i = 0; i < N_regex_domains; i++)
regfree(&regex_domains[i]);
// Free array of regex pointers
free(regex_domains);
}
cJSON *json = JSON_NEW_OBJECT();
JSON_ADD_ITEM_TO_OBJECT(json, "domains", top_domains);
const int blocked_queries = get_blocked_count();
const int blocked_count = get_blocked_count();
JSON_ADD_NUMBER_TO_OBJECT(json, "total_queries", counters->queries);
JSON_ADD_NUMBER_TO_OBJECT(json, "blocked_queries", blocked_queries);
JSON_ADD_NUMBER_TO_OBJECT(json, "blocked_queries", blocked_count);
JSON_SEND_OBJECT_UNLOCK(json);
}
@ -293,8 +309,8 @@ int api_stats_top_domains(struct ftl_conn *api)
int api_stats_top_clients(struct ftl_conn *api)
{
int count = 10;
bool includezeroclients = false;
int *temparray = calloc(2*counters->clients, sizeof(int*));
const int clients = counters->clients;
int *temparray = calloc(2*clients, sizeof(int));
if(temparray == NULL)
{
log_err("Memory allocation failed in api_stats_top_clients()");
@ -325,15 +341,12 @@ int api_stats_top_clients(struct ftl_conn *api)
// Does the user request a non-default number of replies?
// Note: We do not accept zero query requests here
get_int_var(api->request->query_string, "count", &count);
// Show also clients which have not been active recently?
get_bool_var(api->request->query_string, "withzero", &includezeroclients);
}
// Lock shared memory
lock_shm();
for(int clientID = 0; clientID < counters->clients; clientID++)
for(int clientID = 0; clientID < clients; clientID++)
{
// Get client pointer
const clientsData* client = getClient(clientID, true);
@ -348,14 +361,18 @@ int api_stats_top_clients(struct ftl_conn *api)
}
// Sort temporary array
qsort(temparray, counters->clients, sizeof(int[2]), cmpdesc);
qsort(temparray, clients, sizeof(int[2]), cmpdesc);
// Get clients which the user doesn't want to see
unsigned int excludeClients = cJSON_GetArraySize(config.webserver.api.excludeClients.v.json);
regex_t *regex_clients = NULL;
unsigned int N_regex_clients = 0;
compile_filter_regex(api, "webserver.api.excludeClients",
config.webserver.api.excludeClients.v.json,
&regex_clients, &N_regex_clients);
int n = 0;
cJSON *top_clients = JSON_NEW_ARRAY();
for(int i=0; i < counters->clients; i++)
for(int i = 0; i < clients; i++)
{
// Get sorted indices and counter values (may be either total or blocked count)
const int clientID = temparray[2*i + 0];
@ -365,33 +382,43 @@ int api_stats_top_clients(struct ftl_conn *api)
if(client == NULL)
continue;
// Skip this client if there is a filter on it
bool skip_domain = false;
for(unsigned int j = 0; j < excludeClients; j++)
{
cJSON *item = cJSON_GetArrayItem(config.webserver.api.excludeClients.v.json, j);
if(strcmp(getstr(client->ippos), item->valuestring) == 0 ||
strcmp(getstr(client->namepos), item->valuestring) == 0)
{
skip_domain = true;
break;
}
}
if(skip_domain)
continue;
// Hidden client, probably due to privacy level. Skip this in the top lists
if(strcmp(getstr(client->ippos), HIDDEN_CLIENT) == 0)
continue;
// Get client IP and name
// Get IP and host name of client
const char *client_ip = getstr(client->ippos);
const char *client_name = getstr(client->namepos);
// Return this client if either
// - "withzero" option is set, and/or
// - the client made at least one query within the most recent 24 hours
if(includezeroclients || count > 0)
// Hidden client, probably due to privacy level. Skip this in the top lists
if(strcmp(client_ip, HIDDEN_CLIENT) == 0)
continue;
// Skip this client if there is a filter on it
bool skip_client = false;
if(N_regex_clients > 0)
{
// Iterate over all regex filters
for(unsigned int j = 0; j < N_regex_clients; j++)
{
// Check if the domain matches the regex
if(regexec(&regex_clients[j], client_ip, 0, NULL, 0) == 0)
{
// Client IP matches
skip_client = true;
break;
}
else if(client_name != NULL && regexec(&regex_clients[j], client_name, 0, NULL, 0) == 0)
{
// Client name matches
skip_client = true;
break;
}
}
}
if(skip_client)
continue;
// Return this client if the client made at least one query
// within the most recent 24 hours
if(client_count > 0)
{
cJSON *client_item = JSON_NEW_OBJECT();
JSON_REF_STR_IN_OBJECT(client_item, "name", client_name);
@ -407,11 +434,22 @@ int api_stats_top_clients(struct ftl_conn *api)
// Free temporary array
free(temparray);
// Free regexes
if(N_regex_clients > 0)
{
// Free individual regexes
for(unsigned int i = 0; i < N_regex_clients; i++)
regfree(&regex_clients[i]);
// Free array of regex pointers
free(regex_clients);
}
cJSON *json = JSON_NEW_OBJECT();
JSON_ADD_ITEM_TO_OBJECT(json, "clients", top_clients);
const int blocked_queries = get_blocked_count();
JSON_ADD_NUMBER_TO_OBJECT(json, "blocked_queries", blocked_queries);
const int blocked_count = get_blocked_count();
JSON_ADD_NUMBER_TO_OBJECT(json, "blocked_queries", blocked_count);
JSON_ADD_NUMBER_TO_OBJECT(json, "total_queries", counters->queries);
JSON_SEND_OBJECT_UNLOCK(json);
}
@ -419,9 +457,9 @@ int api_stats_top_clients(struct ftl_conn *api)
int api_stats_upstreams(struct ftl_conn *api)
{
const int forwarded = get_forwarded_count();
unsigned int totalcount = 0;
int *temparray = calloc(2*forwarded, sizeof(int*));
const int upstreams = counters->upstreams;
int *temparray = calloc(2*upstreams, sizeof(int));
if(temparray == NULL)
{
log_err("Memory allocation failed in api_stats_upstreams()");
@ -431,7 +469,7 @@ int api_stats_upstreams(struct ftl_conn *api)
// Lock shared memory
lock_shm();
for(int upstreamID = 0; upstreamID < counters->upstreams; upstreamID++)
for(int upstreamID = 0; upstreamID < upstreams; upstreamID++)
{
// Get upstream pointer
const upstreamsData* upstream = getUpstream(upstreamID, true);
@ -439,20 +477,16 @@ int api_stats_upstreams(struct ftl_conn *api)
continue;
temparray[2*upstreamID + 0] = upstreamID;
unsigned int count = 0;
for(unsigned i = 0; i < ArraySize(upstream->overTime); i++)
count += upstream->overTime[i];
temparray[2*upstreamID + 1] = count;
totalcount += count;
temparray[2*upstreamID + 1] = upstream->count;
totalcount += upstream->count;
}
// Sort temporary array in descending order
qsort(temparray, counters->upstreams, sizeof(int[2]), cmpdesc);
qsort(temparray, upstreams, sizeof(int[2]), cmpdesc);
// Loop over available forward destinations
cJSON *upstreams = JSON_NEW_ARRAY();
for(int i = -2; i < min(counters->upstreams, 8); i++)
cJSON *top_upstreams = JSON_NEW_ARRAY();
for(int i = -2; i < upstreams; i++)
{
int count = 0;
const char* ip, *name;
@ -520,7 +554,7 @@ int api_stats_upstreams(struct ftl_conn *api)
JSON_ADD_NUMBER_TO_OBJECT(statistics, "response", responsetime);
JSON_ADD_NUMBER_TO_OBJECT(statistics, "variance", uncertainty);
JSON_ADD_ITEM_TO_OBJECT(upstream, "statistics", statistics);
JSON_ADD_ITEM_TO_ARRAY(upstreams, upstream);
JSON_ADD_ITEM_TO_ARRAY(top_upstreams, upstream);
}
}
@ -528,9 +562,9 @@ int api_stats_upstreams(struct ftl_conn *api)
free(temparray);
cJSON *json = JSON_NEW_OBJECT();
JSON_ADD_ITEM_TO_OBJECT(json, "upstreams", upstreams);
const int forwarded_queries = get_forwarded_count();
JSON_ADD_NUMBER_TO_OBJECT(json, "forwarded_queries", forwarded_queries);
JSON_ADD_ITEM_TO_OBJECT(json, "upstreams", top_upstreams);
const int forwarded_count = get_forwarded_count();
JSON_ADD_NUMBER_TO_OBJECT(json, "forwarded_queries", forwarded_count);
JSON_ADD_NUMBER_TO_OBJECT(json, "total_queries", counters->queries);
JSON_SEND_OBJECT_UNLOCK(json);
}

View File

@ -181,7 +181,7 @@ int api_stats_database_top_items(struct ftl_conn *api)
// Get options from API struct
bool blocked = false; // Can be overwritten by query string
const bool domains = api->opts.domains;
const bool domains = api->opts.flags & API_DOMAINS;
// Get parameters from query string
if(api->request->query_string != NULL)

View File

@ -15,8 +15,18 @@
#include "api/api.h"
// ERRBUF_SIZE
#include "config/dnsmasq_config.h"
// inflate_buffer()
#include "zip/gzip.h"
// find_file_in_tar()
#include "zip/tar.h"
// sqlite3_open_v2()
#include "database/sqlite3.h"
// dbquery()
#include "database/common.h"
// MAX_ROTATIONS
#include "files.h"
#define MAXZIPSIZE (50u*1024*1024)
#define MAXFILESIZE (50u*1024*1024)
static int api_teleporter_GET(struct ftl_conn *api)
{
@ -58,9 +68,9 @@ static int api_teleporter_GET(struct ftl_conn *api)
struct upload_data {
bool too_large;
char *sid;
char *zip_data;
char *zip_filename;
size_t zip_size;
uint8_t *data;
char *filename;
size_t filesize;
};
// Callback function for CivetWeb to determine which fields we want to receive
@ -79,7 +89,7 @@ static int field_found(const char *key,
is_sid = false;
if(strcasecmp(key, "file") == 0 && filename && *filename)
{
data->zip_filename = strdup(filename);
data->filename = strdup(filename);
is_file = true;
return MG_FORM_FIELD_STORAGE_GET;
}
@ -103,21 +113,21 @@ static int field_get(const char *key, const char *value, size_t valuelen, void *
if(is_file)
{
if(data->zip_size + valuelen > MAXZIPSIZE)
if(data->filesize + valuelen > MAXFILESIZE)
{
log_warn("Uploaded Teleporter ZIP archive is too large (limit is %u bytes)",
MAXZIPSIZE);
log_warn("Uploaded Teleporter file is too large (limit is %u bytes)",
MAXFILESIZE);
data->too_large = true;
return MG_FORM_FIELD_HANDLE_ABORT;
}
// Allocate memory for the raw ZIP archive data
data->zip_data = realloc(data->zip_data, data->zip_size + valuelen);
// Copy the raw ZIP archive data
memcpy(data->zip_data + data->zip_size, value, valuelen);
// Store the size of the ZIP archive raw data
data->zip_size += valuelen;
log_debug(DEBUG_API, "Received ZIP archive (%zu bytes, buffer is now %zu bytes)",
valuelen, data->zip_size);
// Allocate memory for the raw file data
data->data = realloc(data->data, data->filesize + valuelen);
// Copy the raw file data
memcpy(data->data + data->filesize, value, valuelen);
// Store the size of the file raw data
data->filesize += valuelen;
log_debug(DEBUG_API, "Received file (%zu bytes, buffer is now %zu bytes)",
valuelen, data->filesize);
}
else if(is_sid)
{
@ -143,24 +153,28 @@ static int field_stored(const char *path, long long file_size, void *user_data)
static int free_upload_data(struct upload_data *data)
{
// Free allocated memory
if(data->zip_filename)
if(data->filename)
{
free(data->zip_filename);
data->zip_filename = NULL;
free(data->filename);
data->filename = NULL;
}
if(data->sid)
{
free(data->sid);
data->sid = NULL;
}
if(data->zip_data)
if(data->data)
{
free(data->zip_data);
data->zip_data = NULL;
free(data->data);
data->data = NULL;
}
return 0;
}
// Private function prototypes
static int process_received_zip(struct ftl_conn *api, struct upload_data *data);
static int process_received_tar_gz(struct ftl_conn *api, struct upload_data *data);
static int api_teleporter_POST(struct ftl_conn *api)
{
struct upload_data data;
@ -170,7 +184,7 @@ static int api_teleporter_POST(struct ftl_conn *api)
// Disallow large ZIP archives (> 50 MB) to prevent DoS attacks.
// Typically, the ZIP archive size should be around 30-100 kB.
if(req_info->content_length > MAXZIPSIZE)
if(req_info->content_length > MAXFILESIZE)
{
free_upload_data(&data);
return send_json_error(api, 400,
@ -191,7 +205,7 @@ static int api_teleporter_POST(struct ftl_conn *api)
}
// Check if we received something we consider being a file
if(data.zip_data == NULL || data.zip_size == 0)
if(data.data == NULL || data.filesize == 0)
{
free_upload_data(&data);
return send_json_error(api, 400,
@ -209,28 +223,46 @@ static int api_teleporter_POST(struct ftl_conn *api)
"ZIP archive too large",
NULL);
}
/*
// Set the payload to the SID we received (if available)
if(data.sid != NULL)
// Check if we received something that claims to be a ZIP archive
// - filename should end in ".zip"
// - the data itself
// - should be at least 40 bytes long
// - start with 0x04034b50 (local file header signature, see https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT)
if(strlen(data.filename) > 4 &&
strcmp(data.filename + strlen(data.filename) - 4, ".zip") == 0 &&
data.filesize >= 40 &&
memcmp(data.data, "\x50\x4b\x03\x04", 4) == 0)
{
const size_t bufsize = strlen(data.sid) + 5;
api->payload.raw = calloc(bufsize, sizeof(char));
strncpy(api->payload.raw, "sid=", 5);
strncat(api->payload.raw, data.sid, bufsize - 4);
return process_received_zip(api, &data);
}
// Check if we received something that claims to be a TAR.GZ archive
// - filename should end in ".tar.gz"
// - the data itself
// - should be at least 40 bytes long
// - start with 0x8b1f (local file header signature, see https://www.ietf.org/rfc/rfc1952.txt)
else if(strlen(data.filename) > 7 &&
strcmp(data.filename + strlen(data.filename) - 7, ".tar.gz") == 0 &&
data.filesize >= 40 &&
memcmp(data.data, "\x1f\x8b", 2) == 0)
{
return process_received_tar_gz(api, &data);
}
// Check if the client is authorized to use this API endpoint
if(check_client_auth(api) == API_AUTH_UNAUTHORIZED)
{
free_upload_data(&data);
return send_json_unauthorized(api);
}
*/
// Process what we received
// else: invalid file
free_upload_data(&data);
return send_json_error(api, 400,
"bad_request",
"Invalid file",
"The uploaded file does not appear to be a valid Pi-hole Teleporter archive");
}
static int process_received_zip(struct ftl_conn *api, struct upload_data *data)
{
char hint[ERRBUF_SIZE];
memset(hint, 0, sizeof(hint));
cJSON *json_files = JSON_NEW_ARRAY();
const char *error = read_teleporter_zip(data.zip_data, data.zip_size, hint, json_files);
const char *error = read_teleporter_zip(data->data, data->filesize, hint, json_files);
if(error != NULL)
{
const size_t msglen = strlen(error) + strlen(hint) + 4;
@ -242,7 +274,7 @@ static int api_teleporter_POST(struct ftl_conn *api)
strcat(msg, ": ");
strcat(msg, hint);
}
free_upload_data(&data);
free_upload_data(data);
return send_json_error_free(api, 400,
"bad_request",
"Invalid ZIP archive",
@ -250,7 +282,7 @@ static int api_teleporter_POST(struct ftl_conn *api)
}
// Free allocated memory
free_upload_data(&data);
free_upload_data(data);
// Send response
cJSON *json = JSON_NEW_OBJECT();
@ -258,6 +290,445 @@ static int api_teleporter_POST(struct ftl_conn *api)
JSON_SEND_OBJECT(json);
}
// Description of the JSON files contained in a Pi-hole v5.x Teleporter
// archive and how each maps onto a table of the gravity database.
// Consumed by import_json_table() / process_received_tar_gz().
static struct teleporter_files {
	const char *filename; // Filename of the file in the archive
	const char *table_name; // Name of the table in the database
	const int listtype; // Type of list (only used for domainlist table, -1 otherwise)
	const size_t num_columns; // Number of columns in the table
	const char *columns[10]; // List of columns in the table
} teleporter_v5_files[] = {
	{
		.filename = "adlist.json",
		.table_name = "adlist",
		.listtype = -1,
		.num_columns = 10,
		.columns = { "id", "address", "enabled", "date_added", "date_modified", "comment", "date_updated", "number", "invalid_domains", "status" } // abp_entries and type are not defined in Pi-hole v5.x
	},{
		.filename = "adlist_by_group.json",
		.table_name = "adlist_by_group",
		.listtype = -1,
		.num_columns = 2,
		.columns = { "group_id", "adlist_id" }
	},{
		// Exact blacklist entries share the domainlist table, distinguished by type
		.filename = "blacklist.exact.json",
		.table_name = "domainlist",
		.listtype = 1, // GRAVITY_DOMAINLIST_DENY_EXACT
		.num_columns = 7,
		.columns = { "id", "domain", "enabled", "date_added", "date_modified", "comment", "type" }
	},{
		.filename = "blacklist.regex.json",
		.table_name = "domainlist",
		.listtype = 3, // GRAVITY_DOMAINLIST_DENY_REGEX
		.num_columns = 7,
		.columns = { "id", "domain", "enabled", "date_added", "date_modified", "comment", "type" }
	},{
		.filename = "client.json",
		.table_name = "client",
		.listtype = -1,
		.num_columns = 5,
		.columns = { "id", "ip", "date_added", "date_modified", "comment" }
	},{
		.filename = "client_by_group.json",
		.table_name = "client_by_group",
		.listtype = -1,
		.num_columns = 2,
		.columns = { "group_id", "client_id" }
	},{
		.filename = "domainlist_by_group.json",
		.table_name = "domainlist_by_group",
		.listtype = -1,
		.num_columns = 2,
		.columns = { "group_id", "domainlist_id" }
	},{
		.filename = "group.json",
		.table_name = "group",
		.listtype = -1,
		.num_columns = 6,
		.columns = { "id", "enabled", "name", "date_added", "date_modified", "description" }
	},{
		.filename = "whitelist.exact.json",
		.table_name = "domainlist",
		.listtype = 0, // GRAVITY_DOMAINLIST_ALLOW_EXACT
		.num_columns = 7,
		.columns = { "id", "domain", "enabled", "date_added", "date_modified", "comment", "type" }
	},{
		.filename = "whitelist.regex.json",
		.table_name = "domainlist",
		.listtype = 2, // GRAVITY_DOMAINLIST_ALLOW_REGEX
		.num_columns = 7,
		.columns = { "id", "domain", "enabled", "date_added", "date_modified", "comment", "type" }
	}
};
/**
 * Import one Pi-hole v5.x Teleporter JSON table into the gravity database.
 *
 * The JSON object must be an array of objects, each containing all columns
 * listed in file->columns (for domainlist tables, the "type" column is
 * injected here from file->listtype before validation). Existing rows of the
 * target table - or, for the shared domainlist table, existing rows of the
 * same list type - are deleted before the new rows are inserted inside a
 * single transaction.
 *
 * @param json Parsed JSON array extracted from the Teleporter archive
 * @param file Table description (filename, table name, list type, columns)
 * @return true on success, false on any validation or database error
 */
static bool import_json_table(cJSON *json, struct teleporter_files *file)
{
	// Check if the JSON object is an array
	if(!cJSON_IsArray(json))
	{
		log_err("import_json_table(%s): JSON object is not an array", file->filename);
		return false;
	}

	// Number of entries (for logging only). An empty array is valid and
	// merely clears the corresponding table content below.
	const int num_entries = cJSON_GetArraySize(json);

	// Check if all the JSON entries contain all the expected columns
	cJSON *json_object = NULL;
	cJSON_ArrayForEach(json_object, json)
	{
		if(!cJSON_IsObject(json_object))
		{
			log_err("import_json_table(%s): JSON array does not contain objects", file->filename);
			return false;
		}

		// If this is a record for the domainlist table, add type/kind
		if(strcmp(file->table_name, "domainlist") == 0)
		{
			// Add type/kind to the JSON object (v5.x archives split the
			// domainlist into separate files instead of using a type column)
			cJSON_AddNumberToObject(json_object, "type", file->listtype);
		}

		// Check if the JSON object contains the expected columns
		for(size_t i = 0; i < file->num_columns; i++)
		{
			if(cJSON_GetObjectItemCaseSensitive(json_object, file->columns[i]) == NULL)
			{
				log_err("import_json_table(%s): JSON object does not contain column \"%s\"", file->filename, file->columns[i]);
				return false;
			}
		}
	}

	log_info("import_json_table(%s): JSON array contains %d entr%s", file->filename, num_entries, num_entries == 1 ? "y" : "ies");

	// Open database connection
	sqlite3 *db = NULL;
	if(sqlite3_open_v2(config.files.gravity.v.s, &db, SQLITE_OPEN_READWRITE, NULL) != SQLITE_OK)
	{
		// Report the file we actually tried to open (the gravity database,
		// not the long-term query database)
		log_err("import_json_table(%s): Unable to open database file \"%s\": %s",
		        file->filename, config.files.gravity.v.s, sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Disable foreign key constraints for the duration of this import so
	// rows can be inserted in arbitrary table order
	if(sqlite3_exec(db, "PRAGMA foreign_keys = OFF;", NULL, NULL, NULL) != SQLITE_OK)
	{
		log_err("import_json_table(%s): Unable to disable foreign key constraints: %s", file->filename, sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Start transaction
	if(sqlite3_exec(db, "BEGIN TRANSACTION;", NULL, NULL, NULL) != SQLITE_OK)
	{
		log_err("import_json_table(%s): Unable to start transaction: %s", file->filename, sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Clear existing table entries
	if(file->listtype < 0)
	{
		// Delete all entries in the table
		log_debug(DEBUG_API, "import_json_table(%s): Deleting all entries from table \"%s\"", file->filename, file->table_name);
		if(dbquery(db, "DELETE FROM \"%s\";", file->table_name) != SQLITE_OK)
		{
			log_err("import_json_table(%s): Unable to delete entries from table \"%s\": %s",
			        file->filename, file->table_name, sqlite3_errmsg(db));
			sqlite3_close(db);
			return false;
		}
	}
	else
	{
		// Delete all entries in the table of the same type (the domainlist
		// table is shared between allow/deny x exact/regex lists)
		log_debug(DEBUG_API, "import_json_table(%s): Deleting all entries from table \"%s\" of type %d", file->filename, file->table_name, file->listtype);
		if(dbquery(db, "DELETE FROM \"%s\" WHERE type = %d;", file->table_name, file->listtype) != SQLITE_OK)
		{
			log_err("import_json_table(%s): Unable to delete entries from table \"%s\": %s",
			        file->filename, file->table_name, sqlite3_errmsg(db));
			sqlite3_close(db);
			return false;
		}
	}

	// Build dynamic SQL insertion statement
	// "INSERT OR IGNORE INTO table (column1, column2, ...) VALUES (?, ?, ...);"
	// Column names come from the static teleporter_v5_files table above, not
	// from user input, so %s interpolation is safe here.
	char *sql = sqlite3_mprintf("INSERT OR IGNORE INTO \"%s\" (", file->table_name);
	for(size_t i = 0; i < file->num_columns; i++)
	{
		char *sql2 = sqlite3_mprintf("%s%s", sql, file->columns[i]);
		sqlite3_free(sql);
		sql = NULL;
		if(i < file->num_columns - 1)
		{
			sql = sqlite3_mprintf("%s, ", sql2);
			sqlite3_free(sql2);
			sql2 = NULL;
		}
		else
		{
			sql = sqlite3_mprintf("%s) VALUES (", sql2);
			sqlite3_free(sql2);
			sql2 = NULL;
		}
	}
	// Append one "?" placeholder per column
	for(size_t i = 0; i < file->num_columns; i++)
	{
		char *sql2 = sqlite3_mprintf("%s?", sql);
		sqlite3_free(sql);
		sql = NULL;
		if(i < file->num_columns - 1)
		{
			sql = sqlite3_mprintf("%s, ", sql2);
			sqlite3_free(sql2);
			sql2 = NULL;
		}
		else
		{
			sql = sqlite3_mprintf("%s);", sql2);
			sqlite3_free(sql2);
			sql2 = NULL;
		}
	}

	// Prepare SQL statement
	sqlite3_stmt *stmt = NULL;
	if(sqlite3_prepare_v2(db, sql, -1, &stmt, NULL) != SQLITE_OK)
	{
		log_err("Unable to prepare SQL statement: %s", sqlite3_errmsg(db));
		sqlite3_free(sql);
		sqlite3_close(db);
		return false;
	}

	// Free allocated memory
	sqlite3_free(sql);
	sql = NULL;

	// Iterate over all JSON objects
	cJSON_ArrayForEach(json_object, json)
	{
		// Bind values to SQL statement
		for(size_t i = 0; i < file->num_columns; i++)
		{
			cJSON *json_value = cJSON_GetObjectItemCaseSensitive(json_object, file->columns[i]);
			if(cJSON_IsString(json_value))
			{
				// Bind string value (SQLITE_STATIC is safe: the cJSON tree
				// outlives the statement execution below)
				if(sqlite3_bind_text(stmt, i + 1, json_value->valuestring, -1, SQLITE_STATIC) != SQLITE_OK)
				{
					log_err("Unable to bind text value to SQL statement: %s", sqlite3_errmsg(db));
					sqlite3_finalize(stmt);
					sqlite3_close(db);
					return false;
				}
			}
			else if(cJSON_IsNumber(json_value))
			{
				// Bind integer value
				if(sqlite3_bind_int(stmt, i + 1, json_value->valueint) != SQLITE_OK)
				{
					log_err("Unable to bind integer value to SQL statement: %s", sqlite3_errmsg(db));
					sqlite3_finalize(stmt);
					sqlite3_close(db);
					return false;
				}
			}
			else if(cJSON_IsNull(json_value))
			{
				// Bind NULL value
				if(sqlite3_bind_null(stmt, i + 1) != SQLITE_OK)
				{
					log_err("Unable to bind NULL value to SQL statement: %s", sqlite3_errmsg(db));
					sqlite3_finalize(stmt);
					sqlite3_close(db);
					return false;
				}
			}
			else
			{
				// Unsupported JSON type (array, object, bool, ...)
				log_err("Unable to bind value to SQL statement: type = %X", (unsigned int)json_value->type & 0xFF);
				sqlite3_finalize(stmt);
				sqlite3_close(db);
				return false;
			}
		}

		// Execute SQL statement
		if(sqlite3_step(stmt) != SQLITE_DONE)
		{
			log_err("Unable to execute SQL statement: %s", sqlite3_errmsg(db));
			sqlite3_finalize(stmt);
			sqlite3_close(db);
			return false;
		}

		// Reset SQL statement so it can be re-executed for the next row
		if(sqlite3_reset(stmt) != SQLITE_OK)
		{
			log_err("Unable to reset SQL statement: %s", sqlite3_errmsg(db));
			sqlite3_finalize(stmt);
			sqlite3_close(db);
			return false;
		}
	}

	// Finalize SQL statement
	if(sqlite3_finalize(stmt) != SQLITE_OK)
	{
		log_err("Unable to finalize SQL statement: %s", sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Commit transaction
	if(sqlite3_exec(db, "COMMIT;", NULL, NULL, NULL) != SQLITE_OK)
	{
		log_err("Unable to commit transaction: %s", sqlite3_errmsg(db));
		sqlite3_close(db);
		return false;
	}

	// Close database connection
	sqlite3_close(db);

	return true;
}
/**
 * Process an uploaded Pi-hole v5.x Teleporter TAR.GZ archive.
 *
 * Decompresses the upload, imports the known v5.x JSON tables into the
 * gravity database, extracts a few legacy config files to disk for re-import
 * on restart, removes pihole.toml (and its rotated backups) so the extracted
 * legacy files take effect, and finally requests an FTL restart.
 *
 * @param api  Connection handle used for the JSON response
 * @param data Uploaded file (raw gzip bytes + metadata); always freed here
 * @return HTTP status code produced by the JSON response macros
 */
static int process_received_tar_gz(struct ftl_conn *api, struct upload_data *data)
{
	// Try to decompress the received data
	uint8_t *archive = NULL;
	mz_ulong archive_size = 0u;
	if(!inflate_buffer(data->data, data->filesize, &archive, &archive_size))
	{
		free_upload_data(data);
		return send_json_error(api, 400,
		                       "bad_request",
		                       "Invalid GZIP archive",
		                       "The uploaded file does not appear to be a valid gzip archive - decompression failed");
	}

	// Print all files in the TAR archive if in debug mode
	if(config.debug.api.v.b)
	{
		cJSON *json_files = list_files_in_tar(archive, archive_size);

		cJSON *file = NULL;
		cJSON_ArrayForEach(file, json_files)
		{
			const cJSON *name = cJSON_GetObjectItemCaseSensitive(file, "name");
			const cJSON *size = cJSON_GetObjectItemCaseSensitive(file, "size");
			if(name == NULL || size == NULL)
				continue;

			log_debug(DEBUG_API, "Found file in TAR archive: \"%s\" (%d bytes)",
			          name->valuestring, size->valueint);
		}

		// Free the file listing (previously leaked)
		cJSON_Delete(json_files);
	}

	// Parse JSON files in the TAR archive
	cJSON *imported_files = JSON_NEW_ARRAY();
	for(size_t i = 0; i < sizeof(teleporter_v5_files) / sizeof(struct teleporter_files); i++)
	{
		size_t fileSize = 0u;
		cJSON *json = NULL;
		const char *file = find_file_in_tar(archive, archive_size, teleporter_v5_files[i].filename, &fileSize);
		if(file != NULL && fileSize > 0u && (json = cJSON_ParseWithLength(file, fileSize)) != NULL)
		{
			if(import_json_table(json, &teleporter_v5_files[i]))
				JSON_COPY_STR_TO_ARRAY(imported_files, teleporter_v5_files[i].filename);

			// Free the parsed JSON tree (previously leaked)
			cJSON_Delete(json);
		}
	}

	// Temporarily write further files to disk so we can import them on restart
	struct {
		const char *archive_name;
		const char *destination;
	} extract_files[] = {
		{
			.archive_name = "custom.list",
			.destination = DNSMASQ_CUSTOM_LIST_LEGACY
		},{
			.archive_name = "dhcp.leases",
			.destination = DHCPLEASESFILE
		},{
			.archive_name = "pihole-FTL.conf",
			.destination = GLOBALCONFFILE_LEGACY
		},{
			.archive_name = "setupVars.conf",
			.destination = config.files.setupVars.v.s
		}
	};
	for(size_t i = 0; i < sizeof(extract_files) / sizeof(*extract_files); i++)
	{
		size_t fileSize = 0u;
		const char *file = find_file_in_tar(archive, archive_size, extract_files[i].archive_name, &fileSize);
		if(file != NULL && fileSize > 0u)
		{
			// Write file to disk
			log_info("Writing file \"%s\" (%zu bytes) to \"%s\"",
			         extract_files[i].archive_name, fileSize, extract_files[i].destination);
			FILE *fp = fopen(extract_files[i].destination, "wb");
			if(fp == NULL)
			{
				log_err("Unable to open file \"%s\" for writing: %s", extract_files[i].destination, strerror(errno));
				continue;
			}
			if(fwrite(file, fileSize, 1, fp) != 1)
			{
				log_err("Unable to write file \"%s\": %s", extract_files[i].destination, strerror(errno));
				fclose(fp);
				continue;
			}
			fclose(fp);
			JSON_COPY_STR_TO_ARRAY(imported_files, extract_files[i].destination);
		}
	}

	// Free the decompressed archive buffer, we are done with it
	// (previously leaked)
	free(archive);
	archive = NULL;

	// Append WEB_PORTS to setupVars.conf
	FILE *fp = fopen(config.files.setupVars.v.s, "a");
	if(fp == NULL)
		log_err("Unable to open file \"%s\" for appending: %s", config.files.setupVars.v.s, strerror(errno));
	else
	{
		fprintf(fp, "WEB_PORTS=%s\n", config.webserver.port.v.s);
		fclose(fp);
	}

	// Remove pihole.toml to prevent it from being imported on restart
	if(remove(GLOBALTOMLPATH) != 0)
		log_err("Unable to remove file \"%s\": %s", GLOBALTOMLPATH, strerror(errno));

	// Remove all rotated pihole.toml files to avoid automatic config
	// restore on restart
	for(unsigned int i = MAX_ROTATIONS; i > 0; i--)
	{
		const char *fname = GLOBALTOMLPATH;
		const char *filename = basename(fname);
		// extra 6 bytes is enough space for up to 999 rotations ("/", ".", "\0", "999")
		const size_t buflen = strlen(filename) + strlen(BACKUP_DIR) + 6;
		char *path = calloc(buflen, sizeof(char));
		if(path == NULL)
		{
			// Allocation failure: skip backup removal rather than crash
			log_err("Memory allocation failed in process_received_tar_gz()");
			break;
		}
		snprintf(path, buflen, BACKUP_DIR"/%s.%u", filename, i);

		// Remove file (if it exists)
		if(remove(path) != 0 && errno != ENOENT)
			log_err("Unable to remove file \"%s\": %s", path, strerror(errno));

		// Free the path buffer (previously leaked once per iteration)
		free(path);
	}

	// Free allocated memory
	free_upload_data(data);

	// Signal FTL we want to restart for re-import
	api->ftl.restart = true;

	// Send response
	cJSON *json = JSON_NEW_OBJECT();
	JSON_ADD_ITEM_TO_OBJECT(json, "files", imported_files);
	JSON_SEND_OBJECT(json);
}
int api_teleporter(struct ftl_conn *api)
{
if(api->method == HTTP_GET)

View File

@ -60,6 +60,12 @@
#include "tools/arp-scan.h"
// run_performance_test()
#include "config/password.h"
// idn2_to_ascii_lz()
#include <idn2.h>
// sha256sum()
#include "files.h"
// resolveHostname()
#include "resolve.h"
// defined in dnsmasq.c
extern void print_dnsmasq_version(const char *yellow, const char *green, const char *bold, const char *normal);
@ -333,7 +339,7 @@ void parse_args(int argc, char* argv[])
(strcmp(argv[1], "--read-x509") == 0 ||
strcmp(argv[1], "--read-x509-key") == 0))
{
if(argc < 2 || argc > 4)
if(argc > 4)
{
printf("Usage: %s %s [<input file>] [<domain>]\n", argv[0], argv[1]);
printf("Example: %s %s /etc/pihole/tls.pem\n", argv[0], argv[1]);
@ -384,14 +390,14 @@ void parse_args(int argc, char* argv[])
const bool antigravity = strcmp(argv[1], "antigravity") == 0;
// pihole-FTL gravity parseList <infile> <outfile> <adlistID>
if(argc == 6 && strcmp(argv[2], "parseList") == 0)
if(argc == 6 && strcasecmp(argv[2], "parseList") == 0)
{
// Parse the given list and write the result to the given file
exit(gravity_parseList(argv[3], argv[4], argv[5], false, antigravity));
}
// pihole-FTL gravity checkList <infile>
if(argc == 4 && strcmp(argv[2], "checkList") == 0)
if(argc == 4 && strcasecmp(argv[2], "checkList") == 0)
{
// Parse the given list and write the result to the given file
exit(gravity_parseList(argv[3], "", "-1", true, antigravity));
@ -427,6 +433,90 @@ void parse_args(int argc, char* argv[])
exit(run_arp_scan(scan_all, extreme_mode));
}
// IDN2 conversion mode
if(argc > 1 && strcmp(argv[1], "idn2") == 0)
{
// Enable stdout printing
cli_mode = true;
if(argc == 3)
{
// Convert unicode domain to punycode
char *punycode = NULL;
const int rc = idn2_to_ascii_lz(argv[2], &punycode, IDN2_NFC_INPUT | IDN2_NONTRANSITIONAL);
if (rc != IDN2_OK)
{
// Invalid domain name
printf("Invalid domain name: %s\n", argv[2]);
exit(EXIT_FAILURE);
}
// Convert punycode domain to lowercase
for(unsigned int i = 0u; i < strlen(punycode); i++)
punycode[i] = tolower(punycode[i]);
printf("%s\n", punycode);
exit(EXIT_SUCCESS);
}
else if(argc == 4 && (strcmp(argv[2], "-d") == 0 || strcmp(argv[2], "--decode") == 0))
{
// Convert punycode domain to unicode
char *unicode = NULL;
const int rc = idn2_to_unicode_lzlz(argv[3], &unicode, IDN2_NFC_INPUT | IDN2_NONTRANSITIONAL);
if (rc != IDN2_OK)
{
// Invalid domain name
printf("Invalid domain name: %s\n", argv[3]);
exit(EXIT_FAILURE);
}
printf("%s\n", unicode);
exit(EXIT_SUCCESS);
}
else
{
printf("Usage: %s idn2 [--decode] <domain>\n", argv[0]);
exit(EXIT_FAILURE);
}
}
// sha256sum mode
if(argc == 3 && strcmp(argv[1], "sha256sum") == 0)
{
// Enable stdout printing
cli_mode = true;
uint8_t checksum[SHA256_DIGEST_SIZE];
if(!sha256sum(argv[2], checksum))
exit(EXIT_FAILURE);
// Convert checksum to hex string
char hex[SHA256_DIGEST_SIZE*2+1];
sha256_raw_to_hex(checksum, hex);
// Print result
printf("%s %s\n", hex, argv[2]);
exit(EXIT_SUCCESS);
}
// Local reverse name resolver
if(argc == 3 && strcasecmp(argv[1], "ptr") == 0)
{
// Enable stdout printing
cli_mode = true;
// Need to get dns.port and the resolver settings
readFTLconf(&config, false);
char *name = resolveHostname(argv[2], true);
if(name == NULL)
exit(EXIT_FAILURE);
// Print result
printf("%s\n", name);
free(name);
exit(EXIT_SUCCESS);
}
// start from 1, as argv[0] is the executable name
for(int i = 1; i < argc; i++)
{
@ -467,6 +557,21 @@ void parse_args(int argc, char* argv[])
argv2[5 + j] = argv[i + 2 + j];
exit(sqlite3_shell_main(argc2, argv2));
}
// Special non-interative mode
else if(i+1 < argc && strcmp(argv[i+1], "-ni") == 0)
{
int argc2 = argc - i + 4 - 2;
char **argv2 = calloc(argc2, sizeof(char*));
argv2[0] = argv[0]; // Application name
argv2[1] = (char*)"-batch";
argv2[2] = (char*)"-init";
argv2[3] = (char*)"/dev/null";
// i = "sqlite3"
// i+1 = "-ni"
for(int j = 0; j < argc - i - 2; j++)
argv2[4 + j] = argv[i + 2 + j];
exit(sqlite3_shell_main(argc2, argv2));
}
else
exit(sqlite3_shell_main(argc - i, &argv[i]));
}
@ -810,19 +915,30 @@ void parse_args(int argc, char* argv[])
printf(" the script.\n\n");
printf("%sEmbedded SQLite3 shell:%s\n", yellow, normal);
printf("\t%ssql %s[-h]%s, %ssqlite3 %s[-h]%s FTL's SQLite3 shell\n", green, purple, normal, green, purple, normal);
printf("\t%s-h%s starts a special %shuman-readable mode%s\n\n", purple, normal, bold, normal);
printf("\t%ssql%s, %ssqlite3%s FTL's SQLite3 shell\n", green, normal, green, normal);
printf(" Usage: %spihole-FTL sqlite3 %s[-h] %s[OPTIONS] [FILENAME] [SQL]%s\n\n", green, purple, cyan, normal);
printf(" Usage: %spihole-FTL sqlite3 %s[OPTIONS] [FILENAME] [SQL]%s\n\n", green, cyan, normal);
printf(" Options:\n\n");
printf(" - %s[OPTIONS]%s is an optional set of options. All available\n", cyan, normal);
printf(" options can be found in %spihole-FTL sqlite3 --help%s\n", green, normal);
printf(" options can be found in %spihole-FTL sqlite3 --help%s.\n", green, normal);
printf(" The first option can be either %s-h%s or %s-ni%s, see below.\n", purple, normal, purple, normal);
printf(" - %s[FILENAME]%s is the optional name of an SQLite database.\n", cyan, normal);
printf(" A new database is created if the file does not previously\n");
printf(" exist. If this argument is omitted, SQLite3 will use a\n");
printf(" transient in-memory database instead.\n");
printf(" - %s[SQL]%s is an optional SQL statement to be executed. If\n", cyan, normal);
printf(" omitted, an interactive shell is started instead.\n\n");
printf(" There are two special %spihole-FTL sqlite3%s mode switches:\n", green, normal);
printf(" %s-h%s %shuman-readable%s mode:\n", purple, normal, bold, normal);
printf(" In this mode, the output of the shell is formatted in\n");
printf(" a human-readable way. This is especially useful for\n");
printf(" debugging purposes. %s-h%s is a shortcut for\n", purple, normal);
printf(" %spihole-FTL sqlite3 %s-column -header -nullvalue '(null)'%s\n\n", green, purple, normal);
printf(" %s-ni%s %snon-interative%s mode\n", purple, normal, bold, normal);
printf(" In this mode, batch mode is enforced and any possibly\n");
printf(" existing .sqliterc file is ignored. %s-ni%s is a shortcut\n", purple, normal);
printf(" for %spihole-FTL sqlite3 %s-batch -init /dev/null%s\n\n", green, purple, normal);
printf(" Usage: %spihole-FTL sqlite3 %s-ni %s[OPTIONS] [FILENAME] [SQL]%s\n\n", green, purple, cyan, normal);
printf("%sEmbedded dnsmasq options:%s\n", yellow, normal);
printf("\t%sdnsmasq-test%s Test syntax of dnsmasq's config\n", green, normal);
@ -878,7 +994,15 @@ void parse_args(int argc, char* argv[])
printf(" per line (no HOSTS lists, etc.)\n\n");
printf(" Usage: %spihole-FTL gravity checkList %sinfile%s\n\n", green, cyan, normal);
printf("%sIDN2 conversion:%s\n", yellow, normal);
printf(" Convert a given internationalized domain name (IDN) to\n");
printf(" punycode or vice versa.\n\n");
printf(" Encoding: %spihole-FTL idn2 %sdomain%s\n", green, cyan, normal);
printf(" Decoding: %spihole-FTL idn2 -d %spunycode%s\n\n", green, cyan, normal);
printf("%sOther:%s\n", yellow, normal);
printf("\t%sptr %sIP%s Resolve IP address to hostname\n", green, cyan, normal);
printf("\t%ssha256sum %sfile%s Calculate SHA256 checksum of a file\n", green, cyan, normal);
printf("\t%sdhcp-discover%s Discover DHCP servers in the local\n", green, normal);
printf("\t network\n");
printf("\t%sarp-scan %s[-a/-x]%s Use ARP to scan local network for\n", green, cyan, normal);

View File

@ -15,12 +15,18 @@ set(sources
config.h
dnsmasq_config.c
dnsmasq_config.h
env.c
env.h
inotify.c
inotify.h
legacy_reader.c
legacy_reader.h
password.c
password.h
suggest.c
suggest.h
setupVars.c
setupVars.h
toml_writer.c
toml_writer.h
toml_reader.c

View File

@ -22,6 +22,17 @@
#include "config/password.h"
// check_capability()
#include "capabilities.h"
// suggest_closest_conf_key()
#include "config/suggest.h"
// Exit codes returned by the `pihole-FTL --config key value` CLI path
// (set_config_from_CLI and friends) so scripts can distinguish failure modes
enum exit_codes {
	OKAY = 0, // Success
	FAIL = 1, // Generic failure
	VALUE_INVALID = 2, // Supplied value could not be parsed for this key
	DNSMASQ_TEST_FAILED = 3, // New value failed the dnsmasq config test
	KEY_UNKNOWN = 4, // No config option with this name exists
	ENV_VAR_FORCED = 5, // Option is read-only (forced via environment variable)
} __attribute__((packed));
// Read a TOML value from a table depending on its type
static bool readStringValue(struct conf_item *conf_item, const char *value, struct config *newconf)
@ -160,8 +171,9 @@ static bool readStringValue(struct conf_item *conf_item, const char *value, stru
// Get password hash as allocated string (an empty string is hashed to an empty string)
char *pwhash = strlen(value) > 0 ? create_password(value) : strdup("");
// Verify that the password hash is valid
if(verify_password(value, pwhash, false) != PASSWORD_CORRECT)
// Verify that the password hash is either valid or empty
const enum password_result status = verify_password(value, pwhash, false);
if(status != PASSWORD_CORRECT && status != NO_PASSWORD_SET)
{
log_err("Failed to create password hash (verification failed), password remains unchanged");
free(pwhash);
@ -297,7 +309,12 @@ static bool readStringValue(struct conf_item *conf_item, const char *value, stru
case CONF_STRUCT_IN_ADDR:
{
struct in_addr addr4 = { 0 };
if(inet_pton(AF_INET, value, &addr4))
if(strlen(value) == 0)
{
// Special case: empty string -> 0.0.0.0
conf_item->v.in_addr.s_addr = INADDR_ANY;
}
else if(inet_pton(AF_INET, value, &addr4))
memcpy(&conf_item->v.in_addr, &addr4, sizeof(addr4));
else
{
@ -309,7 +326,12 @@ static bool readStringValue(struct conf_item *conf_item, const char *value, stru
case CONF_STRUCT_IN6_ADDR:
{
struct in6_addr addr6 = { 0 };
if(inet_pton(AF_INET6, value, &addr6))
if(strlen(value) == 0)
{
// Special case: empty string -> ::
memcpy(&conf_item->v.in6_addr, &in6addr_any, sizeof(in6addr_any));
}
else if(inet_pton(AF_INET6, value, &addr6))
memcpy(&conf_item->v.in6_addr, &addr6, sizeof(addr6));
else
{
@ -391,7 +413,7 @@ int set_config_from_CLI(const char *key, const char *value)
{
log_err("Config option %s is read-only (set via environmental variable)", key);
free_config(&newconf);
return 5;
return ENV_VAR_FORCED;
}
// This is the config option we are looking for
@ -407,16 +429,22 @@ int set_config_from_CLI(const char *key, const char *value)
// Check if we found the config option
if(new_item == NULL)
{
log_err("Unknown config option: %s", key);
unsigned int N = 0;
char **matches = suggest_closest_conf_key(false, key, &N);
log_err("Unknown config option %s, did you mean:", key);
for(unsigned int i = 0; i < N; i++)
log_err(" - %s", matches[i]);
free(matches);
free_config(&newconf);
return 4;
return KEY_UNKNOWN;
}
// Parse value
if(!readStringValue(new_item, value, &newconf))
{
free_config(&newconf);
return 2;
return VALUE_INVALID;
}
// Check if value changed compared to current value
@ -436,7 +464,7 @@ int set_config_from_CLI(const char *key, const char *value)
// Test failed
log_debug(DEBUG_CONFIG, "Config item %s: dnsmasq config test failed", conf_item->k);
free_config(&newconf);
return 3;
return DNSMASQ_TEST_FAILED;
}
}
else if(conf_item == &config.dns.hosts)
@ -464,20 +492,41 @@ int set_config_from_CLI(const char *key, const char *value)
putchar('\n');
writeFTLtoml(false);
return EXIT_SUCCESS;
return OKAY;
}
int get_config_from_CLI(const char *key, const bool quiet)
{
// Identify config option
struct conf_item *conf_item = NULL;
// We first loop over all config options to check if the one we are
// looking for is an exact match, use partial match otherwise
bool exactMatch = false;
for(unsigned int i = 0; i < CONFIG_ELEMENTS; i++)
{
// Get pointer to memory location of this conf_item
struct conf_item *item = get_conf_item(&config, i);
// Check if item.k is identical with key
if(strcmp(item->k, key) == 0)
{
exactMatch = true;
break;
}
}
// Loop over all config options again to find the one we are looking for
// (possibly partial match)
for(unsigned int i = 0; i < CONFIG_ELEMENTS; i++)
{
// Get pointer to memory location of this conf_item
struct conf_item *item = get_conf_item(&config, i);
// Check if item.k starts with key
if(key != NULL && strncmp(item->k, key, strlen(key)) != 0)
if(key != NULL &&
((exactMatch && strcmp(item->k, key) != 0) ||
(!exactMatch && strncmp(item->k, key, strlen(key)))))
continue;
// Skip write-only options
@ -500,16 +549,22 @@ int get_config_from_CLI(const char *key, const bool quiet)
}
// Check if we found the config option
if(key != NULL && conf_item == NULL)
if(conf_item == NULL)
{
log_err("Unknown config option: %s", key);
return 2;
unsigned int N = 0;
char **matches = suggest_closest_conf_key(false, key, &N);
log_err("Unknown config option %s, did you mean:", key);
for(unsigned int i = 0; i < N; i++)
log_err(" - %s", matches[i]);
free(matches);
return KEY_UNKNOWN;
}
// Use return status if this is a boolean value
// and we are in quiet mode
if(quiet && conf_item->t == CONF_BOOL)
return conf_item->v.b ? EXIT_SUCCESS : EXIT_FAILURE;
if(quiet && conf_item != NULL && conf_item->t == CONF_BOOL)
return conf_item->v.b ? OKAY : FAIL;
return EXIT_SUCCESS;
return OKAY;
}

View File

@ -12,7 +12,7 @@
#include "config/config.h"
#include "config/toml_reader.h"
#include "config/toml_writer.h"
#include "setupVars.h"
#include "config/setupVars.h"
#include "log.h"
#include "log.h"
// readFTLlegacy()
@ -29,9 +29,14 @@
#include "api/api.h"
// exit_code
#include "signals.h"
// getEnvVars()
#include "config/env.h"
// sha256sum()
#include "files.h"
struct config config = { 0 };
static bool config_initialized = false;
uint8_t last_checksum[SHA256_DIGEST_SIZE] = { 0 };
// Private prototypes
static bool port_in_use(const in_port_t port);
@ -193,7 +198,7 @@ struct conf_item *get_conf_item(struct config *conf, const unsigned int n)
}
// Return n-th config element
return (void*)conf + n*sizeof(struct conf_item);
return (struct conf_item *)conf + n;
}
struct conf_item *get_debug_item(struct config *conf, const enum debug_flag debug)
@ -206,7 +211,7 @@ struct conf_item *get_debug_item(struct config *conf, const enum debug_flag debu
}
// Return n-th config element
return (void*)&conf->debug + debug*sizeof(struct conf_item);
return (struct conf_item *)&conf->debug + debug;
}
unsigned int __attribute__ ((pure)) config_path_depth(char **paths)
@ -426,8 +431,8 @@ void initConfig(struct config *conf)
struct enum_options piholePTR[] =
{
{ get_ptr_type_str(PTR_NONE), "Pi-hole will not respond automatically on PTR requests to local interface addresses. Ensure pi.hole and/or hostname records exist elsewhere." },
{ get_ptr_type_str(PTR_HOSTNAME), "Pi-hole will not respond automatically on PTR requests to local interface addresses. Ensure pi.hole and/or hostname records exist elsewhere." },
{ get_ptr_type_str(PTR_HOSTNAMEFQDN), "Serve the machine's global hostname as fully qualified domain by adding the local suffix. If no local suffix has been defined, FTL appends the local domain .no_fqdn_available. In this case you should either add domain=whatever.com to a custom config file inside /etc/dnsmasq.d/ (to set whatever.com as local domain) or use domain=# which will try to derive the local domain from /etc/resolv.conf (or whatever is set with resolv-file, when multiple search directives exist, the first one is used)." },
{ get_ptr_type_str(PTR_HOSTNAME), "Serve the machine's hostname. The hostname is queried from the kernel through uname(2)->nodename. If the machine has multiple network interfaces, it can also have multiple nodenames. In this case, it is unspecified and up to the kernel which one will be returned. On Linux, the returned string is what has been set using sethostname(2) which is typically what has been set in /etc/hostname." },
{ get_ptr_type_str(PTR_HOSTNAMEFQDN), "Serve the machine's hostname (see limitations above) as fully qualified domain by adding the local domain. If no local domain has been defined (config option dns.domain), FTL tries to query the domain name from the kernel using getdomainname(2). If this fails, FTL appends \".no_fqdn_available\" to the hostname." },
{ get_ptr_type_str(PTR_PIHOLE), "Respond with \"pi.hole\"." }
};
CONFIG_ADD_ENUM_OPTIONS(conf->dns.piholePTR.a, piholePTR);
@ -494,7 +499,7 @@ void initConfig(struct config *conf)
conf->dns.dnssec.h = "Validate DNS replies using DNSSEC?";
conf->dns.dnssec.t = CONF_BOOL;
conf->dns.dnssec.f = FLAG_RESTART_FTL;
conf->dns.dnssec.d.b = true;
conf->dns.dnssec.d.b = false;
conf->dns.interface.k = "dns.interface";
conf->dns.interface.h = "Interface to use for DNS (see also dnsmasq.listening.mode) and DHCP (if enabled)";
@ -554,10 +559,10 @@ void initConfig(struct config *conf)
conf->dns.cache.size.d.ui = 10000u;
conf->dns.cache.optimizer.k = "dns.cache.optimizer";
conf->dns.cache.optimizer.h = "Query cache optimizer: If a DNS name exists in the cache, but its time-to-live has expired only recently, the data will be used anyway (a refreshing from upstream is triggered). This can improve DNS query delays especially over unreliable Internet connections. This feature comes at the expense of possibly sometimes returning out-of-date data and less efficient cache utilisation, since old data cannot be flushed when its TTL expires, so the cache becomes mostly least-recently-used. To mitigate issues caused by massively outdated DNS replies, the maximum overaging of cached records is limited. We strongly recommend staying below 86400 (1 day) with this option.";
conf->dns.cache.optimizer.t = CONF_UINT;
conf->dns.cache.optimizer.h = "Query cache optimizer: If a DNS name exists in the cache, but its time-to-live has expired only recently, the data will be used anyway (a refreshing from upstream is triggered). This can improve DNS query delays especially over unreliable Internet connections. This feature comes at the expense of possibly sometimes returning out-of-date data and less efficient cache utilization, since old data cannot be flushed when its TTL expires, so the cache becomes mostly least-recently-used. To mitigate issues caused by massively outdated DNS replies, the maximum overaging of cached records is limited. We strongly recommend staying below 86400 (1 day) with this option.\n Setting the TTL excess time to zero will serve stale cache data regardless how long it has expired. This is not recommended as it may lead to stale data being served for a long time. Setting this option to any negative value will disable this feature altogether.";
conf->dns.cache.optimizer.t = CONF_INT;
conf->dns.cache.optimizer.f = FLAG_RESTART_FTL | FLAG_ADVANCED_SETTING;
conf->dns.cache.optimizer.d.ui = 3600u;
conf->dns.cache.optimizer.d.i = 3600u;
// sub-struct dns.blocking
conf->dns.blocking.active.k = "dns.blocking.active";
@ -581,6 +586,13 @@ void initConfig(struct config *conf)
conf->dns.blocking.mode.t = CONF_ENUM_BLOCKING_MODE;
conf->dns.blocking.mode.d.blocking_mode = MODE_NULL;
conf->dns.revServers.k = "dns.revServers";
conf->dns.revServers.h = "Reverse server (formerly also called \"conditional forwarding\") feature\n Array of reverse servers each one in one of the following forms: \"<enabled>,<ip-address>[/<prefix-len>],<server>[#<port>],<domain>\"\n\n Individual components:\n\n <enabled>: either \"true\" or \"false\"\n\n <ip-address>[/<prefix-len>]: Address range for the reverse server feature in CIDR notation. If the prefix length is omitted, either 32 (IPv4) or 128 (IPv6) are substituted (exact address match). This is almost certainly not what you want here.\n Example: \"192.168.0.0/24\" for the range 192.168.0.1 - 192.168.0.255\n\n <server>[#<port>]: Target server to be used for the reverse server feature\n Example: \"192.168.0.1#53\"\n\n <domain>: Domain used for the reverse server feature (e.g., \"fritz.box\")\n Example: \"fritz.box\"";
conf->dns.revServers.a = cJSON_CreateStringReference("array of reverse servers each one in one of the following forms: \"<enabled>,<ip-address>[/<prefix-len>],<server>[#<port>],<domain>\", e.g., \"true,192.168.0.0/24,192.168.0.1,fritz.box\"");
conf->dns.revServers.t = CONF_JSON_STRING_ARRAY;
conf->dns.revServers.d.json = cJSON_CreateArray();
conf->dns.revServers.f = FLAG_RESTART_FTL;
// sub-struct dns.rate_limit
conf->dns.rateLimit.count.k = "dns.rateLimit.count";
conf->dns.rateLimit.count.h = "Rate-limited queries are answered with a REFUSED reply and not further processed by FTL.\n The default settings for FTL's rate-limiting are to permit no more than 1000 queries in 60 seconds. Both numbers can be customized independently. It is important to note that rate-limiting is happening on a per-client basis. Other clients can continue to use FTL while rate-limited clients are short-circuited at the same time.\n For this setting, both numbers, the maximum number of queries within a given time, and the length of the time interval (seconds) have to be specified. For instance, if you want to set a rate limit of 1 query per hour, the option should look like RATE_LIMIT=1/3600. The time interval is relative to when FTL has finished starting (start of the daemon + possible delay by DELAY_STARTUP) then it will advance in steps of the rate-limiting interval. If a client reaches the maximum number of queries it will be blocked until the end of the current interval. This will be logged to /var/log/pihole/FTL.log, e.g. Rate-limiting 10.0.1.39 for at least 44 seconds. If the client continues to send queries while being blocked already and this number of queries during the blocking exceeds the limit the client will continue to be blocked until the end of the next interval (FTL.log will contain lines like Still rate-limiting 10.0.1.39 as it made additional 5007 queries). As soon as the client requests less than the set limit, it will be unblocked (Ending rate-limitation of 10.0.1.39).\n Rate-limiting may be disabled altogether by setting both values to zero (this results in the same behavior as before FTL v5.7).\n How many queries are permitted...";
@ -656,34 +668,6 @@ void initConfig(struct config *conf)
conf->dns.reply.blocking.v6.f = FLAG_ADVANCED_SETTING;
memset(&conf->dns.reply.blocking.v6.d.in6_addr, 0, sizeof(struct in6_addr));
// sub-struct revServer
conf->dns.revServer.active.k = "dns.revServer.active";
conf->dns.revServer.active.h = "Is the reverse server (former also called \"conditional forwarding\") feature enabled?";
conf->dns.revServer.active.t = CONF_BOOL;
conf->dns.revServer.active.d.b = false;
conf->dns.revServer.active.f = FLAG_RESTART_FTL;
conf->dns.revServer.cidr.k = "dns.revServer.cidr";
conf->dns.revServer.cidr.h = "Address range for the reverse server feature in CIDR notation. If the prefix length is omitted, either 32 (IPv4) or 128 (IPv6) are substituted (exact address match). This is almost certainly not what you want here.";
conf->dns.revServer.cidr.a = cJSON_CreateStringReference("<ip-address>[/<prefix-len>], e.g., \"192.168.0.0/24\" for the range 192.168.0.1 - 192.168.0.255");
conf->dns.revServer.cidr.t = CONF_STRING;
conf->dns.revServer.cidr.d.s = (char*)"";
conf->dns.revServer.cidr.f = FLAG_RESTART_FTL;
conf->dns.revServer.target.k = "dns.revServer.target";
conf->dns.revServer.target.h = "Target server to be used for the reverse server feature";
conf->dns.revServer.target.a = cJSON_CreateStringReference("<server>[#<port>], e.g., \"192.168.0.1\"");
conf->dns.revServer.target.t = CONF_STRING;
conf->dns.revServer.target.d.s = (char*)"";
conf->dns.revServer.target.f = FLAG_RESTART_FTL;
conf->dns.revServer.domain.k = "dns.revServer.domain";
conf->dns.revServer.domain.h = "Domain used for the reverse server feature";
conf->dns.revServer.domain.a = cJSON_CreateStringReference("<valid domain>, typically set to the same value as dhcp.domain");
conf->dns.revServer.domain.t = CONF_STRING;
conf->dns.revServer.domain.d.s = (char*)"";
conf->dns.revServer.domain.f = FLAG_RESTART_FTL;
// sub-struct dhcp
conf->dhcp.active.k = "dhcp.active";
conf->dhcp.active.h = "Is the embedded DHCP server enabled?";
@ -693,31 +677,31 @@ void initConfig(struct config *conf)
conf->dhcp.start.k = "dhcp.start";
conf->dhcp.start.h = "Start address of the DHCP address pool";
conf->dhcp.start.a = cJSON_CreateStringReference("<ip-addr>, e.g., \"192.168.0.10\"");
conf->dhcp.start.t = CONF_STRING;
conf->dhcp.start.a = cJSON_CreateStringReference("<valid IPv4 address> or empty string (\"\"), e.g., \"192.168.0.10\"");
conf->dhcp.start.t = CONF_STRUCT_IN_ADDR;
conf->dhcp.start.f = FLAG_RESTART_FTL;
conf->dhcp.start.d.s = (char*)"";
memset(&conf->dhcp.start.d.in_addr, 0, sizeof(struct in_addr));
conf->dhcp.end.k = "dhcp.end";
conf->dhcp.end.h = "End address of the DHCP address pool";
conf->dhcp.end.a = cJSON_CreateStringReference("<ip-addr>, e.g., \"192.168.0.250\"");
conf->dhcp.end.t = CONF_STRING;
conf->dhcp.end.a = cJSON_CreateStringReference("<valid IPv4 address> or empty string (\"\"), e.g., \"192.168.0.250\"");
conf->dhcp.end.t = CONF_STRUCT_IN_ADDR;
conf->dhcp.end.f = FLAG_RESTART_FTL;
conf->dhcp.end.d.s = (char*)"";
memset(&conf->dhcp.end.d.in_addr, 0, sizeof(struct in_addr));
conf->dhcp.router.k = "dhcp.router";
conf->dhcp.router.h = "Address of the gateway to be used (typically the address of your router in a home installation)";
conf->dhcp.router.a = cJSON_CreateStringReference("<ip-addr>, e.g., \"192.168.0.1\"");
conf->dhcp.router.t = CONF_STRING;
conf->dhcp.router.a = cJSON_CreateStringReference("<valid IPv4 address> or empty string (\"\"), e.g., \"192.168.0.1\"");
conf->dhcp.router.t = CONF_STRUCT_IN_ADDR;
conf->dhcp.router.f = FLAG_RESTART_FTL;
conf->dhcp.router.d.s = (char*)"";
memset(&conf->dhcp.router.d.in_addr, 0, sizeof(struct in_addr));
conf->dhcp.domain.k = "dhcp.domain";
conf->dhcp.domain.h = "The DNS domain used by your Pi-hole (*** DEPRECATED ***)\n This setting is deprecated and will be removed in a future version. Please use dns.domain instead. Setting it to any non-default value will overwrite the value of dns.domain if it is still set to its default value.";
conf->dhcp.domain.a = cJSON_CreateStringReference("<any valid domain>");
conf->dhcp.domain.t = CONF_STRING;
conf->dhcp.domain.f = FLAG_RESTART_FTL | FLAG_ADVANCED_SETTING;
conf->dhcp.domain.d.s = (char*)"lan";
conf->dhcp.netmask.k = "dhcp.netmask";
conf->dhcp.netmask.h = "The netmask used by your Pi-hole. For directly connected networks (i.e., networks on which the machine running Pi-hole has an interface) the netmask is optional and may be set to an empty string (\"\"): it will then be determined from the interface configuration itself. For networks which receive DHCP service via a relay agent, we cannot determine the netmask itself, so it should explicitly be specified, otherwise Pi-hole guesses based on the class (A, B or C) of the network address.";
conf->dhcp.netmask.a = cJSON_CreateStringReference("<any valid netmask> (e.g., \"255.255.255.0\") or empty string (\"\") for auto-discovery");
conf->dhcp.netmask.t = CONF_STRUCT_IN_ADDR;
conf->dhcp.netmask.f = FLAG_RESTART_FTL | FLAG_ADVANCED_SETTING;
memset(&conf->dhcp.netmask.d.in_addr, 0, sizeof(struct in_addr));
conf->dhcp.leaseTime.k = "dhcp.leaseTime";
conf->dhcp.leaseTime.h = "If the lease time is given, then leases will be given for that length of time. If not given, the default lease time is one hour for IPv4 and one day for IPv6.";
@ -826,6 +810,7 @@ void initConfig(struct config *conf)
conf->webserver.domain.h = "On which domain is the web interface served?";
conf->webserver.domain.a = cJSON_CreateStringReference("<valid domain>");
conf->webserver.domain.t = CONF_STRING;
conf->webserver.domain.f = FLAG_ADVANCED_SETTING | FLAG_RESTART_FTL;
conf->webserver.domain.d.s = (char*)"pi.hole";
conf->webserver.acl.k = "webserver.acl";
@ -836,7 +821,7 @@ void initConfig(struct config *conf)
conf->webserver.acl.d.s = (char*)"";
conf->webserver.port.k = "webserver.port";
conf->webserver.port.h = "Ports to be used by the webserver.\n Comma-separated list of ports to listen on. It is possible to specify an IP address to bind to. In this case, an IP address and a colon must be prepended to the port number. For example, to bind to the loopback interface on port 80 (IPv4) and to all interfaces port 8080 (IPv4), use \"127.0.0.1:80,8080\". \"[::]:80\" can be used to listen to IPv6 connections to port 80. IPv6 addresses of network interfaces can be specified as well, e.g. \"[::1]:80\" for the IPv6 loopback interface. [::]:80 will bind to port 80 IPv6 only.\n In order to use port 80 for all interfaces, both IPv4 and IPv6, use either the configuration \"80,[::]:80\" (create one socket for IPv4 and one for IPv6 only), or \"+80\" (create one socket for both, IPv4 and IPv6). The + notation to use IPv4 and IPv6 will only work if no network interface is specified. Depending on your operating system version and IPv6 network environment, some configurations might not work as expected, so you have to test to find the configuration most suitable for your needs. In case \"+80\" does not work for your environment, you need to use \"80,[::]:80\".\n If the port is TLS/SSL, a letter 's' must be appended, for example, \"80,443s\" will open port 80 and port 443, and connections on port 443 will be encrypted. For non-encrypted ports, it is allowed to append letter 'r' (as in redirect). Redirected ports will redirect all their traffic to the first configured SSL port. For example, if webserver.port is \"80r,443s\", then all HTTP traffic coming at port 80 will be redirected to HTTPS port 443.";
conf->webserver.port.h = "Ports to be used by the webserver.\n Comma-separated list of ports to listen on. It is possible to specify an IP address to bind to. In this case, an IP address and a colon must be prepended to the port number. For example, to bind to the loopback interface on port 80 (IPv4) and to all interfaces port 8080 (IPv4), use \"127.0.0.1:80,8080\". \"[::]:80\" can be used to listen to IPv6 connections to port 80. IPv6 addresses of network interfaces can be specified as well, e.g. \"[::1]:80\" for the IPv6 loopback interface. [::]:80 will bind to port 80 IPv6 only.\n In order to use port 80 for all interfaces, both IPv4 and IPv6, use either the configuration \"80,[::]:80\" (create one socket for IPv4 and one for IPv6 only), or \"+80\" (create one socket for both, IPv4 and IPv6). The + notation to use IPv4 and IPv6 will only work if no network interface is specified. Depending on your operating system version and IPv6 network environment, some configurations might not work as expected, so you have to test to find the configuration most suitable for your needs. In case \"+80\" does not work for your environment, you need to use \"80,[::]:80\".\n If the port is TLS/SSL, a letter 's' must be appended, for example, \"80,443s\" will open port 80 and port 443, and connections on port 443 will be encrypted. For non-encrypted ports, it is allowed to append letter 'r' (as in redirect). Redirected ports will redirect all their traffic to the first configured SSL port. For example, if webserver.port is \"80r,443s\", then all HTTP traffic coming at port 80 will be redirected to HTTPS port 443. If this value is not set (empty string), the web server will not be started and, hence, the API will not be available.";
conf->webserver.port.a = cJSON_CreateStringReference("comma-separated list of <[ip_address:]port>");
conf->webserver.port.f = FLAG_ADVANCED_SETTING | FLAG_RESTART_FTL;
conf->webserver.port.t = CONF_STRING;
@ -858,7 +843,7 @@ void initConfig(struct config *conf)
conf->webserver.session.timeout.k = "webserver.session.timeout";
conf->webserver.session.timeout.h = "Session timeout in seconds. If a session is inactive for more than this time, it will be terminated. Sessions are continuously refreshed by the web interface, preventing sessions from timing out while the web interface is open.\n This option may also be used to make logins persistent for long times, e.g. 86400 seconds (24 hours), 604800 seconds (7 days) or 2592000 seconds (30 days). Note that the total number of concurrent sessions is limited so setting this value too high may result in users being rejected and unable to log in if there are already too many sessions active.";
conf->webserver.session.timeout.t = CONF_UINT;
conf->webserver.session.timeout.d.ui = 300u;
conf->webserver.session.timeout.d.ui = 1800u;
conf->webserver.session.restore.k = "webserver.session.restore";
conf->webserver.session.restore.h = "Should Pi-hole backup and restore sessions from the database? This is useful if you want to keep your sessions after a restart of the web interface.";
@ -952,14 +937,14 @@ void initConfig(struct config *conf)
conf->webserver.api.app_pwhash.d.s = (char*)"";
conf->webserver.api.excludeClients.k = "webserver.api.excludeClients";
conf->webserver.api.excludeClients.h = "Array of clients to be excluded from certain API responses\n Example: [ \"192.168.2.56\", \"fe80::341\", \"localhost\" ]";
conf->webserver.api.excludeClients.a = cJSON_CreateStringReference("array of IP addresses and/or hostnames");
conf->webserver.api.excludeClients.h = "Array of clients to be excluded from certain API responses (regex):\n - Query Log (/api/queries)\n - Top Clients (/api/stats/top_clients)\n This setting accepts both IP addresses (IPv4 and IPv6) as well as hostnames.\n Note that backslashes \"\\\" need to be escaped, i.e. \"\\\\\" in this setting\n\n Example: [ \"^192\\\\.168\\\\.2\\\\.56$\", \"^fe80::341:[0-9a-f]*$\", \"^localhost$\" ]";
conf->webserver.api.excludeClients.a = cJSON_CreateStringReference("array of regular expressions describing clients");
conf->webserver.api.excludeClients.t = CONF_JSON_STRING_ARRAY;
conf->webserver.api.excludeClients.d.json = cJSON_CreateArray();
conf->webserver.api.excludeDomains.k = "webserver.api.excludeDomains";
conf->webserver.api.excludeDomains.h = "Array of domains to be excluded from certain API responses\n Example: [ \"google.de\", \"pi-hole.net\" ]";
conf->webserver.api.excludeDomains.a = cJSON_CreateStringReference("array of IP addresses and/or hostnames");
conf->webserver.api.excludeDomains.h = "Array of domains to be excluded from certain API responses (regex):\n - Query Log (/api/queries)\n - Top Clients (/api/stats/top_domains)\n Note that backslashes \"\\\" need to be escaped, i.e. \"\\\\\" in this setting\n\n Example: [ \"(^|\\\\.)\\\\.google\\\\.de$\", \"\\\\.pi-hole\\\\.net$\" ]";
conf->webserver.api.excludeDomains.a = cJSON_CreateStringReference("array of regular expressions describing domains");
conf->webserver.api.excludeDomains.t = CONF_JSON_STRING_ARRAY;
conf->webserver.api.excludeDomains.d.json = cJSON_CreateArray();
@ -968,6 +953,11 @@ void initConfig(struct config *conf)
conf->webserver.api.maxHistory.t = CONF_UINT;
conf->webserver.api.maxHistory.d.ui = MAXLOGAGE*3600;
conf->webserver.api.maxClients.k = "webserver.api.maxClients";
conf->webserver.api.maxClients.h = "Up to how many clients should be returned in the activity graph endpoint (/api/history/clients)?\n This setting can be overwritten at run-time using the parameter N";
conf->webserver.api.maxClients.t = CONF_UINT16;
conf->webserver.api.maxClients.d.u16 = 10;
conf->webserver.api.allow_destructive.k = "webserver.api.allow_destructive";
conf->webserver.api.allow_destructive.h = "Allow destructive API calls (e.g. deleting all queries, powering off the system, ...)";
conf->webserver.api.allow_destructive.t = CONF_BOOL;
@ -1016,6 +1006,13 @@ void initConfig(struct config *conf)
conf->files.gravity.f = FLAG_ADVANCED_SETTING | FLAG_RESTART_FTL;
conf->files.gravity.d.s = (char*)"/etc/pihole/gravity.db";
conf->files.gravity_tmp.k = "files.gravity_tmp";
conf->files.gravity_tmp.h = "A temporary directory where Pi-hole can store files during gravity updates. This directory must be writable by the user running gravity (typically pihole).";
conf->files.gravity_tmp.a = cJSON_CreateStringReference("<any existing world-writable directory>");
conf->files.gravity_tmp.t = CONF_STRING;
conf->files.gravity_tmp.f = FLAG_ADVANCED_SETTING | FLAG_RESTART_FTL;
conf->files.gravity_tmp.d.s = (char*)"/tmp";
conf->files.macvendor.k = "files.macvendor";
conf->files.macvendor.h = "The database containing MAC -> Vendor information for the network table";
conf->files.macvendor.a = cJSON_CreateStringReference("<any Pi-hole macvendor database>");
@ -1101,6 +1098,12 @@ void initConfig(struct config *conf)
conf->misc.dnsmasq_lines.f = FLAG_ADVANCED_SETTING | FLAG_RESTART_FTL;
conf->misc.dnsmasq_lines.d.json = cJSON_CreateArray();
conf->misc.extraLogging.k = "misc.extraLogging";
conf->misc.extraLogging.h = "Log additional information about queries and replies to pihole.log\n When this setting is enabled, the log has extra information at the start of each line. This consists of a serial number which ties together the log lines associated with an individual query, and the IP address of the requestor. This setting is only effective if dns.queryLogging is enabled, too. This option is only useful for debugging and is not recommended for normal use.";
conf->misc.extraLogging.t = CONF_BOOL;
conf->misc.extraLogging.f = FLAG_RESTART_FTL;
conf->misc.extraLogging.d.b = false;
// sub-struct misc.check
conf->misc.check.load.k = "misc.check.load";
conf->misc.check.load.h = "Pi-hole is very lightweight on resources. Nevertheless, this does not mean that you should run Pi-hole on a server that is otherwise extremely busy as queuing on the system can lead to unnecessary delays in DNS operation as the system becomes less and less usable as the system load increases because all resources are permanently in use. To account for this, FTL regularly checks the system load. To bring this to your attention, FTL warns about excessive load when the 15 minute system load average exceeds the number of cores.\n This check can be disabled with this setting.";
@ -1174,7 +1177,7 @@ void initConfig(struct config *conf)
conf->debug.regex.d.b = false;
conf->debug.api.k = "debug.api";
conf->debug.api.h = "Print extra debugging information during telnet API calls. Currently only used to send extra information when getting all queries.";
conf->debug.api.h = "Print extra debugging information concerning API calls. This includes the request, the request parameters, and the internal details about how the algorithms decide which data to present and in what form. This very verbose output should only be used when debugging specific API issues and can be helpful, e.g., when a client cannot connect due to an obscure API error. Furthermore, this setting enables logging of all API requests (auth log) and details about user authentication attempts.";
conf->debug.api.t = CONF_BOOL;
conf->debug.api.f = FLAG_ADVANCED_SETTING;
conf->debug.api.d.b = false;
@ -1296,60 +1299,110 @@ void initConfig(struct config *conf)
// Initialize config value with default one for all *except* the log file path
if(conf_item != &conf->files.log.ftl)
{
if(conf_item->t == CONF_JSON_STRING_ARRAY)
// JSON objects really need to be duplicated as the config
// structure stores only a pointer to memory somewhere else
conf_item->v.json = cJSON_Duplicate(conf_item->d.json, true);
else if(conf_item->t == CONF_STRING_ALLOCATED)
// Allocated string: Make our own copy
conf_item->v.s = strdup(conf_item->d.s);
else
// Ordinary value: Simply copy the union over
memcpy(&conf_item->v, &conf_item->d, sizeof(conf_item->d));
}
reset_config(conf_item);
// Parse and split paths
conf_item->p = gen_config_path(conf_item->k, '.');
// Initialize environment variable name
// Allocate memory for config key + prefix (sizeof includes the trailing '\0')
const size_t envkey_size = strlen(conf_item->k) + sizeof(FTLCONF_PREFIX);
conf_item->e = calloc(envkey_size, sizeof(char));
// Build env key to look for
strcpy(conf_item->e, FTLCONF_PREFIX);
strcat(conf_item->e, conf_item->k);
// Replace all "." by "_" as this is the convention used in v5.x and earlier
for(unsigned int j = 0; j < envkey_size - 1; j++)
if(conf_item->e[j] == '.')
conf_item->e[j] = '_';
// Verify all config options are defined above
if(!conf_item->p || !conf_item->k || !conf_item->h)
if(!conf_item->p || !conf_item->k || !conf_item->h || !conf_item->e || conf_item->t == 0)
{
log_err("Config option %u/%u is not set!", i, (unsigned int)CONFIG_ELEMENTS);
log_err("Config option %u/%u is not fully configured!", i, (unsigned int)CONFIG_ELEMENTS);
continue;
}
// Verify that all config options have a type
if(conf_item->t == 0)
// Verify we have no default string pointers to NULL
if((conf_item->t == CONF_STRING || conf_item->t == CONF_STRING_ALLOCATED) && conf_item->d.s == NULL)
{
log_err("Config option %s has no type!", conf_item->k);
log_err("Config option %s has NULL default string!", conf_item->k);
continue;
}
// Verify we have no default JSON pointers to NULL
if(conf_item->t == CONF_JSON_STRING_ARRAY && conf_item->d.json == NULL)
{
log_err("Config option %s has NULL default JSON array!", conf_item->k);
continue;
}
}
}
void readFTLconf(struct config *conf, const bool rewrite)
// Restore a single config item's live value (v) from its compiled-in
// default (d), releasing any heap memory the current value owns first.
// Dispatch depends on the item's declared type (conf_item->t).
void reset_config(struct conf_item *conf_item)
{
	switch(conf_item->t)
	{
		case CONF_JSON_STRING_ARRAY:
			// Release the currently held JSON tree (if any) before
			// installing a deep copy of the default. A deep copy is
			// required as the config structure stores only a pointer
			// to memory located somewhere else
			if(conf_item->v.json != NULL)
				cJSON_Delete(conf_item->v.json);
			conf_item->v.json = cJSON_Duplicate(conf_item->d.json, true);
			break;

		case CONF_STRING_ALLOCATED:
			// Drop the previously allocated string (if any) and take
			// a private duplicate of the default string
			if(conf_item->v.s != NULL)
				free(conf_item->v.s);
			conf_item->v.s = strdup(conf_item->d.s);
			break;

		default:
			// Plain value types: the default union can be copied verbatim
			memcpy(&conf_item->v, &conf_item->d, sizeof(conf_item->d));
			break;
	}
}
bool readFTLconf(struct config *conf, const bool rewrite)
{
// Initialize config with default values
initConfig(conf);
// First try to read TOML config file
if(readFTLtoml(NULL, conf, NULL, rewrite, NULL))
// First, read the environment
getEnvVars();
// Try to read TOML config file
// If we cannot parse /etc/pihole.toml (due to missing or invalid syntax),
// we try to read the rotated files in /etc/pihole/config_backup starting at
// the most recent one and going back in time until we find a valid config
for(unsigned int i = 0; i < MAX_ROTATIONS; i++)
{
// If successful, we write the config file back to disk
// to ensure that all options are present and comments
// about options deviating from the default are present
if(rewrite)
if(readFTLtoml(NULL, conf, NULL, rewrite, NULL, i))
{
writeFTLtoml(true);
write_dnsmasq_config(conf, false, NULL);
write_custom_list();
// If successful, we write the config file back to disk
// to ensure that all options are present and comments
// about options deviating from the default are present
if(rewrite)
{
writeFTLtoml(true);
write_dnsmasq_config(conf, false, NULL);
write_custom_list();
}
return true;
}
return;
}
// On error, try to read legacy (pre-v6.0) config file. If successful,
// we move the legacy config file out of our way
log_info("No config file nor backup available, using defaults");
// If no previous config file could be read, we are likely either running
// for the first time or we are upgrading from a version prior to v6.0
// In this case, we try to read the legacy config files
const char *path = "";
if((path = readFTLlegacy(conf)) != NULL)
{
@ -1378,41 +1431,46 @@ void readFTLconf(struct config *conf, const bool rewrite)
rename(GLOBALTOMLPATH, new_name);
}
// Determine default webserver ports
// Check if ports 80/TCP and 443/TCP are already in use
const in_port_t http_port = port_in_use(80) ? 8080 : 80;
const in_port_t https_port = port_in_use(443) ? 8443 : 443;
// Create a string with the default ports
// Allocate memory for the string
char *ports = calloc(32, sizeof(char));
if(ports == NULL)
// Determine default webserver ports if not imported from setupVars.conf
if(!(config.webserver.port.f & FLAG_CONF_IMPORTED))
{
log_err("Unable to allocate memory for default ports string");
return;
// Check if ports 80/TCP and 443/TCP are already in use
const in_port_t http_port = port_in_use(80) ? 8080 : 80;
const in_port_t https_port = port_in_use(443) ? 8443 : 443;
// Create a string with the default ports
// Allocate memory for the string
char *ports = calloc(32, sizeof(char));
if(ports == NULL)
{
log_err("Unable to allocate memory for default ports string");
return false;
}
// Create the string
snprintf(ports, 32, "%d,%ds", http_port, https_port);
// Append IPv6 ports if IPv6 is enabled
const bool have_ipv6 = ipv6_enabled();
if(have_ipv6)
snprintf(ports + strlen(ports), 32 - strlen(ports),
",[::]:%d,[::]:%ds", http_port, https_port);
// Set default values for webserver ports
if(conf->webserver.port.t == CONF_STRING_ALLOCATED)
free(conf->webserver.port.v.s);
conf->webserver.port.v.s = ports;
conf->webserver.port.t = CONF_STRING_ALLOCATED;
log_info("Initialised webserver ports at %d (HTTP) and %d (HTTPS), IPv6 support is %s",
http_port, https_port, have_ipv6 ? "enabled" : "disabled");
}
// Create the string
snprintf(ports, 32, "%d,%ds", http_port, https_port);
// Append IPv6 ports if IPv6 is enabled
const bool have_ipv6 = ipv6_enabled();
if(have_ipv6)
snprintf(ports + strlen(ports), 32 - strlen(ports),
",[::]:%d,[::]:%ds", http_port, https_port);
// Set default values for webserver ports
if(conf->webserver.port.t == CONF_STRING_ALLOCATED)
free(conf->webserver.port.v.s);
conf->webserver.port.v.s = ports;
conf->webserver.port.t = CONF_STRING_ALLOCATED;
log_info("Initialised webserver ports at %d (HTTP) and %d (HTTPS), IPv6 support is %s",
http_port, https_port, have_ipv6 ? "enabled" : "disabled");
// Initialize the TOML config file
writeFTLtoml(true);
write_dnsmasq_config(conf, false, NULL);
write_custom_list();
return false;
}
bool getLogFilePath(void)
@ -1521,12 +1579,29 @@ void replace_config(struct config *newconf)
void reread_config(void)
{
// Create checksum of config file
uint8_t checksum[SHA256_DIGEST_SIZE];
if(!sha256sum(GLOBALTOMLPATH, checksum))
{
log_err("Unable to create checksum of %s, not re-reading config file", GLOBALTOMLPATH);
return;
}
// Compare checksums
if(memcmp(checksum, last_checksum, SHA256_DIGEST_SIZE) == 0)
{
log_debug(DEBUG_CONFIG, "Checksum of %s has not changed, not re-reading config file", GLOBALTOMLPATH);
return;
}
log_info("Reloading config due to pihole.toml change");
struct config conf_copy;
duplicate_config(&conf_copy, &config);
// Read TOML config file
bool restart = false;
if(readFTLtoml(&config, &conf_copy, NULL, true, &restart))
if(readFTLtoml(&config, &conf_copy, NULL, true, &restart, 0))
{
// Install new configuration
log_debug(DEBUG_CONFIG, "Loaded configuration is valid, installing it");
@ -1554,7 +1629,7 @@ void reread_config(void)
else
{
// New configuration is invalid, restore old one
log_debug(DEBUG_CONFIG, "Loaded configuration is invalid, restoring old one");
log_debug(DEBUG_CONFIG, "Modified config file is invalid, discarding and overwriting with current configuration");
free_config(&conf_copy);
}

View File

@ -33,6 +33,14 @@
// This static string represents an unchanged password
#define PASSWORD_VALUE "********"
// Remove the following line to disable the use of UTF-8 in the config file
// As consequence, the config file will be written in ASCII and all non-ASCII
// characters will be replaced by their UTF-8 escape sequences (UCS-2)
#define TOML_UTF8
// Location of the legacy (pre-v6.0) config file
#define GLOBALCONFFILE_LEGACY "/etc/pihole/pihole-FTL.conf"
union conf_value {
bool b; // boolean value
int i; // integer value
@ -89,10 +97,12 @@ enum conf_type {
#define FLAG_INVALIDATE_SESSIONS (1 << 3)
#define FLAG_WRITE_ONLY (1 << 4)
#define FLAG_ENV_VAR (1 << 5)
#define FLAG_CONF_IMPORTED (1 << 6)
struct conf_item {
const char *k; // item Key
char **p; // item Path
char *e; // item Environment variable
const char *h; // Help text / description
cJSON *a; // JSON array or object of Allowed values (where applicable)
enum conf_type t; // variable Type
@ -134,6 +144,7 @@ struct config {
struct conf_item queryLogging;
struct conf_item cnameRecords;
struct conf_item port;
struct conf_item revServers;
struct {
struct conf_item size;
struct conf_item optimizer;
@ -164,13 +175,6 @@ struct config {
struct conf_item count;
struct conf_item interval;
} rateLimit;
struct {
struct conf_item active;
struct conf_item cidr;
struct conf_item target;
struct conf_item domain;
} revServer;
} dns;
struct {
@ -178,7 +182,7 @@ struct config {
struct conf_item start;
struct conf_item end;
struct conf_item router;
struct conf_item domain;
struct conf_item netmask;
struct conf_item leaseTime;
struct conf_item ipv6;
struct conf_item rapidCommit;
@ -236,6 +240,7 @@ struct config {
struct conf_item excludeClients;
struct conf_item excludeDomains;
struct conf_item maxHistory;
struct conf_item maxClients;
struct conf_item allow_destructive;
struct {
struct conf_item limit;
@ -248,6 +253,7 @@ struct config {
struct conf_item pid;
struct conf_item database;
struct conf_item gravity;
struct conf_item gravity_tmp;
struct conf_item macvendor;
struct conf_item setupVars;
struct conf_item pcap;
@ -265,6 +271,7 @@ struct config {
struct conf_item addr2line;
struct conf_item etc_dnsmasq_d;
struct conf_item dnsmasq_lines;
struct conf_item extraLogging;
struct {
struct conf_item load;
struct conf_item shmem;
@ -317,7 +324,8 @@ extern struct config config;
void set_debug_flags(struct config *conf);
void set_all_debug(struct config *conf, const bool status);
void initConfig(struct config *conf);
void readFTLconf(struct config *conf, const bool rewrite);
void reset_config(struct conf_item *conf_item);
bool readFTLconf(struct config *conf, const bool rewrite);
bool getLogFilePath(void);
struct conf_item *get_conf_item(struct config *conf, const unsigned int n);
struct conf_item *get_debug_item(struct config *conf, const enum debug_flag debug);

View File

@ -23,7 +23,7 @@
// directory_exists()
#include "files.h"
// trim_whitespace()
#include "setupVars.h"
#include "config/setupVars.h"
// run_dnsmasq_main()
#include "args.h"
// optind
@ -222,6 +222,73 @@ static void write_config_header(FILE *fp, const char *description)
bool __attribute__((const)) write_dnsmasq_config(struct config *conf, bool test_config, char errbuf[ERRBUF_SIZE])
{
// Early config checks
if(conf->dhcp.active.v.b)
{
// Check if the addresses are valid
// The addresses should neither be 0.0.0.0 nor 255.255.255.255
if((ntohl(conf->dhcp.start.v.in_addr.s_addr) == 0) ||
(ntohl(conf->dhcp.start.v.in_addr.s_addr) == 0xFFFFFFFF))
{
strncpy(errbuf, "DHCP start address is not valid", ERRBUF_SIZE);
log_err("Unable to update dnsmasq configuration: %s", errbuf);
return false;
}
if((ntohl(conf->dhcp.end.v.in_addr.s_addr) == 0) ||
(ntohl(conf->dhcp.end.v.in_addr.s_addr) == 0xFFFFFFFF))
{
strncpy(errbuf, "DHCP end address is not valid", ERRBUF_SIZE);
log_err("Unable to update dnsmasq configuration: %s", errbuf);
return false;
}
if((ntohl(conf->dhcp.router.v.in_addr.s_addr) == 0) ||
(ntohl(conf->dhcp.router.v.in_addr.s_addr) == 0xFFFFFFFF))
{
strncpy(errbuf, "DHCP router address is not valid", ERRBUF_SIZE);
log_err("Unable to update dnsmasq configuration: %s", errbuf);
return false;
}
// The addresses should neither end in .0 or .255 in the last octet
if((ntohl(conf->dhcp.start.v.in_addr.s_addr) & 0xFF) == 0 ||
(ntohl(conf->dhcp.start.v.in_addr.s_addr) & 0xFF) == 0xFF)
{
strncpy(errbuf, "DHCP start address is not valid", ERRBUF_SIZE);
log_err("Unable to update dnsmasq configuration: %s", errbuf);
return false;
}
if((ntohl(conf->dhcp.end.v.in_addr.s_addr) & 0xFF) == 0 ||
(ntohl(conf->dhcp.end.v.in_addr.s_addr) & 0xFF) == 0xFF)
{
strncpy(errbuf, "DHCP end address is not valid", ERRBUF_SIZE);
log_err("Unable to update dnsmasq configuration: %s", errbuf);
return false;
}
if((ntohl(conf->dhcp.router.v.in_addr.s_addr) & 0xFF) == 0 ||
(ntohl(conf->dhcp.router.v.in_addr.s_addr) & 0xFF) == 0xFF)
{
strncpy(errbuf, "DHCP router address is not valid", ERRBUF_SIZE);
log_err("Unable to update dnsmasq configuration: %s", errbuf);
return false;
}
// Check if the DHCP range is valid (start needs to be smaller than end)
if(ntohl(conf->dhcp.start.v.in_addr.s_addr) >= ntohl(conf->dhcp.end.v.in_addr.s_addr))
{
strncpy(errbuf, "DHCP range start address is larger than or equal to the end address", ERRBUF_SIZE);
log_err("Unable to update dnsmasq configuration: %s", errbuf);
return false;
}
// Check if the router address is within the DHCP range
if(ntohl(conf->dhcp.router.v.in_addr.s_addr) >= ntohl(conf->dhcp.start.v.in_addr.s_addr) &&
ntohl(conf->dhcp.router.v.in_addr.s_addr) <= ntohl(conf->dhcp.end.v.in_addr.s_addr))
{
strncpy(errbuf, "DHCP router address should not be within DHCP range", ERRBUF_SIZE);
log_err("Unable to update dnsmasq configuration: %s", errbuf);
return false;
}
}
log_debug(DEBUG_CONFIG, "Opening "DNSMASQ_TEMP_CONF" for writing");
FILE *pihole_conf = fopen(DNSMASQ_TEMP_CONF, "w");
// Return early if opening failed
@ -240,7 +307,6 @@ bool __attribute__((const)) write_dnsmasq_config(struct config *conf, bool test_
}
write_config_header(pihole_conf, "Dnsmasq config for Pi-hole's FTLDNS");
fputs("addn-hosts=/etc/pihole/local.list\n", pihole_conf);
fputs("hostsdir="DNSMASQ_HOSTSDIR"\n", pihole_conf);
fputs("\n", pihole_conf);
fputs("# Don't read /etc/resolv.conf. Get upstream servers only from the configuration\n", pihole_conf);
@ -278,7 +344,10 @@ bool __attribute__((const)) write_dnsmasq_config(struct config *conf, bool test_
if(conf->dns.queryLogging.v.b)
{
fputs("# Enable query logging\n", pihole_conf);
fputs("log-queries\n", pihole_conf);
if(conf->misc.extraLogging.v.b)
fputs("log-queries=extra\n", pihole_conf);
else
fputs("log-queries\n", pihole_conf);
fputs("log-async\n", pihole_conf);
fputs("\n", pihole_conf);
}
@ -341,10 +410,11 @@ bool __attribute__((const)) write_dnsmasq_config(struct config *conf, bool test_
fputs("\n", pihole_conf);
}
if(conf->dns.cache.optimizer.v.ui > 0u)
if(conf->dns.cache.optimizer.v.i > -1)
{
fputs("# Use stale cache entries for a given number of seconds to optimize cache utilization\n", pihole_conf);
fprintf(pihole_conf, "use-stale-cache=%u\n", conf->dns.cache.optimizer.v.ui);
fputs("# Setting the time to zero will serve stale cache data regardless how long it has expired.\n", pihole_conf);
fprintf(pihole_conf, "use-stale-cache=%i\n", conf->dns.cache.optimizer.v.i);
fputs("\n", pihole_conf);
}
@ -379,24 +449,52 @@ bool __attribute__((const)) write_dnsmasq_config(struct config *conf, bool test_
}
fputs("\n", pihole_conf);
if(conf->dns.revServer.active.v.b)
const unsigned int revServers = cJSON_GetArraySize(conf->dns.revServers.v.json);
for(unsigned int i = 0; i < revServers; i++)
{
fputs("# Reverse server setting\n", pihole_conf);
fprintf(pihole_conf, "rev-server=%s,%s\n",
conf->dns.revServer.cidr.v.s, conf->dns.revServer.target.v.s);
cJSON *revServer = cJSON_GetArrayItem(conf->dns.revServers.v.json, i);
// Split comma-separated string into its components
char *copy = strdup(revServer->valuestring);
char *active = strtok(copy, ",");
char *cidr = strtok(NULL, ",");
char *target = strtok(NULL, ",");
char *domain = strtok(NULL, ",");
// Skip inactive reverse servers
if(active != NULL &&
strcmp(active, "true") != 0 &&
strcmp(active, "1") != 0)
{
log_debug(DEBUG_CONFIG, "Skipping inactive reverse server: %s", revServer->valuestring);
free(copy);
continue;
}
if(active == NULL || cidr == NULL || target == NULL || domain == NULL)
{
log_err("Invalid reverse server string: %s", revServer->valuestring);
free(copy);
continue;
}
fprintf(pihole_conf, "# Reverse server setting (%u%s server)\n",
i+1, get_ordinal_suffix(i+1));
fprintf(pihole_conf, "rev-server=%s,%s\n", cidr, target);
// If we have a reverse domain, we forward all queries to this domain to
// the same destination
if(strlen(conf->dns.revServer.domain.v.s) > 0)
fprintf(pihole_conf, "server=/%s/%s\n",
conf->dns.revServer.domain.v.s, conf->dns.revServer.target.v.s);
if(strlen(domain) > 0)
fprintf(pihole_conf, "server=/%s/%s\n", domain, target);
// Forward unqualified names to the target only when the "never forward
// non-FQDN" option is NOT ticked
if(!conf->dns.domainNeeded.v.b)
fprintf(pihole_conf, "server=//%s\n",
conf->dns.revServer.target.v.s);
fprintf(pihole_conf, "server=//%s\n", target);
fputs("\n", pihole_conf);
// Free copy of string
free(copy);
}
// When there is a Pi-hole domain set and "Never forward non-FQDNs" is
@ -427,12 +525,25 @@ bool __attribute__((const)) write_dnsmasq_config(struct config *conf, bool test_
fputs("# DHCP server setting\n", pihole_conf);
fputs("dhcp-authoritative\n", pihole_conf);
fputs("dhcp-leasefile="DHCPLEASESFILE"\n", pihole_conf);
fprintf(pihole_conf, "dhcp-range=%s,%s,%s\n",
conf->dhcp.start.v.s,
conf->dhcp.end.v.s,
conf->dhcp.leaseTime.v.s);
fprintf(pihole_conf, "dhcp-option=option:router,%s\n",
conf->dhcp.router.v.s);
char start[INET_ADDRSTRLEN] = { 0 },
end[INET_ADDRSTRLEN] = { 0 },
router[INET_ADDRSTRLEN] = { 0 };
inet_ntop(AF_INET, &conf->dhcp.start.v.in_addr, start, INET_ADDRSTRLEN);
inet_ntop(AF_INET, &conf->dhcp.end.v.in_addr, end, INET_ADDRSTRLEN);
inet_ntop(AF_INET, &conf->dhcp.router.v.in_addr, router, INET_ADDRSTRLEN);
fprintf(pihole_conf, "dhcp-range=%s,%s", start, end);
// Net mask is optional, only add if it is not 0.0.0.0
const struct in_addr inaddr_empty = {0};
if(memcmp(&conf->dhcp.netmask.v.in_addr, &inaddr_empty, sizeof(inaddr_empty)) != 0)
{
char netmask[INET_ADDRSTRLEN] = { 0 };
inet_ntop(AF_INET, &conf->dhcp.netmask.v.in_addr, netmask, INET_ADDRSTRLEN);
fprintf(pihole_conf, ",%s", netmask);
}
// Lease time is optional, only add it if it is set
if(strlen(conf->dhcp.leaseTime.v.s) > 0)
fprintf(pihole_conf, ",%s", conf->dhcp.leaseTime.v.s);
fprintf(pihole_conf, "\ndhcp-option=option:router,%s\n", router);
if(conf->dhcp.rapidCommit.v.b)
fputs("dhcp-rapid-commit\n", pihole_conf);
@ -547,6 +658,15 @@ bool __attribute__((const)) write_dnsmasq_config(struct config *conf, bool test_
}
}
// Add ANY filtering
fputs("# RFC 8482: Providing Minimal-Sized Responses to DNS Queries That Have QTYPE=ANY\n", pihole_conf);
fputs("# Filters replies to queries for type ANY. Everything other than A, AAAA, MX and CNAME\n", pihole_conf);
fputs("# records are removed. Since ANY queries with forged source addresses can be used in DNS amplification attacks\n", pihole_conf);
fputs("# replies to ANY queries can be large) this defangs such attacks, whilst still supporting the\n", pihole_conf);
fputs("# one remaining possible use of ANY queries. See RFC 8482 para 4.3 for details.\n", pihole_conf);
fputs("filter-rr=ANY\n", pihole_conf);
fputs("\n", pihole_conf);
// Add additional config lines to disk (if present)
if(conf->misc.dnsmasq_lines.v.json != NULL &&
cJSON_GetArraySize(conf->misc.dnsmasq_lines.v.json) > 0)
@ -575,6 +695,13 @@ bool __attribute__((const)) write_dnsmasq_config(struct config *conf, bool test_
return false;
}
// Close file
if(fclose(pihole_conf) != 0)
{
log_err("Cannot close dnsmasq config file: %s", strerror(errno));
return false;
}
log_debug(DEBUG_CONFIG, "Testing "DNSMASQ_TEMP_CONF);
if(test_config && !test_dnsmasq_config(errbuf))
{
@ -582,18 +709,26 @@ bool __attribute__((const)) write_dnsmasq_config(struct config *conf, bool test_
return false;
}
log_debug(DEBUG_CONFIG, "Installing "DNSMASQ_TEMP_CONF" to "DNSMASQ_PH_CONFIG);
if(rename(DNSMASQ_TEMP_CONF, DNSMASQ_PH_CONFIG) != 0)
// Check if the new config file is different from the old one
// Skip the first 24 lines as they contain the header
if(files_different(DNSMASQ_TEMP_CONF, DNSMASQ_PH_CONFIG, 24))
{
log_err("Cannot install dnsmasq config file: %s", strerror(errno));
return false;
if(rename(DNSMASQ_TEMP_CONF, DNSMASQ_PH_CONFIG) != 0)
{
log_err("Cannot install dnsmasq config file: %s", strerror(errno));
return false;
}
log_debug(DEBUG_CONFIG, "Config file written to "DNSMASQ_PH_CONFIG);
}
// Close file
if(fclose(pihole_conf) != 0)
else
{
log_err("Cannot close dnsmasq config file: %s", strerror(errno));
return false;
log_debug(DEBUG_CONFIG, "dnsmasq.conf unchanged");
// Remove temporary config file
if(remove(DNSMASQ_TEMP_CONF) != 0)
{
log_err("Cannot remove temporary dnsmasq config file: %s", strerror(errno));
return false;
}
}
return true;
}
@ -788,19 +923,19 @@ bool write_custom_list(void)
}
}
log_debug(DEBUG_CONFIG, "Opening "DNSMASQ_CUSTOM_LIST" for writing");
FILE *custom_list = fopen(DNSMASQ_CUSTOM_LIST, "w");
log_debug(DEBUG_CONFIG, "Opening "DNSMASQ_CUSTOM_LIST_LEGACY".tmp for writing");
FILE *custom_list = fopen(DNSMASQ_CUSTOM_LIST_LEGACY".tmp", "w");
// Return early if opening failed
if(!custom_list)
{
log_err("Cannot open "DNSMASQ_CUSTOM_LIST" for writing, unable to update custom.list: %s", strerror(errno));
log_err("Cannot open "DNSMASQ_CUSTOM_LIST_LEGACY".tmp for writing, unable to update custom.list: %s", strerror(errno));
return false;
}
// Lock file, may block if the file is currently opened
if(flock(fileno(custom_list), LOCK_EX) != 0)
{
log_err("Cannot open "DNSMASQ_CUSTOM_LIST" in exclusive mode: %s", strerror(errno));
log_err("Cannot open "DNSMASQ_CUSTOM_LIST_LEGACY".tmp in exclusive mode: %s", strerror(errno));
fclose(custom_list);
return false;
}
@ -841,5 +976,28 @@ bool write_custom_list(void)
log_err("Cannot close custom.list: %s", strerror(errno));
return false;
}
// Check if the new config file is different from the old one
// Skip the first 24 lines as they contain the header
if(files_different(DNSMASQ_CUSTOM_LIST_LEGACY".tmp", DNSMASQ_CUSTOM_LIST, 24))
{
if(rename(DNSMASQ_CUSTOM_LIST_LEGACY".tmp", DNSMASQ_CUSTOM_LIST) != 0)
{
log_err("Cannot install custom.list: %s", strerror(errno));
return false;
}
log_debug(DEBUG_CONFIG, "HOSTS file written to "DNSMASQ_CUSTOM_LIST);
}
else
{
log_debug(DEBUG_CONFIG, "custom.list unchanged");
// Remove temporary config file
if(remove(DNSMASQ_CUSTOM_LIST_LEGACY".tmp") != 0)
{
log_err("Cannot remove temporary custom.list: %s", strerror(errno));
return false;
}
}
return true;
}

548
src/config/env.c Normal file
View File

@ -0,0 +1,548 @@
/* Pi-hole: A black hole for Internet advertisements
* (c) 2023 Pi-hole, LLC (https://pi-hole.net)
* Network-wide ad blocking via your own hardware.
*
* FTL Engine
* Environment-related routines
*
* This file is copyright under the latest version of the EUPL.
* Please see LICENSE file for your rights under this license. */
#include "env.h"
#include "log.h"
#include "config/config.h"
// get_refresh_hostnames_str()
#include "datastructure.h"
//set_and_check_password()
#include "config/password.h"
// cli_tick()
#include "args.h"
// suggest_closest()
#include "config/suggest.h"
struct env_item
{
bool used;
bool valid;
char *key;
char *value;
const char *error;
const char *allowed;
struct env_item *next;
};
static struct env_item *env_list = NULL;
// Collect all FTLCONF_-prefixed environment variables into the file-local
// env_list for later lookup by readEnvValue(). Idempotent: the environment
// is scanned only on the first call.
void getEnvVars(void)
{
	// Read environment variables only once
	if(env_list != NULL)
		return;

	// Get all environment variables
	for(char **env = environ; *env != NULL; env++)
	{
		// Skip variables without the FTLCONF_ prefix
		if(strncmp(*env, FTLCONF_PREFIX, sizeof(FTLCONF_PREFIX) - 1) != 0)
			continue;

		// Split "KEY=VALUE" at the first '='. Using strchr() instead
		// of strtok() avoids writing a NUL byte into the process
		// environment itself and preserves any further '=' characters
		// inside the value (strtok() would truncate "a=b" to "a").
		const char *sep = strchr(*env, '=');
		if(sep == NULL)
			continue; // malformed entry without '=', ignore it

		// Prepend a new item to the list
		struct env_item *new_item = calloc(1, sizeof(struct env_item));
		if(new_item == NULL)
			return; // out of memory, keep what we have so far
		new_item->used = false;
		new_item->key = strndup(*env, (size_t)(sep - *env));
		// An empty value ("FTLCONF_X=") yields "" here; the previous
		// strtok()-based code returned NULL and would have crashed
		// in strdup(NULL)
		new_item->value = strdup(sep + 1);
		new_item->error = NULL;
		new_item->allowed = NULL;
		new_item->next = env_list;
		env_list = new_item;
	}
}
void printFTLenv(void)
{
// Nothing to print if no env vars are used
if(env_list == NULL)
return;
// Count number of used and ignored env vars
unsigned int used = 0, invalid = 0, ignored = 0;
for(struct env_item *item = env_list; item != NULL; item = item->next)
{
if(item->used)
if(item->valid)
used++;
else
invalid++;
else
ignored++;
}
const unsigned int sum = used + invalid + ignored;
log_info("%u FTLCONF environment variable%s found (%u used, %u invalid, %u ignored)",
sum, sum == 1 ? "" : "s", used, invalid, ignored);
// Iterate over all known FTLCONF environment variables
for(struct env_item *item = env_list; item != NULL; item = item->next)
{
if(item->used)
{
if(item->valid)
log_info(" %s %s is used", cli_tick(), item->key);
else
{
if(item->error != NULL && item->allowed == NULL)
log_err(" %s %s is invalid (%s)",
cli_cross(), item->key, item->error);
else if(item->error != NULL && item->allowed != NULL)
log_err(" %s %s is invalid (%s, allowed options are: %s)",
cli_cross(), item->key, item->error, item->allowed);
else
log_err(" %s %s is invalid",
cli_cross(), item->key);
}
continue;
}
// else: print warning
unsigned int N = 0;
char **matches = suggest_closest_conf_key(true, item->key, &N);
// Print the closest matches
log_warn("%s %s is unknown, did you mean any of these?", cli_qst(), item->key);
for(size_t i = 0; i < N; ++i)
log_warn(" - %s", matches[i]);
free(matches);
}
}
// Look up a collected FTLCONF_ environment variable by its full key.
// Returns the matching list entry, or NULL if the key is unknown.
static struct env_item *__attribute__((pure)) getFTLenv(const char *key)
{
	struct env_item *item = env_list;
	while(item != NULL)
	{
		if(strcmp(item->key, key) == 0)
			break;
		item = item->next;
	}
	// item is NULL here when no entry matched
	return item;
}
void freeEnvVars(void)
{
// Free all environment variables
while(env_list != NULL)
{
struct env_item *next = env_list->next;
free(env_list->key);
free(env_list->value);
free(env_list);
env_list = next;
}
}
bool readEnvValue(struct conf_item *conf_item, struct config *newconf)
{
// First check if a environmental variable with the given key exists by
// iterating over the list of FTLCONF_ variables
struct env_item *item = getFTLenv(conf_item->e);
// Return early if this environment variable does not exist
if(item == NULL)
return false;
// Mark this environment variable as used
item->used = true;
// else: We found an environment variable with the given key
const char *envvar = item != NULL ? item->value : NULL;
log_debug(DEBUG_CONFIG, "ENV %s = %s", conf_item->e, envvar);
switch(conf_item->t)
{
case CONF_BOOL:
{
if(strcasecmp(envvar, "true") == 0 || strcasecmp(envvar, "yes") == 0)
{
conf_item->v.b = true;
item->valid = true;
}
else if(strcasecmp(envvar, "false") == 0 || strcasecmp(envvar, "no") == 0)
{
conf_item->v.b = false;
item->valid = true;
}
else
{
item->error = "not of type bool";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_ALL_DEBUG_BOOL:
{
if(strcasecmp(envvar, "true") == 0 || strcasecmp(envvar, "yes") == 0)
{
set_all_debug(newconf, true);
item->valid = true;
}
else if(strcasecmp(envvar, "false") == 0 || strcasecmp(envvar, "no") == 0)
{
set_all_debug(newconf, false);
item->valid = true;
}
else
{
item->error = "not of type bool";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_INT:
{
int val = 0;
if(sscanf(envvar, "%i", &val) == 1)
{
conf_item->v.i = val;
item->valid = true;
}
else
{
item->error = "not of type integer";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_UINT:
{
unsigned int val = 0;
if(sscanf(envvar, "%u", &val) == 1)
{
conf_item->v.ui = val;
item->valid = true;
}
else
{
item->error = "not of type unsigned integer";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_UINT16:
{
unsigned int val = 0;
if(sscanf(envvar, "%u", &val) == 1 && val <= UINT16_MAX)
{
conf_item->v.ui = val;
item->valid = true;
}
else
{
item->error = "not of type unsigned integer (16 bit";
log_warn("ENV %s is %s)", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_LONG:
{
long val = 0;
if(sscanf(envvar, "%li", &val) == 1)
{
conf_item->v.l = val;
item->valid = true;
}
else
{
item->error = "not of type long";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_ULONG:
{
unsigned long val = 0;
if(sscanf(envvar, "%lu", &val) == 1)
{
conf_item->v.ul = val;
item->valid = true;
}
else
{
item->error = "not of type unsigned long";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_DOUBLE:
{
double val = 0;
if(sscanf(envvar, "%lf", &val) == 1)
{
conf_item->v.d = val;
item->valid = true;
}
else
{
item->error = "not of type double";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_STRING:
case CONF_STRING_ALLOCATED:
{
if(conf_item->t == CONF_STRING_ALLOCATED)
free(conf_item->v.s);
conf_item->v.s = strdup(envvar);
conf_item->t = CONF_STRING_ALLOCATED;
item->valid = true;
break;
}
case CONF_ENUM_PTR_TYPE:
{
const int ptr_type = get_ptr_type_val(envvar);
if(ptr_type != -1)
{
conf_item->v.ptr_type = ptr_type;
item->valid = true;
}
else
{
item->error = "not an allowed option";
item->allowed = conf_item->h;
log_warn("ENV %s is %s, allowed options are: %s",
conf_item->e, item->error, item->allowed);
item->valid = false;
}
break;
}
case CONF_ENUM_BUSY_TYPE:
{
const int busy_reply = get_busy_reply_val(envvar);
if(busy_reply != -1)
{
conf_item->v.busy_reply = busy_reply;
item->valid = true;
}
else
{
item->error = "not an allowed option";
item->allowed = conf_item->h;
log_warn("ENV %s is %s, allowed options are: %s",
conf_item->e, item->error, item->allowed);
item->valid = false;
}
break;
}
case CONF_ENUM_BLOCKING_MODE:
{
const int blocking_mode = get_blocking_mode_val(envvar);
if(blocking_mode != -1)
{
conf_item->v.blocking_mode = blocking_mode;
item->valid = true;
}
else
{
item->error = "not an allowed option";
item->allowed = conf_item->h;
log_warn("ENV %s is %s, allowed options are: %s",
conf_item->e, item->error, item->allowed);
item->valid = false;
}
break;
}
case CONF_ENUM_REFRESH_HOSTNAMES:
{
const int refresh_hostnames = get_refresh_hostnames_val(envvar);
if(refresh_hostnames != -1)
{
conf_item->v.refresh_hostnames = refresh_hostnames;
item->valid = true;
}
else
{
item->error = "not an allowed option";
item->allowed = conf_item->h;
log_warn("ENV %s is %s, allowed options are: %s",
conf_item->e, item->error, item->allowed);
item->valid = false;
}
break;
}
case CONF_ENUM_LISTENING_MODE:
{
const int listeningMode = get_listeningMode_val(envvar);
if(listeningMode != -1)
{
conf_item->v.listeningMode = listeningMode;
item->valid = true;
}
else
{
item->error = "not an allowed option";
item->allowed = conf_item->h;
log_warn("ENV %s is %s, allowed options are: %s",
conf_item->e, item->error, item->allowed);
item->valid = false;
}
break;
}
case CONF_ENUM_WEB_THEME:
{
const int web_theme = get_web_theme_val(envvar);
if(web_theme != -1)
{
conf_item->v.web_theme = web_theme;
item->valid = true;
}
else
{
item->error = "not an allowed option";
item->allowed = conf_item->h;
log_warn("ENV %s is %s, allowed options are: %s",
conf_item->e, item->error, item->allowed);
item->valid = false;
}
break;
}
case CONF_ENUM_TEMP_UNIT:
{
const int temp_unit = get_temp_unit_val(envvar);
if(temp_unit != -1)
{
conf_item->v.temp_unit = temp_unit;
item->valid = true;
}
else
{
item->error = "not an allowed option";
item->allowed = conf_item->h;
log_warn("ENV %s is %s, allowed options are: %s",
conf_item->e, item->error, item->allowed);
item->valid = false;
}
break;
}
case CONF_ENUM_PRIVACY_LEVEL:
{
int val = 0;
if(sscanf(envvar, "%i", &val) == 1 && val >= PRIVACY_SHOW_ALL && val <= PRIVACY_MAXIMUM)
{
conf_item->v.i = val;
item->valid = true;
}
else
{
item->error = "not of type integer or outside allowed bounds";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_STRUCT_IN_ADDR:
{
struct in_addr addr4 = { 0 };
if(strlen(envvar) == 0)
{
// Special case: empty string -> 0.0.0.0
conf_item->v.in_addr.s_addr = INADDR_ANY;
}
else if(inet_pton(AF_INET, envvar, &addr4))
{
memcpy(&conf_item->v.in_addr, &addr4, sizeof(addr4));
item->valid = true;
}
else
{
item->error = "not of type IPv4 address";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_STRUCT_IN6_ADDR:
{
struct in6_addr addr6 = { 0 };
if(strlen(envvar) == 0)
{
// Special case: empty string -> ::
memcpy(&conf_item->v.in6_addr, &in6addr_any, sizeof(in6addr_any));
}
else if(inet_pton(AF_INET6, envvar, &addr6))
{
memcpy(&conf_item->v.in6_addr, &addr6, sizeof(addr6));
item->valid = true;
}
else
{
item->error = "not of type IPv6 address";
log_warn("ENV %s is %s", conf_item->e, item->error);
item->valid = false;
}
break;
}
case CONF_JSON_STRING_ARRAY:
{
// Make a copy of envvar as strtok modified the input string
char *envvar_copy = strdup(envvar);
// Free previously allocated JSON array
cJSON_Delete(conf_item->v.json);
conf_item->v.json = cJSON_CreateArray();
// Parse envvar array and generate a JSON array (env var
// arrays are ;-delimited)
const char delim[] =";";
const char *elem = strtok(envvar_copy, delim);
while(elem != NULL)
{
// Only import non-empty entries
if(strlen(elem) > 0)
{
// Add string to our JSON array
cJSON *citem = cJSON_CreateString(elem);
cJSON_AddItemToArray(conf_item->v.json, citem);
}
// Search for the next element
elem = strtok(NULL, delim);
}
free(envvar_copy);
item->valid = true;
break;
}
case CONF_PASSWORD:
{
if(!set_and_check_password(conf_item, envvar))
{
log_warn("ENV %s is invalid", conf_item->e);
item->valid = false;
break;
}
item->valid = true;
break;
}
}
return true;
}

28
src/config/env.h Normal file
View File

@ -0,0 +1,28 @@
/* Pi-hole: A black hole for Internet advertisements
* (c) 2023 Pi-hole, LLC (https://pi-hole.net)
* Network-wide ad blocking via your own hardware.
*
* FTL Engine
* Environment-related prototypes
*
* This file is copyright under the latest version of the EUPL.
* Please see LICENSE file for your rights under this license. */
#ifndef CONFIG_ENV_H
#define CONFIG_ENV_H
#include "FTL.h"
// union conf_value
#include "config.h"
// type toml_table_t
#include "tomlc99/toml.h"
#define FTLCONF_PREFIX "FTLCONF_"
int dist(const char *str);
void getEnvVars(void);
void freeEnvVars(void);
void printFTLenv(void);
bool readEnvValue(struct conf_item *conf_item, struct config *newconf);
#endif //CONFIG_ENV_H

View File

@ -106,18 +106,38 @@ bool check_inotify_event(void)
// Check if this is the event we are looking for
if(event->mask & IN_CLOSE_WRITE)
{
// File opened for writing was closed
log_debug(DEBUG_INOTIFY, "File written: "WATCHDIR"/%s", event->name);
if(strcmp(event->name, "pihole.toml") == 0)
config_changed = true;
}
else if(event->mask & IN_CREATE)
{
// File was created
log_debug(DEBUG_INOTIFY, "File created: "WATCHDIR"/%s", event->name);
else if(event->mask & IN_MOVE)
log_debug(DEBUG_INOTIFY, "File moved: "WATCHDIR"/%s", event->name);
}
else if(event->mask & IN_MOVED_FROM)
{
// File was moved (source)
log_debug(DEBUG_INOTIFY, "File moved from: "WATCHDIR"/%s", event->name);
}
else if(event->mask & IN_MOVED_TO)
{
// File was moved (target)
log_debug(DEBUG_INOTIFY, "File moved to: "WATCHDIR"/%s", event->name);
if(strcmp(event->name, "pihole.toml") == 0)
config_changed = true;
}
else if(event->mask & IN_DELETE)
{
// File was deleted
log_debug(DEBUG_INOTIFY, "File deleted: "WATCHDIR"/%s", event->name);
}
else if(event->mask & IN_IGNORED)
{
// Watch descriptor was removed
log_warn("Inotify watch descriptor for "WATCHDIR" was removed (directory deleted or unmounted?)");
}
else
log_debug(DEBUG_INOTIFY, "Unknown event (%X) on watched file: "WATCHDIR"/%s", event->mask, event->name);
}

View File

@ -11,7 +11,7 @@
#include "FTL.h"
#include "legacy_reader.h"
#include "config.h"
#include "setupVars.h"
#include "config/setupVars.h"
#include "log.h"
// nice()
#include <unistd.h>
@ -43,7 +43,7 @@ static FILE * __attribute__((nonnull(1), malloc, warn_unused_result)) openFTLcon
return fp;
// Local file not present, try system file
*path = "/etc/pihole/pihole-FTL.conf";
*path = GLOBALCONFFILE_LEGACY;
fp = fopen(*path, "r");
return fp;
@ -113,9 +113,12 @@ const char *readFTLlegacy(struct config *conf)
const char *path = NULL;
FILE *fp = openFTLconf(&path);
if(fp == NULL)
{
log_warn("No readable FTL config file found, using default settings");
return NULL;
}
log_notice("Reading legacy config file");
log_info("Reading legacy config files from %s", path);
// MAXDBDAYS
// defaults to: 365 days
@ -337,6 +340,10 @@ const char *readFTLlegacy(struct config *conf)
// 2.0) had the range -infinity..15.
buffer = parseFTLconf(fp, "NICE");
value = 0;
if(buffer != NULL && sscanf(buffer, "%i", &value) && value >= -20 && value <= 19)
conf->misc.nice.v.i = value;
// MAXNETAGE
// IP addresses (and associated host names) older than the specified number
// of days are removed to avoid dead entries in the network overview table
@ -401,7 +408,7 @@ const char *readFTLlegacy(struct config *conf)
conf->dns.reply.host.force4.v.b = false;
conf->dns.reply.host.v4.v.in_addr.s_addr = 0;
buffer = parseFTLconf(fp, "LOCAL_IPV4");
if(buffer != NULL && inet_pton(AF_INET, buffer, &conf->dns.reply.host.v4.v.in_addr))
if(buffer != NULL && strlen(buffer) > 0 && inet_pton(AF_INET, buffer, &conf->dns.reply.host.v4.v.in_addr))
conf->dns.reply.host.force4.v.b = true;
// LOCAL_IPV6
@ -411,7 +418,7 @@ const char *readFTLlegacy(struct config *conf)
conf->dns.reply.host.force6.v.b = false;
memset(&conf->dns.reply.host.v6.v.in6_addr, 0, sizeof(conf->dns.reply.host.v6.v.in6_addr));
buffer = parseFTLconf(fp, "LOCAL_IPV6");
if(buffer != NULL && inet_pton(AF_INET6, buffer, &conf->dns.reply.host.v6.v.in6_addr))
if(buffer != NULL && strlen(buffer) > 0 && inet_pton(AF_INET6, buffer, &conf->dns.reply.host.v6.v.in6_addr))
conf->dns.reply.host.force6.v.b = true;
// BLOCK_IPV4
@ -420,7 +427,7 @@ const char *readFTLlegacy(struct config *conf)
conf->dns.reply.blocking.force4.v.b = false;
conf->dns.reply.blocking.v4.v.in_addr.s_addr = 0;
buffer = parseFTLconf(fp, "BLOCK_IPV4");
if(buffer != NULL && inet_pton(AF_INET, buffer, &conf->dns.reply.blocking.v4.v.in_addr))
if(buffer != NULL && strlen(buffer) > 0 && inet_pton(AF_INET, buffer, &conf->dns.reply.blocking.v4.v.in_addr))
conf->dns.reply.blocking.force4.v.b = true;
// BLOCK_IPV6
@ -429,7 +436,7 @@ const char *readFTLlegacy(struct config *conf)
conf->dns.reply.blocking.force6.v.b = false;
memset(&conf->dns.reply.blocking.v6.v.in6_addr, 0, sizeof(conf->dns.reply.host.v6.v.in6_addr));
buffer = parseFTLconf(fp, "BLOCK_IPV6");
if(buffer != NULL && inet_pton(AF_INET6, buffer, &conf->dns.reply.blocking.v6.v.in6_addr))
if(buffer != NULL && strlen(buffer) > 0 && inet_pton(AF_INET6, buffer, &conf->dns.reply.blocking.v6.v.in6_addr))
conf->dns.reply.blocking.force6.v.b = true;
// REPLY_ADDR4 (deprecated setting)
@ -438,7 +445,7 @@ const char *readFTLlegacy(struct config *conf)
// defaults to: not set
struct in_addr reply_addr4;
buffer = parseFTLconf(fp, "REPLY_ADDR4");
if(buffer != NULL && inet_pton(AF_INET, buffer, &reply_addr4))
if(buffer != NULL && strlen(buffer) > 0 && inet_pton(AF_INET, buffer, &reply_addr4))
{
if(conf->dns.reply.host.force4.v.b || conf->dns.reply.blocking.force4.v.b)
{
@ -459,7 +466,7 @@ const char *readFTLlegacy(struct config *conf)
// defaults to: not set
struct in6_addr reply_addr6;
buffer = parseFTLconf(fp, "REPLY_ADDR6");
if(buffer != NULL && inet_pton(AF_INET, buffer, &reply_addr6))
if(buffer != NULL && strlen(buffer) > 0 && inet_pton(AF_INET, buffer, &reply_addr6))
{
if(conf->dns.reply.host.force6.v.b || conf->dns.reply.blocking.force6.v.b)
{
@ -576,8 +583,8 @@ const char *readFTLlegacy(struct config *conf)
// Release memory
releaseConfigMemory();
if(fp != NULL)
fclose(fp);
// Close file
fclose(fp);
return path;
}
@ -824,12 +831,7 @@ static void readDebugingSettingsLegacy(FILE *fp)
setDebugOption(fp, "DEBUG_ALL", ~(enum debug_flag)0);
for(enum debug_flag flag = DEBUG_DATABASE; flag < DEBUG_EXTRA; flag <<= 1)
{
// DEBUG_DATABASE
const char *name;
debugstr(flag, &name);
setDebugOption(fp, name, flag);
}
setDebugOption(fp, debugstr(flag), flag);
// Parse debug options
set_debug_flags(&config);

View File

@ -91,6 +91,11 @@ static char * __attribute__((malloc)) base64_encode(const uint8_t *data, const s
out_len = base64_encode_update(&ctx, encoded, length, data);
out_len += base64_encode_final(&ctx, encoded + out_len);
// Length check
if(out_len > BASE64_ENCODE_LENGTH(length) + BASE64_ENCODE_FINAL_LENGTH)
log_warn("Base64 encoding may have failed: Output buffer too small? (%zu > %zu)",
out_len, BASE64_ENCODE_LENGTH(length) + BASE64_ENCODE_FINAL_LENGTH);
return encoded;
}
@ -259,7 +264,7 @@ static bool parse_PHC_string(const char *phc, size_t *s_cost, size_t *t_cost, ui
// Decode salt and hash
size_t salt_len = 0;
*salt = base64_decode(salt_base64, &salt_len);
if(salt == NULL)
if(*salt == NULL)
{
// Error
log_err("Error while decoding salt: %s", strerror(errno));
@ -275,7 +280,7 @@ static bool parse_PHC_string(const char *phc, size_t *s_cost, size_t *t_cost, ui
size_t hash_len = 0;
*hash = base64_decode(hash_base64, &hash_len);
if(hash == NULL)
if(*hash == NULL)
{
// Error
log_err("Error while decoding hash: %s", strerror(errno));
@ -323,6 +328,7 @@ enum password_result verify_login(const char *password)
log_debug(DEBUG_API, "App password correct");
return APPPASSWORD_CORRECT;
}
// Return result
return pw;
}
@ -331,7 +337,7 @@ enum password_result verify_password(const char *password, const char *pwhash, c
{
// No password set
if(pwhash == NULL || pwhash[0] == '\0')
return PASSWORD_CORRECT;
return NO_PASSWORD_SET;
// No password supplied
if(password == NULL || password[0] == '\0')
@ -376,10 +382,8 @@ enum password_result verify_password(const char *password, const char *pwhash, c
// Free allocated memory
free(supplied);
if(salt != NULL)
free(salt);
if(config_hash != NULL)
free(config_hash);
free(salt);
free(config_hash);
// Successful logins do not count against rate-limiting
if(result)
@ -408,11 +412,10 @@ enum password_result verify_password(const char *password, const char *pwhash, c
writeFTLtoml(true);
free(new_hash);
}
}
// Successful logins do not count against rate-limiting
if(result)
// Successful logins do not count against rate-limiting
num_password_attempts--;
}
return result ? PASSWORD_CORRECT : PASSWORD_INCORRECT;
}
@ -476,8 +479,8 @@ static int performance_test_task(const size_t s_cost, const size_t t_cost, const
printf("s = %5zu, t = %5zu took %6.1f +/- %4.1f ms (scratch buffer %6.1f%1sB) -> %.0f\n",
s_cost, t_cost, 1e3*avg, 1e3*stdev, formatted, prefix, 1.0*(s_cost*t_cost)/avg);
// Break if test took longer than two seconds
if(avg > 2)
// Break if test took longer than half a second
if(avg > 0.5)
return 1;
return 0;
}
@ -604,8 +607,9 @@ bool set_and_check_password(struct conf_item *conf_item, const char *password)
// Get password hash as allocated string (an empty string is hashed to an empty string)
char *pwhash = strlen(password) > 0 ? create_password(password) : strdup("");
// Verify that the password hash is valid
if(verify_password(password, pwhash, false) != PASSWORD_CORRECT)
// Verify that the password hash is valid or that no password is set
const enum password_result status = verify_password(password, pwhash, false);
if(status != PASSWORD_CORRECT && status != NO_PASSWORD_SET)
{
free(pwhash);
log_warn("Failed to create password hash (verification failed), password remains unchanged");

View File

@ -26,6 +26,7 @@ enum password_result {
PASSWORD_INCORRECT = 0,
PASSWORD_CORRECT = 1,
APPPASSWORD_CORRECT = 2,
NO_PASSWORD_SET = 3,
PASSWORD_RATE_LIMITED = -1
} __attribute__((packed));

View File

@ -11,7 +11,7 @@
#include "FTL.h"
#include "log.h"
#include "config/config.h"
#include "setupVars.h"
#include "config/setupVars.h"
#include "datastructure.h"
unsigned int setupVarsElements = 0;
@ -19,6 +19,13 @@ char ** setupVarsArray = NULL;
static void get_conf_string_from_setupVars(const char *key, struct conf_item *conf_item)
{
// Verify we are allowed to use this function
if(conf_item->t != CONF_STRING && conf_item->t != CONF_STRING_ALLOCATED)
{
log_err("get_conf_string_from_setupVars(%s) failed: conf_item->t is neither CONF_STRING nor CONF_STRING_ALLOCATED", key);
return;
}
const char *setupVarsValue = read_setupVarsconf(key);
if(setupVarsValue == NULL)
{
@ -35,6 +42,7 @@ static void get_conf_string_from_setupVars(const char *key, struct conf_item *co
free(conf_item->v.s);
conf_item->v.s = strdup(setupVarsValue);
conf_item->t = CONF_STRING_ALLOCATED;
conf_item->f |= FLAG_CONF_IMPORTED;
// Free memory, harmless to call if read_setupVarsconf() didn't return a result
clearSetupVarsArray();
@ -43,8 +51,50 @@ static void get_conf_string_from_setupVars(const char *key, struct conf_item *co
log_debug(DEBUG_CONFIG, "setupVars.conf:%s -> Setting %s to %s", key, conf_item->k, conf_item->v.s);
}
static void get_conf_ipv4_from_setupVars(const char *key, struct conf_item *conf_item)
{
// Verify we are allowed to use this function
if(conf_item->t != CONF_STRUCT_IN_ADDR)
{
log_err("get_conf_ipv4_from_setupVars(%s) failed: conf_item->t != CONF_STRUCT_IN_ADDR", key);
return;
}
const char *setupVarsValue = read_setupVarsconf(key);
if(setupVarsValue == NULL)
{
// Do not change default value, this value is not set in setupVars.conf
log_debug(DEBUG_CONFIG, "setupVars.conf:%s -> Not set", key);
// Free memory, harmless to call if read_setupVarsconf() didn't return a result
clearSetupVarsArray();
return;
}
if(strlen(setupVarsValue) == 0)
memset(&conf_item->v.in_addr, 0, sizeof(struct in_addr));
else if(inet_pton(AF_INET, setupVarsValue, &conf_item->v.in_addr) != 1)
{
log_debug(DEBUG_CONFIG, "setupVars.conf:%s -> Invalid IPv4 address: %s", key, setupVarsValue);
memset(&conf_item->v.in_addr, 0, sizeof(struct in_addr));
}
// Free memory, harmless to call if read_setupVarsconf() didn't return a result
clearSetupVarsArray();
// Parameter present in setupVars.conf
log_debug(DEBUG_CONFIG, "setupVars.conf:%s -> Setting %s to %s", key, conf_item->k, inet_ntoa(conf_item->v.in_addr));
}
static void get_conf_bool_from_setupVars(const char *key, struct conf_item *conf_item)
{
// Verify we are allowed to use this function
if(conf_item->t != CONF_BOOL)
{
log_err("get_conf_bool_from_setupVars(%s) failed: conf_item->t != CONF_BOOL", key);
return;
}
const char *boolean = read_setupVarsconf(key);
if(boolean == NULL)
@ -70,8 +120,92 @@ static void get_conf_bool_from_setupVars(const char *key, struct conf_item *conf
key, conf_item->k, conf_item->v.b ? "true" : "false");
}
static void get_conf_string_array_from_setupVars(const char *key, struct conf_item *conf_item)
static void get_revServer_from_setupVars(void)
{
	// Migrate the four legacy REV_SERVER* settings from setupVars.conf into
	// a single entry of the dns.revServers JSON array, formatted as
	// "<active>,<cidr>,<target>,<domain>"
	bool active = false;
	char *active_copy = NULL;
	char *cidr = NULL;
	char *target = NULL;
	char *domain = NULL;

	const char *active_str = read_setupVarsconf("REV_SERVER");
	if(active_str == NULL)
	{
		// Do not change default value, this value is not set in setupVars.conf
		log_debug(DEBUG_CONFIG, "setupVars.conf:REV_SERVER -> Not set");

		// Free memory, harmless to call if read_setupVarsconf() didn't return a result
		clearSetupVarsArray();
		return;
	}

	// Parameter present in setupVars.conf
	active = getSetupVarsBool(active_str);

	// Duplicate the value before it is invalidated: read_setupVarsconf()
	// returns a pointer into shared state that clearSetupVarsArray() and
	// subsequent read_setupVarsconf() calls invalidate — this is why cidr,
	// target and domain are duplicated below, too. Previously, active_str
	// was used after this point without a copy (use-after-free).
	active_copy = strdup(active_str);

	// Free memory, harmless to call if read_setupVarsconf() didn't return a result
	clearSetupVarsArray();

	const char *cidr_str = read_setupVarsconf("REV_SERVER_CIDR");
	if(cidr_str != NULL)
	{
		cidr = strdup(cidr_str);
		trim_whitespace(cidr);
	}
	// Free memory, harmless to call if read_setupVarsconf() didn't return a result
	clearSetupVarsArray();

	const char *target_str = read_setupVarsconf("REV_SERVER_TARGET");
	if(target_str != NULL)
	{
		target = strdup(target_str);
		trim_whitespace(target);
	}
	// Free memory, harmless to call if read_setupVarsconf() didn't return a result
	clearSetupVarsArray();

	const char *domain_str = read_setupVarsconf("REV_SERVER_DOMAIN");
	if(domain_str != NULL)
	{
		domain = strdup(domain_str);
		trim_whitespace(domain);
	}
	// Free memory, harmless to call if read_setupVarsconf() didn't return a result
	clearSetupVarsArray();

	// Only migrate when the feature is enabled and all parts are present
	// (active_copy may be NULL if strdup() above failed)
	if(active && active_copy != NULL && cidr != NULL && target != NULL && domain != NULL)
	{
		// Build comma-separated string of all values
		// (three commas + terminating NUL = 4 extra bytes)
		const size_t len = strlen(active_copy) + strlen(cidr) + strlen(target) + strlen(domain) + 4;
		char *old = calloc(len, sizeof(char));
		if(old)
		{
			// Add to new config
			snprintf(old, len, "%s,%s,%s,%s", active_copy, cidr, target, domain);
			cJSON_AddItemToArray(config.dns.revServers.v.json, cJSON_CreateString(old));
			free(old);
		}
	}

	// Free memory (free(NULL) is a harmless no-op)
	free(active_copy);
	free(cidr);
	free(target);
	free(domain);
}
static void get_conf_string_array_from_setupVars_regex(const char *key, struct conf_item *conf_item)
{
// Verify we are allowed to use this function
if(conf_item->t != CONF_JSON_STRING_ARRAY)
{
log_err("get_conf_string_array_from_setupVars(%s) failed: conf_item->t != CONF_JSON_STRING_ARRAY", key);
return;
}
// Get clients which the user doesn't want to see
const char *array = read_setupVarsconf(key);
@ -80,12 +214,56 @@ static void get_conf_string_array_from_setupVars(const char *key, struct conf_it
getSetupVarsArray(array);
for (unsigned int i = 0; i < setupVarsElements; ++i)
{
// Convert to regex by adding ^ and $ to the string and replacing . with \.
// We need to allocate memory for this
char *regex = calloc(2*strlen(setupVarsArray[i]), sizeof(char));
if(regex == NULL)
{
log_warn("get_conf_string_array_from_setupVars(%s) failed: Could not allocate memory for regex", key);
continue;
}
// Copy string
strcpy(regex, setupVarsArray[i]);
// Replace . with \.
char *p = regex;
while(*p)
{
if(*p == '.')
{
// Move the rest of the string one character to the right
memmove(p + 1, p, strlen(p) + 1);
// Insert the escape character
*p = '\\';
// Skip the escape character
p++;
}
p++;
}
// Add ^ and $ to the string
char *regex2 = calloc(strlen(regex) + 3, sizeof(char));
if(regex2 == NULL)
{
log_warn("get_conf_string_array_from_setupVars(%s) failed: Could not allocate memory for regex2", key);
free(regex);
continue;
}
sprintf(regex2, "^%s$", regex);
// Free memory
free(regex);
// Add string to our JSON array
cJSON *item = cJSON_CreateString(setupVarsArray[i]);
cJSON *item = cJSON_CreateString(regex2);
cJSON_AddItemToArray(conf_item->v.json, item);
log_debug(DEBUG_CONFIG, "setupVars.conf:%s -> Setting %s[%u] = %s\n",
key, conf_item->k, i, item->valuestring);
key, conf_item->k, i, item->valuestring);
// Free memory
free(regex2);
}
}
@ -95,6 +273,13 @@ static void get_conf_string_array_from_setupVars(const char *key, struct conf_it
static void get_conf_upstream_servers_from_setupVars(struct conf_item *conf_item)
{
// Verify we are allowed to use this function
if(conf_item->t != CONF_JSON_STRING_ARRAY)
{
log_err("get_conf_upstream_servers_from_setupVars() failed: conf_item->t != CONF_JSON_STRING_ARRAY");
return;
}
// Try to import up to 50 servers...
#define MAX_SERVERS 50
for(unsigned int j = 0; j < MAX_SERVERS; j++)
@ -179,7 +364,7 @@ static void get_conf_weblayout_from_setupVars(void)
// If the property is set to false and different than "boxed", the property
// is disabled. This is consistent with the code in AdminLTE when writing
// this code
if(web_layout != NULL && strcasecmp(web_layout, "boxed") != 0)
if(strcasecmp(web_layout, "boxed") != 0)
config.webserver.interface.boxed.v.b = false;
// Free memory, harmless to call if read_setupVarsconf() didn't return a result
@ -311,6 +496,8 @@ static void get_conf_listeningMode_from_setupVars(void)
void importsetupVarsConf(void)
{
log_info("Migrating config from %s", config.files.setupVars.v.s);
// Try to obtain password hash from setupVars.conf
get_conf_string_from_setupVars("WEBPASSWORD", &config.webserver.api.pwhash);
@ -318,10 +505,10 @@ void importsetupVarsConf(void)
get_conf_bool_from_setupVars("BLOCKING_ENABLED", &config.dns.blocking.active);
// Get clients which the user doesn't want to see
get_conf_string_array_from_setupVars("API_EXCLUDE_CLIENTS", &config.webserver.api.excludeClients);
get_conf_string_array_from_setupVars_regex("API_EXCLUDE_CLIENTS", &config.webserver.api.excludeClients);
// Get domains which the user doesn't want to see
get_conf_string_array_from_setupVars("API_EXCLUDE_DOMAINS", &config.webserver.api.excludeDomains);
get_conf_string_array_from_setupVars_regex("API_EXCLUDE_DOMAINS", &config.webserver.api.excludeDomains);
// Try to obtain temperature hot value
get_conf_temp_limit_from_setupVars();
@ -353,16 +540,13 @@ void importsetupVarsConf(void)
get_conf_listeningMode_from_setupVars();
// Try to obtain REV_SERVER settings
get_conf_bool_from_setupVars("REV_SERVER", &config.dns.revServer.active);
get_conf_string_from_setupVars("REV_SERVER_CIDR", &config.dns.revServer.cidr);
get_conf_string_from_setupVars("REV_SERVER_TARGET", &config.dns.revServer.target);
get_conf_string_from_setupVars("REV_SERVER_DOMAIN", &config.dns.revServer.domain);
get_revServer_from_setupVars();
// Try to obtain DHCP settings
get_conf_bool_from_setupVars("DHCP_ACTIVE", &config.dhcp.active);
get_conf_string_from_setupVars("DHCP_START", &config.dhcp.start);
get_conf_string_from_setupVars("DHCP_END", &config.dhcp.end);
get_conf_string_from_setupVars("DHCP_ROUTER", &config.dhcp.router);
get_conf_ipv4_from_setupVars("DHCP_START", &config.dhcp.start);
get_conf_ipv4_from_setupVars("DHCP_END", &config.dhcp.end);
get_conf_ipv4_from_setupVars("DHCP_ROUTER", &config.dhcp.router);
get_conf_string_from_setupVars("DHCP_LEASETIME", &config.dhcp.leaseTime);
// If the DHCP lease time is set to "24", it is interpreted as "24h".
@ -380,6 +564,26 @@ void importsetupVarsConf(void)
get_conf_bool_from_setupVars("DHCP_RAPID_COMMIT", &config.dhcp.rapidCommit);
get_conf_bool_from_setupVars("queryLogging", &config.dns.queryLogging);
get_conf_string_from_setupVars("GRAVITY_TMPDIR", &config.files.gravity_tmp);
// Ports may be temporarily stored when importing a legacy Teleporter v5 file
get_conf_string_from_setupVars("WEB_PORTS", &config.webserver.port);
// Move the setupVars.conf file to setupVars.conf.old
char *old_setupVars = calloc(strlen(config.files.setupVars.v.s) + 5, sizeof(char));
if(old_setupVars == NULL)
{
log_warn("Could not allocate memory for old_setupVars");
return;
}
strcpy(old_setupVars, config.files.setupVars.v.s);
strcat(old_setupVars, ".old");
if(rename(config.files.setupVars.v.s, old_setupVars) != 0)
log_warn("Could not move %s to %s", config.files.setupVars.v.s, old_setupVars);
else
log_info("Moved %s to %s", config.files.setupVars.v.s, old_setupVars);
free(old_setupVars);
}
char* __attribute__((pure)) find_equals(char *s)
@ -497,15 +701,27 @@ void getSetupVarsArray(const char * input)
/* split string and append tokens to 'res' */
while (p) {
setupVarsArray = realloc(setupVarsArray, sizeof(char*) * ++setupVarsElements);
if(setupVarsArray == NULL) return;
char **tmp = realloc(setupVarsArray, sizeof(char*) * ++setupVarsElements);
if(tmp == NULL)
{
free(setupVarsArray);
setupVarsArray = NULL;
return;
}
setupVarsArray = tmp;
setupVarsArray[setupVarsElements-1] = p;
p = strtok(NULL, ",");
}
/* realloc one extra element for the last NULL */
setupVarsArray = realloc(setupVarsArray, sizeof(char*) * (setupVarsElements+1));
if(setupVarsArray == NULL) return;
char **tmp = realloc(setupVarsArray, sizeof(char*) * (setupVarsElements+1));
if(tmp == NULL)
{
free(setupVarsArray);
setupVarsArray = NULL;
return;
}
setupVarsArray = tmp;
setupVarsArray[setupVarsElements] = NULL;
}

320
src/config/suggest.c Normal file
View File

@ -0,0 +1,320 @@
/* Pi-hole: A black hole for Internet advertisements
* (c) 2023 Pi-hole, LLC (https://pi-hole.net)
* Network-wide ad blocking via your own hardware.
*
* FTL Engine
* String suggestion routines
*
* This file is copyright under the latest version of the EUPL.
* Please see LICENSE file for your rights under this license. */
#include "config/suggest.h"
// Return the smallest of the three given size_t values
static size_t min3(size_t x, size_t y, size_t z)
{
	size_t smallest = x;
	if(y < smallest)
		smallest = y;
	if(z < smallest)
		smallest = z;
	return smallest;
}
// Exchange the two size_t pointers the arguments refer to
static void swap(size_t **a, size_t **b)
{
	size_t *const original_a = *a;
	*a = *b;
	*b = original_a;
}
// The Levenshtein distance is a string metric for measuring the difference
// between two sequences. Informally, the Levenshtein distance between two words
// is the minimum number of single-character edits (insertions, deletions or
// substitutions) required to change one word into the other. It is named after
// the Soviet mathematician Vladimir Levenshtein, who considered this distance
// in 1965. (Wikipedia)
//
// For example, the Levenshtein distance between "kitten" and "sitting" is 3,
// since the following 3 edits change one into the other, and there is no way to
// do it with fewer than 3 edits:
// kitten -> sitten (substitution of "s" for "k"),
// sitten -> sittin (substitution of "i" for "e"),
// sittin -> sitting (insertion of "g" at the end).
//
// Our implementation follows the algorithm described in Wikipedia but was
// inspired by https://stackoverflow.com/a/71810739/2087442
static size_t levenshtein_distance(const char *s1, const size_t len1, const char *s2, const size_t len2)
{
	// Allocate two work rows of size len2 + 1 (two-row DP formulation)
	size_t *v0 = calloc(len2 + 1, sizeof(size_t));
	size_t *v1 = calloc(len2 + 1, sizeof(size_t));
	if(v0 == NULL || v1 == NULL)
	{
		// Allocation failure: previously, the NULL pointers were
		// dereferenced below. Free whichever row was obtained and
		// return an upper bound on the distance (a full rewrite of
		// the longer string is always possible)
		free(v0);
		free(v1);
		return len1 > len2 ? len1 : len2;
	}

	// Initialize v0
	// v0[j] = the Levenshtein distance between the empty string and s2[0..j]
	// v0[j] = j
	for (size_t j = 0; j <= len2; ++j)
		v0[j] = j;

	// Calculate v1
	// v1[j] = the Levenshtein distance between s1[0..i] and s2[0..j]
	// v1[j] = min(v0[j] + 1, v1[j - 1] + 1, v0[j - 1] + (s1[i] == s2[j] ? 0 : 1))
	for (size_t i = 0; i < len1; ++i)
	{
		// Distance between s1[0..i] and the empty string
		v1[0] = i + 1;

		// Loop over remaining columns
		for (size_t j = 0; j < len2; ++j)
		{
			// Calculate deletion, insertion and substitution costs
			const size_t delcost = v0[j + 1] + 1;
			const size_t inscost = v1[j] + 1;
			const size_t subcost = s1[i] == s2[j] ? v0[j] : v0[j] + 1;

			// Take the minimum of the three costs (see above)
			v1[j + 1] = min3(delcost, inscost, subcost);
		}

		// Swap addresses to avoid copying data around
		swap(&v0, &v1);
	}

	// Return the Levenshtein distance between s1 and s2
	size_t dist = v0[len2];
	free(v0);
	free(v1);
	return dist;
}
// The Bitap algorithm (also known as the shift-or, shift-and or Baeza-Yates-
// Gonnet algorithm) is an approximate string matching algorithm. The algorithm
// tells whether a given text contains a substring which is "approximately equal"
// to a given pattern, where approximate equality is defined in terms of Levenshtein
// distance — if the substring and pattern are within a given distance k of each
// other, then the algorithm considers them equal. (Wikipedia)
//
// Bitap distinguishes itself from other well-known string searching algorithms in
// its natural mapping onto simple bitwise operations
//
// Notice that in this implementation, counterintuitively, each bit with value
// zero indicates a match, and each bit with value 1 indicates a non-match. The
// same algorithm can be written with the intuitive semantics for 0 and 1, but
// in that case we must introduce another instruction into the inner loop to set
// R |= 1. In this implementation, we take advantage of the fact that
// left-shifting a value shifts in zeros on the right, which is precisely the
// behavior we need.
//
// This implementation is based on https://en.wikipedia.org/wiki/Bitap_algorithm
// Search <text> for a match of <pattern> allowing at most <k> character
// substitutions (insertions/deletions are NOT modeled by this recurrence).
// Returns a pointer to the start of the first match, or NULL if none was
// found. NOTE: the previous implementation accepted k but never used it,
// so the search was always exact; this implements the standard multi-row
// fuzzy Bitap recurrence (one bit array R[d] per allowed error count d).
static const char *__attribute__((pure)) bitap_bitwise_search(const char *text, const char *pattern,
                                                              const size_t pattern_len, unsigned int k)
{
	// The pattern bitmask pattern_mask is used to represent the pattern
	// string in a bitwise format. We use a size of 256 because our alphabet
	// is all values of an unsigned char (0-255).
	unsigned long pattern_mask[256];

	// Sanity checks
	if (pattern[0] == '\0')
		return text;
	if (pattern_len > 31)
		return NULL;

	// The bit arrays R[d] track the search state allowing up to d
	// substitutions. Since pattern_len <= 31, k never needs to exceed 31;
	// clamp it so the fixed-size array below is always large enough.
	if (k > 31)
		k = 31;
	unsigned long R[32];
	for (unsigned int d = 0; d <= k; ++d)
		R[d] = ~1UL;

	// Initialize the pattern bitmasks
	// First sets all bits in the bitmask to 1, ...
	for (unsigned int i = 0; i < sizeof(pattern_mask) / sizeof(*pattern_mask); ++i)
		pattern_mask[i] = ~0UL;
	// ... and then set the corresponding bit in the bitmask to 0 for each
	// character in the pattern (zero bit == match, see comment above)
	for (unsigned int i = 0; i < pattern_len; ++i)
		pattern_mask[(unsigned char)pattern[i]] &= ~(1UL << i);

	// Loop over all characters in the text
	for (unsigned int i = 0; text[i] != '\0'; ++i) {
		// Update all bit arrays, starting with the exact-match row.
		// old_Rd1 always holds R[d-1] from *before* this update step
		unsigned long old_Rd1 = R[0];
		R[0] |= pattern_mask[(unsigned char)text[i]];
		R[0] <<= 1;
		for (unsigned int d = 1; d <= k; ++d) {
			const unsigned long tmp = R[d];
			// A state survives either by matching this character
			// or by spending one substitution on it
			R[d] = (old_Rd1 & (R[d] | pattern_mask[(unsigned char)text[i]])) << 1;
			old_Rd1 = tmp;
		}

		// If the bit at the position corresponding to the pattern
		// length in R[k] is 0, a match of the pattern with at most k
		// substitutions has been found. Return the pointer to the
		// start of this match
		if ((R[k] & (1UL << pattern_len)) == 0)
			return (text + i - pattern_len) + 1;
	}

	// No match was found with the given allowed number of errors (k)
	return NULL;
}
// Returns the closest matching string using the Levenshtein distance
// Return the candidate from strings[] with the smallest Levenshtein distance
// to <string> (ties are resolved in favor of the later candidate, preserving
// the original ">=" comparison). Returns NULL only when nstrings == 0.
static const char *__attribute__((pure)) suggest_levenshtein(const char *strings[], size_t nstrings,
                                                             const char *string, const size_t string_len)
{
	// Start with the largest possible distance so the first candidate is
	// always accepted as the initial best match. The previous
	// initialization to the longest *candidate* length could reject every
	// candidate (and wrongly return NULL) when <string> is longer than all
	// candidates, as the distance can exceed that bound.
	size_t mindist = (size_t)-1;
	ssize_t minidx = -1;

	// Loop over all strings and find the closest match
	for (size_t i = 0; i < nstrings; ++i)
	{
		// Calculate the Levenshtein distance between the current string
		// (out of nstrings) and the string we are checking against
		const char *current = strings[i];
		const size_t dist = levenshtein_distance(current, strlen(current), string, string_len);

		// If the distance is no larger than the smallest minimum we
		// found so far, update the minimum and the index of the
		// closest match
		if (mindist >= dist)
		{
			mindist = dist;
			minidx = i;
		}
	}

	// Return NULL if no match was found (this can only happen if no
	// strings were given)
	if(minidx == -1)
		return NULL;

	// else: Return the closest match
	return strings[minidx];
}
// Returns the closest matching strings using fuzzy searching
// Collect up to num_results fuzzy matches for <string> from strings[] into
// results[] using the Bitap algorithm, trying ever larger error budgets.
// Returns the number of matches stored in results[].
static unsigned int __attribute__((pure)) suggest_bitap(const char *strings[], size_t nstrings,
                                                        const char *string, const size_t string_len,
                                                        char **results, unsigned int num_results)
{
	unsigned int found = 0;

	// Search rounds with an increasing number of allowed errors: round 0
	// collects exact matches, each further round allows one more error
	for(unsigned int errors = 0; errors < string_len && found < num_results; errors++)
	{
		// Scan every candidate string in this round
		for(size_t idx = 0; idx < nstrings && found < num_results; idx++)
		{
			// Run the Bitap search on the current candidate
			const char *match = bitap_bitwise_search(strings[idx], string, string_len, errors);

			// Record the match (a pointer into the candidate string)
			if(match != NULL)
				results[found++] = (char*)match;
		}
	}

	// Number of matches collected
	return found;
}
// Find string from list that starts with the given string
// Return the first entry of strings[] whose first string_len characters
// equal <string> case-insensitively, or NULL if no entry qualifies
static const char *__attribute__((pure)) startswith(const char *strings[], size_t nstrings,
                                                    const char *string, const size_t string_len)
{
	for (size_t idx = 0; idx < nstrings; ++idx)
	{
		const char *candidate = strings[idx];

		// Keep scanning unless this candidate has the wanted prefix
		if(strncasecmp(candidate, string, string_len) != 0)
			continue;

		// Case-insensitive prefix match found
		return candidate;
	}

	// No candidate starts with the given string
	return NULL;
}
// Try to find up to two matches using the Bitap algorithm and one using the
// Levenshtein distance
#define MAX_MATCHES 6
// Build a NULL-free list of up to MAX_MATCHES suggestion strings for
// <string>: up to (MAX_MATCHES - 2) fuzzy Bitap matches, one prefix match,
// and one Levenshtein-closest match, with duplicates removed. The number of
// suggestions is returned via N. Returns a heap-allocated array the caller
// has to free, or NULL with *N == 0 on allocation failure.
// NOTE(review): the pure attribute is dubious here (writes *N, allocates) —
// confirm it is intended.
static char **__attribute__((pure)) suggest_closest(const char *strings[], size_t nstrings,
                                                    const char *string, const size_t string_len,
                                                    unsigned int *N)
{
	// Allocate memory for MAX_MATCHES matches
	char** matches = calloc(MAX_MATCHES, sizeof(char*));
	if(matches == NULL)
	{
		// Allocation failure: previously the NULL result was written
		// through unconditionally below. Report zero matches instead.
		*N = 0;
		return NULL;
	}

	// Try to find (MAX_MATCHES - 2) matches using the Bitap algorithm
	*N = suggest_bitap(strings, nstrings, string, string_len, matches, MAX_MATCHES - 2);

	// Try to find a match that starts with the given string
	// (may be NULL, removed during compaction below)
	matches[(*N)++] = (char*)startswith(strings, nstrings, string, string_len);

	// Try to find a last match using the Levenshtein distance
	matches[(*N)++] = (char*)suggest_levenshtein(strings, nstrings, string, string_len);

	// Loop over matches and remove duplicates
	for(unsigned int i = 0; i < *N; ++i)
	{
		// Skip if there is no match here
		if(matches[i] == NULL)
			continue;

		// Loop over all matches after the current one
		for(unsigned int j = i + 1; j < *N; ++j)
		{
			// Set all duplicates to NULL
			if(matches[j] != NULL && strcmp(matches[i], matches[j]) == 0)
			{
				matches[j] = NULL;
			}
		}
	}

	// Remove NULL entries from the list of matches
	unsigned int j = 0;
	for(unsigned int i = 0; i < *N; ++i)
	{
		// If the i-th element is not NULL, the i-th element is assigned
		// to the j-th position in the array, and j is incremented by 1.
		// This effectively moves non-NULL elements towards the front of
		// the array.
		if(matches[i] != NULL)
			matches[j++] = matches[i];
	}

	// Update the number of matches to the number of non-NULL elements
	*N = j;

	// Return the list of matches
	return matches;
}
// Return a heap-allocated list of config-key suggestions closest to <string>.
// With env == true the environment variable names (conf_item->e) are searched,
// otherwise the config keys (conf_item->k). The number of suggestions is
// returned via N; the caller is responsible for freeing the returned array.
char **suggest_closest_conf_key(const bool env, const char *string, unsigned int *N)
{
	// Gather the searchable key of every known config item
	const char *conf_keys[CONFIG_ELEMENTS] = { NULL };
	for(unsigned int i = 0; i < CONFIG_ELEMENTS; i++)
	{
		struct conf_item *item = get_conf_item(&config, i);
		if(item == NULL)
			continue;

		// Use either the environment key or the config key
		conf_keys[i] = env ? item->e : item->k;
	}

	// Delegate the actual matching to the generic routine
	return suggest_closest(conf_keys, CONFIG_ELEMENTS, string, strlen(string), N);
}

19
src/config/suggest.h Normal file
View File

@ -0,0 +1,19 @@
/* Pi-hole: A black hole for Internet advertisements
* (c) 2023 Pi-hole, LLC (https://pi-hole.net)
* Network-wide ad blocking via your own hardware.
*
* FTL Engine
* String suggestion prototypes
*
* This file is copyright under the latest version of the EUPL.
* Please see LICENSE file for your rights under this license. */
// Guard renamed from LEVENSHTEIN_H: this file is suggest.h, and reusing
// the old guard name could clash with a separate levenshtein.h header
#ifndef SUGGEST_H
#define SUGGEST_H

#include "FTL.h"
// union conf_value
#include "config.h"

char **suggest_closest_conf_key(const bool env, const char *string, unsigned int *N);

#endif //SUGGEST_H

View File

@ -8,7 +8,6 @@
* This file is copyright under the latest version of the EUPL.
* Please see LICENSE file for your rights under this license. */
#include "FTL.h"
#include "toml_helper.h"
#include "log.h"
#include "config/config.h"
@ -20,33 +19,64 @@
#include "files.h"
//set_and_check_password()
#include "config/password.h"
// PATH_MAX
#include <limits.h>
// Open the TOML file for reading or writing
FILE * __attribute((malloc)) __attribute((nonnull(1))) openFTLtoml(const char *mode)
FILE * __attribute((malloc)) __attribute((nonnull(1))) openFTLtoml(const char *mode, const unsigned int version)
{
FILE *fp;
// Rotate config file, no rotation is done when the file is opened for
// reading (mode == "r")
if(mode[0] != 'r')
rotate_files(GLOBALTOMLPATH, NULL);
// This should not happen, install a safeguard anyway to unveil
// possible future coding issues early on
if(mode[0] == 'w' && version != 0)
{
log_crit("Writing to version != 0 is not supported in openFTLtoml(%s,%u)",
mode, version);
exit(EXIT_FAILURE);
}
// No readable local file found, try global file
fp = fopen(GLOBALTOMLPATH, mode);
// Build filename based on version
char filename[PATH_MAX] = { 0 };
if(version == 0)
{
// Use global config file
strncpy(filename, GLOBALTOMLPATH, sizeof(filename));
// Append ".tmp" if we are writing
if(mode[0] == 'w')
strncat(filename, ".tmp", sizeof(filename));
}
else
{
// Use rotated config file
snprintf(filename, sizeof(filename), BACKUP_DIR"/pihole.toml.%u", version);
}
// Try to open config file
FILE *fp = fopen(filename, mode);
// Return early if opening failed
if(!fp)
{
log_info("Config %sfile %s not available: %s",
version > 0 ? "backup " : "", filename, strerror(errno));
return NULL;
}
// Lock file, may block if the file is currently opened
if(flock(fileno(fp), LOCK_EX) != 0)
{
const int _e = errno;
log_err("Cannot open FTL's config file in exclusive mode: %s", strerror(errno));
log_err("Cannot open config file %s in exclusive mode: %s",
filename, strerror(errno));
fclose(fp);
errno = _e;
return NULL;
}
// Log if we are using a backup file
if(version > 0)
log_info("Using config backup %s", filename);
errno = 0;
return fp;
}
@ -123,17 +153,51 @@ static void printTOMLstring(FILE *fp, const char *s, const bool toml)
continue;
}
// Escape special characters
// Escape special characters with simple escape sequences
switch (ch) {
case 0x08: fprintf(fp, "\\b"); continue;
case 0x09: fprintf(fp, "\\t"); continue;
case 0x0a: fprintf(fp, "\\n"); continue;
case 0x0c: fprintf(fp, "\\f"); continue;
case 0x0d: fprintf(fp, "\\r"); continue;
case '"': fprintf(fp, "\\\""); continue;
case '\\': fprintf(fp, "\\\\"); continue;
default: fprintf(fp, "\\0x%02x", ch & 0xff); continue;
case '\b': fputs("\\b", fp); continue;
case '\t': fputs("\\t", fp); continue;
case '\n': fputs("\\n", fp); continue;
case '\f': fputs("\\f", fp); continue;
case '\r': fputs("\\r", fp); continue;
case '"': fputs("\\\"", fp); continue;
case '\\': fputs("\\\\", fp); continue;
}
#ifndef TOML_UTF8
// The Universal Coded Character Set (UCS, Unicode) is a
// standard set of characters defined by the international
// standard ISO/IEC 10646, Information technology — Universal
// Coded Character Set (UCS) (plus amendments to that standard),
// which is the basis of many character encodings, improving as
// characters from previously unrepresented typing systems are
// added.
// The following code converts a UTF-8 character to UCS and
// prints it as \UXXXXXXXX
int64_t ucs;
int bytes = toml_utf8_to_ucs(s, len, &ucs);
if(bytes > 0)
{
// Print 4-byte UCS as \UXXXXXXXX
fprintf(fp, "\\U%08X", (uint32_t)ucs);
// Advance string pointer
s += bytes - 1;
// Decrease remaining string length
len -= bytes - 1;
continue;
}
#else
// Escape all other control characters as short 2-byte
// UCS sequences
if(iscntrl(ch))
{
fprintf(fp, "\\u%04X", ch);
continue;
}
// Print remaining characters as is
putc(ch, fp);
#endif
}
if(toml) fprintf(fp, "\"");
}
@ -364,6 +428,13 @@ void writeTOMLvalue(FILE * fp, const int indent, const enum conf_type t, union c
break;
case CONF_STRUCT_IN_ADDR:
{
// Special case: 0.0.0.0 -> return empty string
if(v->in_addr.s_addr == INADDR_ANY)
{
printTOMLstring(fp, "", toml);
break;
}
// else: normal address
char addr4[INET_ADDRSTRLEN] = { 0 };
inet_ntop(AF_INET, &v->in_addr, addr4, INET_ADDRSTRLEN);
printTOMLstring(fp, addr4, toml);
@ -371,6 +442,13 @@ void writeTOMLvalue(FILE * fp, const int indent, const enum conf_type t, union c
}
case CONF_STRUCT_IN6_ADDR:
{
// Special case: :: -> return empty string
if(memcmp(&v->in6_addr, &in6addr_any, sizeof(in6addr_any)) == 0)
{
printTOMLstring(fp, "", toml);
break;
}
// else: normal address
char addr6[INET6_ADDRSTRLEN] = { 0 };
inet_ntop(AF_INET6, &v->in6_addr, addr6, INET6_ADDRSTRLEN);
printTOMLstring(fp, addr6, toml);
@ -654,7 +732,12 @@ void readTOMLvalue(struct conf_item *conf_item, const char* key, toml_table_t *t
const toml_datum_t val = toml_string_in(toml, key);
if(val.ok)
{
if(inet_pton(AF_INET, val.u.s, &addr4))
if(strlen(val.u.s) == 0)
{
// Special case: empty string -> 0.0.0.0
conf_item->v.in_addr.s_addr = INADDR_ANY;
}
else if(inet_pton(AF_INET, val.u.s, &addr4))
memcpy(&conf_item->v.in_addr, &addr4, sizeof(addr4));
else
log_warn("Config %s is invalid (not of type IPv4 address)", conf_item->k);
@ -670,7 +753,12 @@ void readTOMLvalue(struct conf_item *conf_item, const char* key, toml_table_t *t
const toml_datum_t val = toml_string_in(toml, key);
if(val.ok)
{
if(inet_pton(AF_INET6, val.u.s, &addr6))
if(strlen(val.u.s) == 0)
{
// Special case: empty string -> ::
memcpy(&conf_item->v.in6_addr, &in6addr_any, sizeof(in6addr_any));
}
else if(inet_pton(AF_INET6, val.u.s, &addr6))
memcpy(&conf_item->v.in6_addr, &addr6, sizeof(addr6));
else
log_warn("Config %s is invalid (not of type IPv6 address)", conf_item->k);
@ -720,249 +808,3 @@ void readTOMLvalue(struct conf_item *conf_item, const char* key, toml_table_t *t
}
}
}
#define FTLCONF_PREFIX "FTLCONF_"

// Try to read a config item from a corresponding FTLCONF_* environment
// variable (dots in the config key are mapped to underscores, the
// convention used in v5.x and earlier).
// Returns true when the variable exists - even if its content was
// invalid and only a warning was logged - and false when it is unset.
bool readEnvValue(struct conf_item *conf_item, struct config *newconf)
{
	// Allocate memory for config key + prefix (sizeof includes the trailing '\0')
	const size_t envkey_size = strlen(conf_item->k) + sizeof(FTLCONF_PREFIX);
	char *envkey = calloc(envkey_size, sizeof(char));
	if(envkey == NULL)
	{
		// Allocation failure - treat this item as "no env var set"
		log_warn("readEnvValue(): failed to allocate %zu bytes", envkey_size);
		return false;
	}

	// Build env key to look for
	strcpy(envkey, FTLCONF_PREFIX);
	strcat(envkey, conf_item->k);

	// Replace all "." by "_" as this is the convention used in v5.x and earlier
	for(unsigned int i = 0; i < envkey_size - 1; i++)
		if(envkey[i] == '.')
			envkey[i] = '_';

	// First check if an environment variable with the given key exists
	const char *envvar = getenv(envkey);

	// Return early if this environment variable does not exist
	if(envvar == NULL)
	{
		log_debug(DEBUG_CONFIG, "ENV %s is not set", envkey);
		free(envkey);
		return false;
	}

	log_debug(DEBUG_CONFIG, "ENV %s = \"%s\"", envkey, envvar);

	// Parse the environment variable according to the type of the
	// config item it overrides
	switch(conf_item->t)
	{
		case CONF_BOOL:
		{
			if(strcasecmp(envvar, "true") == 0 || strcasecmp(envvar, "yes") == 0)
				conf_item->v.b = true;
			else if(strcasecmp(envvar, "false") == 0 || strcasecmp(envvar, "no") == 0)
				conf_item->v.b = false;
			else
				log_warn("ENV %s is not of type bool", envkey);
			break;
		}
		case CONF_ALL_DEBUG_BOOL:
		{
			// Special bool that toggles every debug flag at once
			if(strcasecmp(envvar, "true") == 0 || strcasecmp(envvar, "yes") == 0)
				set_all_debug(newconf, true);
			else if(strcasecmp(envvar, "false") == 0 || strcasecmp(envvar, "no") == 0)
				set_all_debug(newconf, false);
			else
				log_warn("ENV %s is not of type bool", envkey);
			break;
		}
		case CONF_INT:
		{
			int val = 0;
			if(sscanf(envvar, "%i", &val) == 1)
				conf_item->v.i = val;
			else
				log_warn("ENV %s is not of type integer", envkey);
			break;
		}
		case CONF_UINT:
		{
			unsigned int val = 0;
			if(sscanf(envvar, "%u", &val) == 1)
				conf_item->v.ui = val;
			else
				log_warn("ENV %s is not of type unsigned integer", envkey);
			break;
		}
		case CONF_UINT16:
		{
			// Parsed as unsigned int, then range-checked against UINT16_MAX
			unsigned int val = 0;
			if(sscanf(envvar, "%u", &val) == 1 && val <= UINT16_MAX)
				conf_item->v.ui = val;
			else
				log_warn("ENV %s is not of type unsigned integer (16 bit)", envkey);
			break;
		}
		case CONF_LONG:
		{
			long val = 0;
			if(sscanf(envvar, "%li", &val) == 1)
				conf_item->v.l = val;
			else
				log_warn("ENV %s is not of type long", envkey);
			break;
		}
		case CONF_ULONG:
		{
			unsigned long val = 0;
			if(sscanf(envvar, "%lu", &val) == 1)
				conf_item->v.ul = val;
			else
				log_warn("ENV %s is not of type unsigned long", envkey);
			break;
		}
		case CONF_DOUBLE:
		{
			double val = 0;
			if(sscanf(envvar, "%lf", &val) == 1)
				conf_item->v.d = val;
			else
				log_warn("ENV %s is not of type double", envkey);
			break;
		}
		case CONF_STRING:
		case CONF_STRING_ALLOCATED:
		{
			// Release a previously heap-allocated value before replacing it
			if(conf_item->t == CONF_STRING_ALLOCATED)
				free(conf_item->v.s);
			conf_item->v.s = strdup(envvar);
			// The new value is always heap-allocated
			conf_item->t = CONF_STRING_ALLOCATED;
			break;
		}
		case CONF_ENUM_PTR_TYPE:
		{
			const int ptr_type = get_ptr_type_val(envvar);
			if(ptr_type != -1)
				conf_item->v.ptr_type = ptr_type;
			else
				log_warn("ENV %s is invalid, allowed options are: %s", envkey, conf_item->h);
			break;
		}
		case CONF_ENUM_BUSY_TYPE:
		{
			const int busy_reply = get_busy_reply_val(envvar);
			if(busy_reply != -1)
				conf_item->v.busy_reply = busy_reply;
			else
				log_warn("ENV %s is invalid, allowed options are: %s", envkey, conf_item->h);
			break;
		}
		case CONF_ENUM_BLOCKING_MODE:
		{
			const int blocking_mode = get_blocking_mode_val(envvar);
			if(blocking_mode != -1)
				conf_item->v.blocking_mode = blocking_mode;
			else
				log_warn("ENV %s is invalid, allowed options are: %s", envkey, conf_item->h);
			break;
		}
		case CONF_ENUM_REFRESH_HOSTNAMES:
		{
			const int refresh_hostnames = get_refresh_hostnames_val(envvar);
			if(refresh_hostnames != -1)
				conf_item->v.refresh_hostnames = refresh_hostnames;
			else
				log_warn("ENV %s is invalid, allowed options are: %s", envkey, conf_item->h);
			break;
		}
		case CONF_ENUM_LISTENING_MODE:
		{
			const int listeningMode = get_listeningMode_val(envvar);
			if(listeningMode != -1)
				conf_item->v.listeningMode = listeningMode;
			else
				log_warn("ENV %s is invalid, allowed options are: %s", envkey, conf_item->h);
			break;
		}
		case CONF_ENUM_WEB_THEME:
		{
			const int web_theme = get_web_theme_val(envvar);
			if(web_theme != -1)
				conf_item->v.web_theme = web_theme;
			else
				log_warn("ENV %s is invalid, allowed options are: %s", envkey, conf_item->h);
			break;
		}
		case CONF_ENUM_TEMP_UNIT:
		{
			const int temp_unit = get_temp_unit_val(envvar);
			if(temp_unit != -1)
				conf_item->v.temp_unit = temp_unit;
			else
				log_warn("ENV %s is invalid, allowed options are: %s", envkey, conf_item->h);
			break;
		}
		case CONF_ENUM_PRIVACY_LEVEL:
		{
			// Integer restricted to the valid privacy-level range
			int val = 0;
			if(sscanf(envvar, "%i", &val) == 1 && val >= PRIVACY_SHOW_ALL && val <= PRIVACY_MAXIMUM)
				conf_item->v.i = val;
			else
				log_warn("ENV %s is invalid (not of type integer or outside allowed bounds)", envkey);
			break;
		}
		case CONF_STRUCT_IN_ADDR:
		{
			struct in_addr addr4 = { 0 };
			if(inet_pton(AF_INET, envvar, &addr4))
				memcpy(&conf_item->v.in_addr, &addr4, sizeof(addr4));
			else
				log_warn("ENV %s is invalid (not of type IPv4 address)", envkey);
			break;
		}
		case CONF_STRUCT_IN6_ADDR:
		{
			struct in6_addr addr6 = { 0 };
			if(inet_pton(AF_INET6, envvar, &addr6))
				memcpy(&conf_item->v.in6_addr, &addr6, sizeof(addr6));
			else
				log_warn("ENV %s is invalid (not of type IPv6 address)", envkey);
			break;
		}
		case CONF_JSON_STRING_ARRAY:
		{
			// Make a copy of envvar as strtok modified the input string
			char *envvar_copy = strdup(envvar);
			// Free previously allocated JSON array
			cJSON_Delete(conf_item->v.json);
			conf_item->v.json = cJSON_CreateArray();
			// Parse envvar array and generate a JSON array (env var
			// arrays are ;-delimited)
			const char delim[] =";";
			const char *elem = strtok(envvar_copy, delim);
			while(elem != NULL)
			{
				// Only import non-empty entries
				if(strlen(elem) > 0)
				{
					// Add string to our JSON array
					cJSON *item = cJSON_CreateString(elem);
					cJSON_AddItemToArray(conf_item->v.json, item);
				}
				// Search for the next element
				elem = strtok(NULL, delim);
			}
			free(envvar_copy);
			break;
		}
		case CONF_PASSWORD:
		{
			if(!set_and_check_password(conf_item, envvar))
			{
				log_warn("ENV %s is invalid", envkey);
				break;
			}
			// Explicit break so nothing falls through if further cases
			// are ever appended below this one
			break;
		}
	}

	// Free allocated env var name
	free(envkey);

	return true;
}

View File

@ -17,12 +17,11 @@
#include "tomlc99/toml.h"
void indentTOML(FILE *fp, const unsigned int indent);
FILE *openFTLtoml(const char *mode) __attribute((malloc)) __attribute((nonnull(1)));
FILE *openFTLtoml(const char *mode, const unsigned int version) __attribute((malloc)) __attribute((nonnull(1)));
void closeFTLtoml(FILE *fp);
void print_comment(FILE *fp, const char *str, const char *intro, const unsigned int width, const unsigned int indent);
void print_toml_allowed_values(cJSON *allowed_values, FILE *fp, const unsigned int width, const unsigned int indent);
void writeTOMLvalue(FILE * fp, const int indent, const enum conf_type t, union conf_value *v);
void readTOMLvalue(struct conf_item *conf_item, const char* key, toml_table_t *toml, struct config *newconf);
bool readEnvValue(struct conf_item *conf_item, struct config *newconf);
#endif //CONFIG_WRITER_H

View File

@ -10,7 +10,7 @@
#include "FTL.h"
#include "toml_reader.h"
#include "setupVars.h"
#include "config/setupVars.h"
#include "log.h"
// getprio(), setprio()
#include <sys/resource.h>
@ -23,20 +23,65 @@
#include "config/toml_helper.h"
// delete_all_sessions()
#include "api/api.h"
// readEnvValue()
#include "config/env.h"
// Private prototypes
static toml_table_t *parseTOML(void);
static toml_table_t *parseTOML(const unsigned int version);
static void reportDebugFlags(void);
// Migrate config from old to new, returns true if a restart is required
// Migrate config from old to new, returns true if a restart is required.
// Converts the legacy dns.revServer table (active/cidr/target/domain)
// into a single comma-separated entry of the new dns.revServers array.
static bool migrate_config(toml_table_t *toml, struct config *newconf)
{
	bool restart = false;

	toml_table_t *dns = toml_table_in(toml, "dns");
	if(dns)
	{
		toml_table_t *revServer = toml_table_in(dns, "revServer");
		if(revServer)
		{
			// Read old config
			toml_datum_t active = toml_bool_in(revServer, "active");
			toml_datum_t cidr = toml_string_in(revServer, "cidr");
			toml_datum_t target = toml_string_in(revServer, "target");
			toml_datum_t domain = toml_string_in(revServer, "domain");

			// Necessary condition: all values must exist and CIDR and target must not be empty
			if(active.ok && cidr.ok && target.ok && domain.ok &&
			   strlen(cidr.u.s) > 0 && strlen(target.u.s) > 0)
			{
				// "true"/"false" (max. 5) + three strings + 3 commas + '\0'
				const size_t oldlen = 5 + strlen(cidr.u.s) + strlen(target.u.s) + strlen(domain.u.s) + 4;
				char *old = calloc(oldlen, sizeof(char));
				if(old)
				{
					// Build comma-separated string of all values.
					// active came from toml_bool_in() -> the valid
					// union member is u.b, NOT u.s
					snprintf(old, oldlen, "%s,%s,%s,%s", active.u.b ? "true" : "false",
					         cidr.u.s, target.u.s, domain.u.s);
					log_debug(DEBUG_CONFIG, "Config setting dns.revServer MIGRATED: %s", old);
					// cJSON_CreateString() copies the buffer, so we can
					// release our temporary string afterwards
					cJSON_AddItemToArray(newconf->dns.revServers.v.json, cJSON_CreateString(old));
					free(old);
					restart = true;
				}
			}
			else
				// Render the boolean datum as text - passing u.s of a
				// bool datum to %s would be undefined behavior
				log_warn("Config setting dns.revServer INVALID - ignoring: %s %s %s %s",
				         active.ok ? (active.u.b ? "true" : "false") : "NULL",
				         cidr.ok ? cidr.u.s : "NULL",
				         target.ok ? target.u.s : "NULL",
				         domain.ok ? domain.u.s : "NULL");

			// Strings returned by toml_string_in() are heap-allocated
			// and owned by the caller - release them
			if(cidr.ok)
				free(cidr.u.s);
			if(target.ok)
				free(target.u.s);
			if(domain.ok)
				free(domain.u.s);
		}
		else
			log_info("dns.revServer DOES NOT EXIST");
	}
	else
		log_info("dns DOES NOT EXIST");

	return restart;
}
bool readFTLtoml(struct config *oldconf, struct config *newconf,
toml_table_t *toml, const bool verbose, bool *restart)
toml_table_t *toml, const bool verbose, bool *restart,
const unsigned int version)
{
// Parse lines in the config file if we did not receive a pointer to a TOML
// table from an imported Teleporter file
bool teleporter = (toml != NULL);
if(!teleporter)
{
toml = parseTOML();
toml = parseTOML(version);
if(!toml)
return false;
}
@ -59,8 +104,8 @@ bool readFTLtoml(struct config *oldconf, struct config *newconf,
}
set_debug_flags(newconf);
log_debug(DEBUG_CONFIG, "Reading %s TOML config file: full config",
teleporter ? "teleporter" : "default");
log_debug(DEBUG_CONFIG, "Reading %s TOML config file",
teleporter ? "teleporter" : version == 0 ? "default" : "backup");
// Read all known config items
for(unsigned int i = 0; i < CONFIG_ELEMENTS; i++)
@ -123,27 +168,30 @@ bool readFTLtoml(struct config *oldconf, struct config *newconf,
}
}
// Migrate config from old to new
if(migrate_config(toml, newconf) && restart != NULL)
*restart = true;
// Report debug config if enabled
set_debug_flags(newconf);
if(verbose)
reportDebugFlags();
// Print FTL environment variables (if used)
printFTLenv();
// Free memory allocated by the TOML parser and return success
toml_free(toml);
return true;
}
// Parse TOML config file
static toml_table_t *parseTOML(void)
static toml_table_t *parseTOML(const unsigned int version)
{
// Try to open default config file. Use fallback if not found
FILE *fp;
if((fp = openFTLtoml("r")) == NULL)
{
log_warn("No config file available (%s), using defaults",
strerror(errno));
if((fp = openFTLtoml("r", version)) == NULL)
return NULL;
}
// Parse lines in the config file
char errbuf[200];
@ -167,7 +215,7 @@ bool getLogFilePathTOML(void)
{
log_debug(DEBUG_CONFIG, "Reading TOML config file: log file path");
toml_table_t *conf = parseTOML();
toml_table_t *conf = parseTOML(0);
if(!conf)
return false;
@ -209,11 +257,10 @@ static void reportDebugFlags(void)
// Read all known debug config items
for(unsigned int debug_flag = 1; debug_flag < DEBUG_ELEMENTS; debug_flag++)
{
const char *name;
// Get name of debug flag
// We do not need to add an offset as this loop starts counting
// at 1
debugstr(debug_flag, &name);
const char *name = debugstr(debug_flag);
// Calculate number of spaces to nicely align output
int spaces = 20 - strlen(name);
// Print debug flag

View File

@ -14,7 +14,8 @@
#include "tomlc99/toml.h"
bool readFTLtoml(struct config *oldconf, struct config *newconf,
toml_table_t *toml, const bool verbose, bool *restart);
toml_table_t *toml, const bool verbose, bool *restart,
const unsigned int version);
bool getLogFilePathTOML(void);
#endif //TOML_READER_H

View File

@ -19,63 +19,39 @@
#include "datastructure.h"
// watch_config()
#include "config/inotify.h"
// files_different()
#include "files.h"
// Perform in-place config migrations before writing the config file.
// Currently: carry a customized dhcp.domain over to dns.domain when the
// latter is still at its default value.
static void migrate_config(void)
{
	// Nothing to do when dns.domain was already customized or when
	// dhcp.domain is still at its default
	const bool dns_is_default = strcmp(config.dns.domain.v.s, config.dns.domain.d.s) == 0;
	const bool dhcp_is_default = strcmp(config.dhcp.domain.v.s, config.dhcp.domain.d.s) == 0;
	if(!dns_is_default || dhcp_is_default)
		return;

	// Migrate dhcp.domain -> dns.domain
	log_info("Migrating dhcp.domain = \"%s\" -> dns.domain", config.dhcp.domain.v.s);

	// Release a previously heap-allocated value before overwriting it
	if(config.dns.domain.t == CONF_STRING_ALLOCATED)
		free(config.dns.domain.v.s);

	config.dns.domain.v.s = strdup(config.dhcp.domain.v.s);
	config.dns.domain.t = CONF_STRING_ALLOCATED;
}
// defined in config/config.c
extern uint8_t last_checksum[SHA256_DIGEST_SIZE];
bool writeFTLtoml(const bool verbose)
{
// Stop watching for changes in the config file
watch_config(false);
// Try to open global config file
// Try to open a temporary config file for writing
FILE *fp;
if((fp = openFTLtoml("w")) == NULL)
if((fp = openFTLtoml("w", 0)) == NULL)
{
log_warn("Cannot write to FTL config file (%s), content not updated", strerror(errno));
// Restart watching for changes in the config file
watch_config(true);
return false;
}
// Log that we are (re-)writing the config file if either in verbose or
// debug mode
if(verbose || config.debug.config.v.b)
log_info("Writing config file");
// Write header
fputs("# This file is managed by pihole-FTL\n#\n", fp);
fputs("# Do not edit the file while FTL is\n", fp);
fputs("# running or your changes may be overwritten\n#\n", fp);
fprintf(fp, "# Pi-hole configuration file (%s)\n", get_FTL_version());
#ifdef TOML_UTF8
fputs("# Encoding: UTF-8\n", fp);
#else
fputs("# Encoding: ASCII + UCS\n", fp);
#endif
fputs("# This file is managed by pihole-FTL\n", fp);
char timestring[TIMESTR_SIZE] = "";
get_timestr(timestring, time(NULL), false, false);
fputs("# Last updated on ", fp);
fputs(timestring, fp);
fputs("\n# by FTL ", fp);
fputs(get_FTL_version(), fp);
fputs("\n\n", fp);
// Perform possible config migration
migrate_config();
// Iterate over configuration and store it into the file
char *last_path = (char*)"";
unsigned int modified = 0, env_vars = 0;
for(unsigned int i = 0; i < CONFIG_ELEMENTS; i++)
{
// Get pointer to memory location of this conf_item
@ -112,7 +88,10 @@ bool writeFTLtoml(const bool verbose)
// Print info if this value is overwritten by an env var
if(conf_item->f & FLAG_ENV_VAR)
{
print_comment(fp, ">>> This config is overwritten by an environmental variable <<<", "", 85, level-1);
env_vars++;
}
// Write value
indentTOML(fp, level-1);
@ -132,17 +111,72 @@ bool writeFTLtoml(const bool verbose)
{
fprintf(fp, " ### CHANGED, default = ");
writeTOMLvalue(fp, -1, conf_item->t, &conf_item->d);
modified++;
}
// Add newlines after each entry
fputs("\n\n", fp);
}
// Log some statistics in verbose mode
if(verbose || config.debug.config.v.b)
{
log_info("Wrote config file:");
log_info(" - %zu total entries", CONFIG_ELEMENTS);
log_info(" - %zu %s default", CONFIG_ELEMENTS - modified,
CONFIG_ELEMENTS - modified == 1 ? "entry is" : "entries are");
log_info(" - %u %s modified", modified,
modified == 1 ? "entry is" : "entries are");
log_info(" - %u %s forced through environment", env_vars,
env_vars == 1 ? "entry is" : "entries are");
}
// Close file and release exclusive lock
closeFTLtoml(fp);
// Restart watching for changes in the config file
watch_config(true);
// Move temporary file to the final location if it is different
// We skip the first 8 lines as they contain the header and will always
// be different
if(files_different(GLOBALTOMLPATH".tmp", GLOBALTOMLPATH, 8))
{
// Stop watching for changes in the config file
watch_config(false);
// Rotate config file
rotate_files(GLOBALTOMLPATH, NULL);
// Move file
if(rename(GLOBALTOMLPATH".tmp", GLOBALTOMLPATH) != 0)
{
log_warn("Cannot move temporary config file to final location (%s), content not updated", strerror(errno));
// Restart watching for changes in the config file
watch_config(true);
return false;
}
// Restart watching for changes in the config file
watch_config(true);
// Log that we have written the config file if either in verbose or
// debug mode
if(verbose || config.debug.config.v.b)
log_info("Config file written to %s", GLOBALTOMLPATH);
}
else
{
// Remove temporary file
if(unlink(GLOBALTOMLPATH".tmp") != 0)
{
log_warn("Cannot remove temporary config file (%s), content not updated", strerror(errno));
return false;
}
// Log that the config file has not changed if in debug mode
log_debug(DEBUG_CONFIG, "pihole.toml unchanged");
}
if(!sha256sum(GLOBALTOMLPATH, last_checksum))
log_err("Unable to create checksum of %s", GLOBALTOMLPATH);
return true;
}

View File

@ -35,6 +35,10 @@
#include "webserver/webserver.h"
// free_api()
#include "api/api.h"
// setlocale()
#include <locale.h>
// freeEnvVars()
#include "config/env.h"
pthread_t threads[THREADS_MAX] = { 0 };
bool resolver_ready = false;
@ -176,20 +180,39 @@ char *getUserName(void)
// hyphen.
#define HOSTNAMESIZE 256
static char nodename[HOSTNAMESIZE] = { 0 };
static char dname[HOSTNAMESIZE] = { 0 };
// Returns the hostname of the system
// The result is cached in the static nodename buffer; as a side effect
// the static dname buffer (see domainname()) is filled as well.
const char *hostname(void)
{
	// Ask kernel for node name if not known
	// This is equivalent to "uname -n"
	//
	// According to man gethostname(2), this is exactly the same as calling
	// getdomainname() just with one step less
	if(nodename[0] == '\0')
	{
		struct utsname buf;
		if(uname(&buf) == 0)
		{
			// strncpy() does not NUL-terminate when the source fills
			// the buffer completely - termination is enforced below
			strncpy(nodename, buf.nodename, HOSTNAMESIZE);
			// NOTE(review): redundant with the unconditional
			// termination after this block - looks like a leftover
			nodename[HOSTNAMESIZE-1] = '\0';
			// NOTE(review): utsname.domainname is a GNU extension -
			// presumably _GNU_SOURCE is defined project-wide; confirm
			strncpy(dname, buf.domainname, HOSTNAMESIZE);
		}
		// Ensure both cached names are NUL-terminated even when uname()
		// failed or the names were truncated
		nodename[HOSTNAMESIZE - 1] = '\0';
		dname[HOSTNAMESIZE - 1] = '\0';
	}
	return nodename;
}
// Returns the domain name of the system
const char *domainname(void)
{
	// The cache is filled by hostname(), which also obtains the domain
	// name from the kernel - trigger it on first use
	if(*dname == '\0')
		hostname();

	return dname;
}
void delay_startup(void)
{
// Exit early if not sleeping
@ -349,31 +372,39 @@ void cleanup(const int ret)
// This should be the last action when c
destroy_shmem();
// Free environment variables
freeEnvVars();
char buffer[42] = { 0 };
format_time(buffer, 0, timer_elapsed_msec(EXIT_TIMER));
log_info("########## FTL terminated after%s (code %i)! ##########", buffer, ret);
}
static clock_t last_clock = -1;
static float last_clock = 0.0f;
static float cpu_usage = 0.0f;
void calc_cpu_usage(void)
void calc_cpu_usage(const unsigned int interval)
{
// Get the current CPU usage
const clock_t clk = clock();
if(clk == (clock_t)-1)
// Get the current resource usage
// RUSAGE_SELF means here "the calling process" which is the sum of all
// resources used by all threads in the process
struct rusage usage = { 0 };
if(getrusage(RUSAGE_SELF, &usage) != 0)
{
log_warn("calc_cpu_usage() failed: %s", strerror(errno));
log_err("Unable to obtain CPU usage: %s (%i)", strerror(errno), errno);
return;
}
if(last_clock == -1)
{
// Initialize the value and return
last_clock = clk;
return;
}
// Percentage of CPU time spent executing instructions
cpu_usage = 100.0f * ((float)clk - (float)last_clock) / CLOCKS_PER_SEC;
last_clock = clk;
// Calculate the CPU usage: it is the total time spent in user mode and
// kernel mode by this process since the total time since the last call
// to this function. 100% means one core is fully used, 200% means two
// cores are fully used, etc.
const float this_clock = usage.ru_utime.tv_sec + usage.ru_stime.tv_sec + 1e-6 * (usage.ru_utime.tv_usec + usage.ru_stime.tv_usec);
// Calculate the CPU usage in this interval
cpu_usage = 100.0 * (this_clock - last_clock) / interval;
// Store the current time for the next call to this function
last_clock = this_clock;
}
float __attribute__((pure)) get_cpu_percentage(void)
@ -444,3 +475,16 @@ bool ipv6_enabled(void)
// IPv6-capable interface
return true;
}
// Initialize the process-wide locale settings
void init_locale(void)
{
	// Adopt the system's default locale first - libidn needs this to
	// convert UTF-8 to ASCII (it otherwise fails with
	// "Character encoding conversion error")
	setlocale(LC_ALL, "");

	// Afterwards, force the C locale for numeric formatting so we
	// always use the dot as decimal separator, regardless of the
	// system locale (which may use a comma, e.g., in German)
	setlocale(LC_NUMERIC, "C");
}

View File

@ -17,13 +17,15 @@ void go_daemon(void);
void savepid(void);
char *getUserName(void);
const char *hostname(void);
const char *domainname(void);
void delay_startup(void);
bool is_fork(const pid_t mpid, const pid_t pid) __attribute__ ((const));
void cleanup(const int ret);
void set_nice(void);
void calc_cpu_usage(void);
void calc_cpu_usage(const unsigned int interval);
float get_cpu_percentage(void) __attribute__((pure));
bool ipv6_enabled(void);
void init_locale(void);
#include <sys/syscall.h>
#include <unistd.h>

View File

@ -195,7 +195,7 @@ static int get_aliasclient_ID(sqlite3 *db, const clientsData *client)
const clientsData *alias_client = getClient(aliasclientID, true);
// Skip clients that are not alias-clients
if(!alias_client->flags.aliasclient)
if(alias_client == NULL || !alias_client->flags.aliasclient)
continue;
// Compare MAC address of the current client to the

View File

@ -251,9 +251,15 @@ void SQLite3LogCallback(void *pArg, int iErrCode, const char *zMsg)
generate_backtrace();
if(iErrCode == SQLITE_WARNING)
log_warn("SQLite3 message: %s (%d)", zMsg, iErrCode);
log_warn("SQLite3: %s (%d)", zMsg, iErrCode);
else if(iErrCode == SQLITE_NOTICE || iErrCode == SQLITE_SCHEMA)
// SQLITE_SCHEMA is returned when the database schema has changed
// This is not necessarily an error, as sqlite3_step() will re-prepare
// the statement and try again. If it cannot, it will return an error
// and this will be handled over there.
log_debug(DEBUG_ANY, "SQLite3: %s (%d)", zMsg, iErrCode);
else
log_err("SQLite3 message: %s (%d)", zMsg, iErrCode);
log_err("SQLite3: %s (%d)", zMsg, iErrCode);
}
void db_init(void)
@ -543,6 +549,26 @@ void db_init(void)
dbversion = db_get_int(db, DB_VERSION);
}
// Update to version 17 if lower
if(dbversion < 17)
{
// Update to version 17: Rename regex_id column to regex_id_old
log_info("Updating long-term database to version 17");
if(!rename_query_storage_column_regex_id(db))
{
log_info("regex_id cannot be renamed to list_id, database not available");
dbclose(&db);
return;
}
// Get updated version
dbversion = db_get_int(db, DB_VERSION);
}
// Last check after all migrations, if this happens, it will cause the
// CI to fail the tests
if(dbversion != MEMDB_VERSION)
log_err("Database version %i does not match MEMDB_VERSION %i", dbversion, MEMDB_VERSION);
lock_shm();
import_aliasclients(db);
unlock_shm();

View File

@ -56,6 +56,27 @@ static bool delete_old_queries_in_DB(sqlite3 *db)
return true;
}
// Refresh SQLite's internal table/index statistics for the given open
// database handle and log how long this took.
// NOTE(review): SQL_bool appears to be a project macro wrapping SQL
// execution - it may return from this function early on error; confirm
static bool analyze_database(sqlite3 *db)
{
	// Optimize the database by running ANALYZE
	// The ANALYZE command gathers statistics about tables and indices and
	// stores the collected information in internal tables of the database
	// where the query optimizer can access the information and use it to
	// help make better query planning choices.

	// Measure time using the monotonic clock (immune to wall-clock jumps)
	struct timespec start, end;
	clock_gettime(CLOCK_MONOTONIC, &start);

	SQL_bool(db, "ANALYZE;");

	clock_gettime(CLOCK_MONOTONIC, &end);

	// Print final message
	log_info("Optimized database in %.3f seconds",
	         (double)(end.tv_sec - start.tv_sec) + 1e-9*(end.tv_nsec - start.tv_nsec));

	return true;
}
#define DBOPEN_OR_AGAIN() { if(!db) db = dbopen(false, false); if(!db) { thread_sleepms(DB, 5000); continue; } }
#define BREAK_IF_KILLED() { if(killed) break; }
#define DBCLOSE_OR_BREAK() { dbclose(&db); BREAK_IF_KILLED(); }
@ -72,6 +93,17 @@ void *DB_thread(void *val)
time_t before = time(NULL);
time_t lastDBsave = before - before%config.database.DBinterval.v.ui;
// Other timestamps, made independent from the exact time FTL was
// started
time_t lastAnalyze = before - before % DATABASE_ANALYZE_INTERVAL;
time_t lastMACVendor = before - before % DATABASE_MACVENDOR_INTERVAL;
// Add some randomness (up to one hour) to these timestamps to avoid
// them running at the same time. This is not a security feature, so
// using rand() is fine.
lastAnalyze += rand() % 3600;
lastMACVendor += rand() % 3600;
// This thread runs until shutdown of the process. We keep this thread
// running when pihole-FTL.db is corrupted because reloading of privacy
// level, and the gravity database (initially and after gravity)
@ -135,16 +167,30 @@ void *DB_thread(void *val)
set_event(PARSE_NEIGHBOR_CACHE);
}
// Intermediate cancellation-point
if(killed)
break;
// Optimize database once per week
if(now - lastAnalyze >= DATABASE_ANALYZE_INTERVAL)
{
DBOPEN_OR_AGAIN();
analyze_database(db);
lastAnalyze = now;
DBCLOSE_OR_BREAK();
}
// Intermediate cancellation-point
if(killed)
break;
// Update MAC vendor strings once a month (the MAC vendor
// database is not updated very often)
if(now % 2592000L == 0)
if(now - lastMACVendor >= DATABASE_MACVENDOR_INTERVAL)
{
DBOPEN_OR_AGAIN();
updateMACVendorRecords(db);
lastMACVendor = now;
DBCLOSE_OR_BREAK();
}

View File

@ -46,7 +46,6 @@ sqlite3_stmt_vec *blacklist_stmt = NULL;
// Private variables
static sqlite3 *gravity_db = NULL;
static sqlite3_stmt* table_stmt = NULL;
static sqlite3_stmt* auditlist_stmt = NULL;
bool gravityDB_opened = false;
static bool gravity_abp_format = false;
@ -174,35 +173,6 @@ bool gravityDB_open(void)
return false;
}
// Prepare audit statement
log_debug(DEBUG_DATABASE, "gravityDB_open(): Preparing audit query");
// We support adding audit domains with a wildcard character (*)
// Example 1: google.de
// matches only google.de
// Example 2: *.google.de
// matches all subdomains of google.de
// BUT NOT google.de itself
// Example 3: *google.de
// matches 'google.de' and all of its subdomains but
// also other domains ending in google.de, like
// abcgoogle.de
rc = sqlite3_prepare_v3(gravity_db,
"SELECT domain, "
"CASE WHEN substr(domain, 1, 1) = '*' " // Does the database string start in '*' ?
"THEN '*' || substr(:input, - length(domain) + 1) " // If so: Crop the input domain and prepend '*'
"ELSE :input " // If not: Use input domain directly for comparison
"END matcher "
"FROM domain_audit WHERE matcher = domain" // Match where (modified) domain equals the database domain
";", -1, SQLITE_PREPARE_PERSISTENT, &auditlist_stmt, NULL);
if( rc != SQLITE_OK )
{
log_err("gravityDB_open(\"SELECT EXISTS(... domain_audit ...)\") - SQL error prepare: %s", sqlite3_errstr(rc));
gravityDB_close();
return false;
}
// Set SQLite3 busy timeout to a user-defined value (defaults to 1 second)
// to avoid immediate failures when the gravity database is still busy
// writing the changes to disk
@ -259,8 +229,8 @@ static char* get_client_querystr(const char *table, const char *column, const ch
}
// Determine whether to show IP or hardware address
static inline const char *show_client_string(const char *hwaddr, const char *hostname,
const char *ip)
static const char *show_client_string(const char *hwaddr, const char *hostname,
const char *ip)
{
if(hostname != NULL && strlen(hostname) > 0)
{
@ -320,7 +290,7 @@ static bool get_client_groupids(clientsData* client)
ip, sqlite3_errstr(rc));
sqlite3_reset(table_stmt);
sqlite3_finalize(table_stmt);
return NULL;
return false;
}
// Perform query
@ -398,7 +368,7 @@ static bool get_client_groupids(clientsData* client)
{
log_debug(DEBUG_CLIENTS, "--> No result.");
}
else if(hwaddr != NULL && strlen(hwaddr) > 3 && strncasecmp(hwaddr, "ip-", 3) == 0)
else if(strlen(hwaddr) > 3 && strncasecmp(hwaddr, "ip-", 3) == 0)
{
free(hwaddr);
hwaddr = NULL;
@ -406,7 +376,7 @@ static bool get_client_groupids(clientsData* client)
log_debug(DEBUG_CLIENTS, "Skipping mock-device hardware address lookup");
}
// Set MAC address from database information if available and the MAC address is not already set
else if(hwaddr != NULL && client->hwlen != 6)
else if(client->hwlen != 6)
{
// Proper MAC parsing
unsigned char data[6];
@ -425,9 +395,8 @@ static bool get_client_groupids(clientsData* client)
// MAC address fallback: Try to synthesize MAC address from internal buffer
if(hwaddr == NULL && client->hwlen == 6)
{
const size_t strlen = sizeof("AA:BB:CC:DD:EE:FF");
hwaddr = calloc(18, strlen);
snprintf(hwaddr, strlen, "%02X:%02X:%02X:%02X:%02X:%02X",
hwaddr = calloc(18, sizeof(char)); // 18 == sizeof("AA:BB:CC:DD:EE:FF")
snprintf(hwaddr, 18, "%02X:%02X:%02X:%02X:%02X:%02X",
client->hwaddr[0], client->hwaddr[1], client->hwaddr[2],
client->hwaddr[3], client->hwaddr[4], client->hwaddr[5]);
@ -888,7 +857,7 @@ bool gravityDB_prepare_client_statements(clientsData *client)
// Prepare gravity statement
log_debug(DEBUG_DATABASE, "gravityDB_open(): Preparing vw_gravity statement for client %s", clientip);
querystr = get_client_querystr("vw_gravity", "domain", getstr(client->groupspos));
querystr = get_client_querystr("vw_gravity", "adlist_id", getstr(client->groupspos));
rc = sqlite3_prepare_v3(gravity_db, querystr, -1, SQLITE_PREPARE_PERSISTENT, &stmt, NULL);
if( rc != SQLITE_OK )
{
@ -901,7 +870,7 @@ bool gravityDB_prepare_client_statements(clientsData *client)
// Prepare antigravity statement
log_debug(DEBUG_DATABASE, "gravityDB_open(): Preparing vw_antigravity statement for client %s", clientip);
querystr = get_client_querystr("vw_antigravity", "domain", getstr(client->groupspos));
querystr = get_client_querystr("vw_antigravity", "adlist_id", getstr(client->groupspos));
rc = sqlite3_prepare_v3(gravity_db, querystr, -1, SQLITE_PREPARE_PERSISTENT, &stmt, NULL);
if( rc != SQLITE_OK )
{
@ -984,10 +953,6 @@ void gravityDB_close(void)
free_sqlite3_stmt_vec(&gravity_stmt);
free_sqlite3_stmt_vec(&antigravity_stmt);
// Finalize audit list statement
sqlite3_finalize(auditlist_stmt);
auditlist_stmt = NULL;
// Close table
sqlite3_close(gravity_db);
gravity_db = NULL;
@ -1188,7 +1153,7 @@ static enum db_result domain_in_list(const char *domain, sqlite3_stmt *stmt, con
// Bind domain to prepared statement
// SQLITE_STATIC: Use the string without first duplicating it internally.
// We can do this as domain has dynamic scope that exceeds that of the binding.
// We need to bind the domain only once even to the prepared audit statement as:
// We need to bind the domain only once:
// When the same named SQL parameter is used more than once, second and
// subsequent occurrences have the same index as the first occurrence.
// (https://www.sqlite.org/c3ref/bind_blob.html)
@ -1279,6 +1244,11 @@ enum db_result in_allowlist(const char *domain, DNSCacheData *dns_cache, clients
// Check if this client needs a rechecking of group membership
gravityDB_client_check_again(client);
// Check again as the client may have been reloaded if this is a TCP
// worker
if(whitelist_stmt == NULL)
return LIST_NOT_AVAILABLE;
// Get whitelist statement from vector of prepared statements if available
sqlite3_stmt *stmt = whitelist_stmt->get(whitelist_stmt, client->id);
@ -1297,7 +1267,7 @@ enum db_result in_allowlist(const char *domain, DNSCacheData *dns_cache, clients
// We have to check both the exact whitelist (using a prepared database statement)
// as well the compiled regex whitelist filters to check if the current domain is
// whitelisted.
return domain_in_list(domain, stmt, "whitelist", &dns_cache->domainlist_id);
return domain_in_list(domain, stmt, "whitelist", &dns_cache->list_id);
}
cJSON *gen_abp_patterns(const char *domain, const bool antigravity)
@ -1412,6 +1382,11 @@ enum db_result in_gravity(const char *domain, clientsData *client, const bool an
// Check if this client needs a rechecking of group membership
gravityDB_client_check_again(client);
// Check again as the client may have been reloaded if this is a TCP
// worker
if(gravity_stmt == NULL || antigravity_stmt == NULL)
return LIST_NOT_AVAILABLE;
// Get whitelist statement from vector of prepared statements
sqlite3_stmt *stmt = antigravity ?
antigravity_stmt->get(antigravity_stmt, client->id) :
@ -1486,6 +1461,11 @@ enum db_result in_denylist(const char *domain, DNSCacheData *dns_cache, clientsD
// Check if this client needs a rechecking of group membership
gravityDB_client_check_again(client);
// Check again as the client may have been reloaded if this is a TCP
// worker
if(blacklist_stmt == NULL)
return LIST_NOT_AVAILABLE;
// Get whitelist statement from vector of prepared statements
sqlite3_stmt *stmt = blacklist_stmt->get(blacklist_stmt, client->id);
@ -1501,18 +1481,7 @@ enum db_result in_denylist(const char *domain, DNSCacheData *dns_cache, clientsD
if(stmt == NULL)
stmt = blacklist_stmt->get(blacklist_stmt, client->id);
return domain_in_list(domain, stmt, "blacklist", &dns_cache->domainlist_id);
}
bool in_auditlist(const char *domain)
{
// If audit list statement is not ready and cannot be initialized (e.g. no access
// to the database), we return false (not in audit list) to prevent an FTL crash
if(auditlist_stmt == NULL)
return false;
// We check the domain_audit table for the given domain
return domain_in_list(domain, auditlist_stmt, "auditlist", NULL) == FOUND;
return domain_in_list(domain, stmt, "blacklist", &dns_cache->list_id);
}
bool gravityDB_get_regex_client_groups(clientsData* client, const unsigned int numregex, const regexData *regex,
@ -1626,7 +1595,7 @@ bool gravityDB_addToTable(const enum gravity_list_type listtype, tablerow *row,
// The item is the item for all POST requests
if(listtype == GRAVITY_GROUPS)
{
querystr = "INSERT INTO \"group\" (name,enabled,description) VALUES (:item,:enabled,:description);";
querystr = "INSERT INTO \"group\" (name,enabled,description) VALUES (:item,:enabled,:comment);";
}
else if(listtype == GRAVITY_ADLISTS)
{
@ -1648,8 +1617,8 @@ bool gravityDB_addToTable(const enum gravity_list_type listtype, tablerow *row,
if(row->name == NULL)
{
// Name is not to be changed
querystr = "INSERT INTO \"group\" (name,enabled,description) VALUES (:item,:enabled,:description) "
"ON CONFLICT(name) DO UPDATE SET enabled = :enabled, description = :description;";
querystr = "INSERT INTO \"group\" (name,enabled,description) VALUES (:item,:enabled,:comment) "
"ON CONFLICT(name) DO UPDATE SET enabled = :enabled, description = :comment;";
}
else
{
@ -1746,15 +1715,15 @@ bool gravityDB_addToTable(const enum gravity_list_type listtype, tablerow *row,
{
if(strcasecmp("allow", row->type) == 0 &&
strcasecmp("exact", row->kind) == 0)
oldtype = 0;
oldtype = 0;
else if(strcasecmp("deny", row->type) == 0 &&
strcasecmp("exact", row->kind) == 0)
oldtype = 1;
oldtype = 1;
else if(strcasecmp("allow", row->type) == 0 &&
strcasecmp("regex", row->kind) == 0)
oldtype = 2;
oldtype = 2;
else if(strcasecmp("deny", row->type) == 0 &&
strcasecmp("regex", row->kind) == 0)
strcasecmp("regex", row->kind) == 0)
oldtype = 3;
else
{
@ -1838,134 +1807,251 @@ bool gravityDB_addToTable(const enum gravity_list_type listtype, tablerow *row,
return okay;
}
bool gravityDB_delFromTable(const enum gravity_list_type listtype, const char* argument, const char **message)
bool gravityDB_delFromTable(const enum gravity_list_type listtype, const cJSON* array, unsigned int *deleted, const char **message)
{
// Return early if database is not available
if(gravity_db == NULL)
{
*message = "Database not available";
return false;
}
int type = -1;
switch (listtype)
// Return early if passed JSON argument is not an array
if(!cJSON_IsArray(array))
{
case GRAVITY_DOMAINLIST_ALLOW_EXACT:
type = 0;
break;
case GRAVITY_DOMAINLIST_DENY_EXACT:
type = 1;
break;
case GRAVITY_DOMAINLIST_ALLOW_REGEX:
type = 2;
break;
case GRAVITY_DOMAINLIST_DENY_REGEX:
type = 3;
break;
case GRAVITY_GROUPS:
case GRAVITY_ADLISTS:
case GRAVITY_CLIENTS:
// No type required for these tables
break;
// Aggregate types cannot be handled by this routine
case GRAVITY_GRAVITY:
case GRAVITY_ANTIGRAVITY:
case GRAVITY_DOMAINLIST_ALLOW_ALL:
case GRAVITY_DOMAINLIST_DENY_ALL:
case GRAVITY_DOMAINLIST_ALL_EXACT:
case GRAVITY_DOMAINLIST_ALL_REGEX:
case GRAVITY_DOMAINLIST_ALL_ALL:
default:
return false;
*message = "Argument is not an array";
log_err("gravityDB_delFromTable(%d): %s",
listtype, *message);
return false;
}
// Prepare SQLite statement
const bool isDomain = listtype == GRAVITY_DOMAINLIST_ALLOW_EXACT ||
listtype == GRAVITY_DOMAINLIST_DENY_EXACT ||
listtype == GRAVITY_DOMAINLIST_ALLOW_REGEX ||
listtype == GRAVITY_DOMAINLIST_DENY_REGEX ||
listtype == GRAVITY_DOMAINLIST_ALL_ALL; // batch delete
// Begin transaction
const char *querystr = "BEGIN TRANSACTION;";
int rc = sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
if(rc != SQLITE_OK)
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d): SQL error exec(\"%s\"): %s",
listtype, querystr, *message);
return false;
}
// Create temporary table for JSON argument
if(isDomain)
// Create temporary table for domains to be deleted
querystr = "CREATE TEMPORARY TABLE deltable (type INT, item TEXT);";
else
querystr = "CREATE TEMPORARY TABLE deltable (item TEXT);";
sqlite3_stmt* stmt = NULL;
const char *querystr[3] = {NULL, NULL, NULL};
if(listtype == GRAVITY_GROUPS)
querystr[0] = "DELETE FROM \"group\" WHERE name = :argument;";
else if(listtype == GRAVITY_ADLISTS)
rc = sqlite3_prepare_v2(gravity_db, querystr, -1, &stmt, NULL);
if( rc != SQLITE_OK )
{
// This is actually a three-step deletion to satisfy foreign-key constraints
querystr[0] = "DELETE FROM gravity WHERE adlist_id = (SELECT id FROM adlist WHERE address = :argument);";
querystr[1] = "DELETE FROM antigravity WHERE adlist_id = (SELECT id FROM adlist WHERE address = :argument);";
querystr[2] = "DELETE FROM adlist WHERE address = :argument;";
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d) - SQL error prepare(\"%s\"): %s",
listtype, querystr, *message);
// Rollback transaction
querystr = "ROLLBACK TRANSACTION;";
sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
return false;
}
else if(listtype == GRAVITY_CLIENTS)
querystr[0] = "DELETE FROM client WHERE ip = :argument;";
else // domainlist
querystr[0] = "DELETE FROM domainlist WHERE domain = :argument AND type = :type;";
bool okay = true;
for(unsigned int i = 0; i < ArraySize(querystr); i++)
// Execute statement
if((rc = sqlite3_step(stmt)) != SQLITE_DONE)
{
// Finish if no more queries
if(querystr[i] == NULL)
break;
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d) - SQL error step(\"%s\"): %s",
listtype, querystr, *message);
sqlite3_reset(stmt);
sqlite3_finalize(stmt);
// We need to perform a second SQL request
int rc = sqlite3_prepare_v2(gravity_db, querystr[i], -1, &stmt, NULL);
if( rc != SQLITE_OK )
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d, %s) - SQL error prepare %u (%i): %s",
type, argument, i, rc, *message);
return false;
}
// Rollback transaction
querystr = "ROLLBACK TRANSACTION;";
sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
// Bind domain to prepared statement (if requested)
const int arg_idx = sqlite3_bind_parameter_index(stmt, ":argument");
if(arg_idx > 0 && (rc = sqlite3_bind_text(stmt, arg_idx, argument, -1, SQLITE_STATIC)) != SQLITE_OK)
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d, %s): Failed to bind argument %u (error %d) - %s",
type, argument, i, rc, *message);
sqlite3_reset(stmt);
sqlite3_finalize(stmt);
return false;
}
return false;
}
// Bind type to prepared statement (if requested)
// Finalize statement
sqlite3_reset(stmt);
sqlite3_finalize(stmt);
// Prepare statement for inserting items into virtual table
if(isDomain)
querystr = "INSERT INTO deltable (type, item) VALUES (:type, :item);";
else
querystr = "INSERT INTO deltable (item) VALUES (:item);";
rc = sqlite3_prepare_v2(gravity_db, querystr, -1, &stmt, NULL);
if( rc != SQLITE_OK )
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d) - SQL error prepare(\"%s\"): %s",
listtype, querystr, *message);
// Rollback transaction
querystr = "ROLLBACK TRANSACTION;";
sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
return false;
}
// Loop over all domains in the JSON array
cJSON *it = NULL;
cJSON_ArrayForEach(it, array)
{
// Bind type to prepared statement
cJSON *type = cJSON_GetObjectItemCaseSensitive(it, "type");
const int type_idx = sqlite3_bind_parameter_index(stmt, ":type");
if(type_idx > 0 && (rc = sqlite3_bind_int(stmt, type_idx, type)) != SQLITE_OK)
if(type_idx > 0 && (!cJSON_IsNumber(type) || (rc = sqlite3_bind_int(stmt, type_idx, type->valueint)) != SQLITE_OK))
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d, %s): Failed to bind type (2) (error %d) - %s",
type, argument, rc, *message);
log_err("gravityDB_delFromTable(%d): Failed to bind type (error %d) - %s",
type->valueint, rc, *message);
sqlite3_reset(stmt);
sqlite3_finalize(stmt);
// Rollback transaction
querystr = "ROLLBACK TRANSACTION;";
sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
return false;
}
// Bind item to prepared statement
cJSON *item = cJSON_GetObjectItemCaseSensitive(it, "item");
const int item_idx = sqlite3_bind_parameter_index(stmt, ":item");
if(item_idx > 0 && (!cJSON_IsString(item) || (rc = sqlite3_bind_text(stmt, item_idx, item->valuestring, -1, SQLITE_STATIC)) != SQLITE_OK))
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d): Failed to bind item (error %d) - %s",
listtype, rc, *message);
sqlite3_reset(stmt);
sqlite3_finalize(stmt);
// Rollback transaction
querystr = "ROLLBACK TRANSACTION;";
sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
return false;
}
// Execute statement
if((rc = sqlite3_step(stmt)) != SQLITE_DONE)
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d) - SQL error step(\"%s\"): %s",
listtype, querystr, *message);
sqlite3_reset(stmt);
sqlite3_finalize(stmt);
// Rollback transaction
querystr = "ROLLBACK TRANSACTION;";
sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
return false;
}
// Reset statement
sqlite3_reset(stmt);
// Debug output
if(config.debug.api.v.b)
{
log_debug(DEBUG_API, "SQL: %s", querystr[i]);
if(arg_idx > 0)
log_debug(DEBUG_API, " :argument = \"%s\"", argument);
log_debug(DEBUG_API, "SQL: %s", querystr);
if(item_idx > 0)
log_debug(DEBUG_API, " :item = \"%s\"", item->valuestring);
if(type_idx > 0)
log_debug(DEBUG_API, " :type = \"%i\"", type);
log_debug(DEBUG_API, " :type = %i", cJSON_IsNumber(type) ? type->valueint : -1);
}
// Perform step
okay = false;
if((rc = sqlite3_step(stmt)) == SQLITE_DONE)
{
// Item removed
okay = true;
}
else
{
*message = sqlite3_errmsg(gravity_db);
}
// Finalize statement
sqlite3_reset(stmt);
sqlite3_finalize(stmt);
}
return okay;
// Finalize statement
sqlite3_finalize(stmt);
// Prepare SQL for deleting items from the requested table
const char *querystrs[4] = {NULL, NULL, NULL, NULL};
if(listtype == GRAVITY_GROUPS)
querystrs[0] = "DELETE FROM \"group\" WHERE name IN (SELECT item FROM deltable);";
else if(listtype == GRAVITY_ADLISTS)
{
// This is actually a three-step deletion to satisfy foreign-key constraints
querystrs[0] = "DELETE FROM gravity WHERE adlist_id IN (SELECT id FROM adlist WHERE address IN (SELECT item FROM deltable));";
querystrs[1] = "DELETE FROM antigravity WHERE adlist_id IN (SELECT id FROM adlist WHERE address IN (SELECT item FROM deltable));";
querystrs[2] = "DELETE FROM adlist WHERE address IN (SELECT item FROM deltable);";
}
else if(listtype == GRAVITY_CLIENTS)
querystrs[0] = "DELETE FROM client WHERE ip IN (SELECT item FROM deltable);";
else // domainlist
{
querystrs[0] = "DELETE FROM domainlist WHERE domain IN (SELECT item FROM deltable WHERE type = 0) AND type = 0;";
querystrs[1] = "DELETE FROM domainlist WHERE domain IN (SELECT item FROM deltable WHERE type = 1) AND type = 1;";
querystrs[2] = "DELETE FROM domainlist WHERE domain IN (SELECT item FROM deltable WHERE type = 2) AND type = 2;";
querystrs[3] = "DELETE FROM domainlist WHERE domain IN (SELECT item FROM deltable WHERE type = 3) AND type = 3;";
}
for(unsigned int i = 0; i < ArraySize(querystrs); i++)
{
// Finish if no more queries
if(querystrs[i] == NULL)
break;
// Execute statement
rc = sqlite3_exec(gravity_db, querystrs[i], NULL, NULL, NULL);
if(rc != SQLITE_OK)
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d): SQL error exec(\"%s\"): %s",
listtype, querystrs[i], *message);
// Rollback transaction
querystr = "ROLLBACK TRANSACTION;";
sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
return false;
}
// Add number of deleted rows
*deleted += sqlite3_changes(gravity_db);
}
// Drop temporary table
querystr = "DROP TABLE deltable;";
rc = sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
if(rc != SQLITE_OK)
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d): SQL error exec(\"%s\"): %s",
listtype, querystr, *message);
// Rollback transaction
querystr = "ROLLBACK TRANSACTION;";
sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
}
// Commit transaction
querystr = "COMMIT TRANSACTION;";
rc = sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
if(rc != SQLITE_OK)
{
*message = sqlite3_errmsg(gravity_db);
log_err("gravityDB_delFromTable(%d): SQL error exec(\"%s\"): %s",
listtype, querystr, *message);
// Rollback transaction
querystr = "ROLLBACK TRANSACTION;";
sqlite3_exec(gravity_db, querystr, NULL, NULL, NULL);
}
return true;
}
static sqlite3_stmt* read_stmt = NULL;

View File

@ -59,7 +59,6 @@ cJSON *gen_abp_patterns(const char *domain, const bool antigravity);
enum db_result in_gravity(const char *domain, clientsData *client, const bool antigravity, int* domain_id);
enum db_result in_denylist(const char *domain, DNSCacheData *dns_cache, clientsData *client);
enum db_result in_allowlist(const char *domain, DNSCacheData *dns_cache, clientsData *client);
bool in_auditlist(const char *domain);
bool gravityDB_get_regex_client_groups(clientsData* client, const unsigned int numregex, const regexData *regex,
const unsigned char type, const char* table);
@ -70,7 +69,7 @@ bool gravityDB_readTableGetRow(const enum gravity_list_type listtype, tablerow *
void gravityDB_readTableFinalize(void);
bool gravityDB_addToTable(const enum gravity_list_type listtype, tablerow *row,
const char **message, const enum http_method method);
bool gravityDB_delFromTable(const enum gravity_list_type listtype, const char* domain_name, const char **message);
bool gravityDB_delFromTable(const enum gravity_list_type listtype, const cJSON* array, unsigned int *deleted, const char **message);
bool gravityDB_edit_groups(const enum gravity_list_type listtype, cJSON *groups,
const tablerow *row, const char **message);

View File

@ -27,6 +27,8 @@
#include "gc.h"
// get_filesystem_details()
#include "files.h"
// get_memdb()
#include "database/query-table.h"
static const char *get_message_type_str(const enum message_type type)
{
@ -214,23 +216,10 @@ bool create_message_table(sqlite3 *db)
// Flush message table
bool flush_message_table(void)
{
// Return early if database is known to be broken
if(FTLDBerror())
return false;
sqlite3 *db;
// Open database connection
if((db = dbopen(false, false)) == NULL)
{
log_err("flush_message_table() - Failed to open DB");
return false;
}
sqlite3 *memdb = get_memdb();
// Flush message table
SQL_bool(db, "DELETE FROM message;");
// Close database connection
dbclose(&db);
SQL_bool(memdb, "DELETE FROM disk.message;");
return true;
}
@ -389,7 +378,7 @@ end_of_add_message: // Close database connection
return rowid;
}
bool delete_message(cJSON *ids)
bool delete_message(cJSON *ids, int *deleted)
{
// Return early if database is known to be broken
if(FTLDBerror())
@ -424,6 +413,10 @@ bool delete_message(cJSON *ids)
log_err("SQL error (%i): %s", sqlite3_errcode(db), sqlite3_errmsg(db));
return false;
}
// Add to deleted count
*deleted += sqlite3_changes(db);
sqlite3_reset(res);
sqlite3_clear_bindings(res);
}
@ -659,7 +652,7 @@ static void format_inaccessible_adlist_message(char *plain, const int sizeof_pla
char *escaped_address = escape_html(address);
if(snprintf(html, sizeof_html, "<a href=\"groups-adlists.lp?adlist=%i\">List with ID <strong>%d</strong> (<code>%s</code>)</a> was inaccessible during last gravity run",
if(snprintf(html, sizeof_html, "<a href=\"groups/lists?listid=%i\">List with ID <strong>%d</strong> (<code>%s</code>)</a> was inaccessible during last gravity run",
dbindex, dbindex, escaped_address) > sizeof_html)
log_warn("format_inaccessible_adlist_message(): Buffer too small to hold HTML message, warning truncated");

View File

@ -16,7 +16,7 @@
int count_messages(const bool filter_dnsmasq_warnings);
bool format_messages(cJSON *array);
bool create_message_table(sqlite3 *db);
bool delete_message(cJSON *ids);
bool delete_message(cJSON *ids, int *deleted);
bool flush_message_table(void);
void logg_regex_warning(const char *type, const char *warning, const int dbindex, const char *regex);
void logg_subnet_warning(const char *ip, const int matching_count, const char *matching_ids,

View File

@ -19,7 +19,7 @@
#include "../datastructure.h"
// struct config
#include "../config/config.h"
// resolveHostname()
// resolve_this_name()
#include "../resolve.h"
// killed
#include "../signals.h"
@ -1083,7 +1083,7 @@ static bool add_local_interfaces_to_network_table(sqlite3 *db, time_t now, unsig
// Try to read IPv4 address
// We need a special rule here to avoid "inet6 ..." being accepted as IPv4 address
if(sscanf(linebuffer, " inet%*[ ]%[0-9.] brd", ipaddr) == 1)
if(sscanf(linebuffer, " inet%*[ ]%127[0-9.] brd", ipaddr) == 1)
{
// Obtained an IPv4 address
ipaddr[sizeof(ipaddr)-1] = '\0';
@ -1091,7 +1091,7 @@ static bool add_local_interfaces_to_network_table(sqlite3 *db, time_t now, unsig
else
{
// Try to read IPv6 address
if(sscanf(linebuffer, " inet6%*[ ]%[0-9a-fA-F:] scope", ipaddr) == 1)
if(sscanf(linebuffer, " inet6%*[ ]%127[0-9a-fA-F:] scope", ipaddr) == 1)
{
// Obtained an IPv6 address
ipaddr[sizeof(ipaddr)-1] = '\0';
@ -1250,7 +1250,6 @@ void parse_neighbor_cache(sqlite3* db)
// Prepare buffers
char *linebuffer = NULL;
size_t linebuffersize = 0u;
char ip[128], hwaddr[128], iface[128];
unsigned int entries = 0u, additional_entries = 0u;
time_t now = time(NULL);
@ -1314,6 +1313,7 @@ void parse_neighbor_cache(sqlite3* db)
break;
// Analyze line
char ip[128], hwaddr[128], iface[128];
int num = sscanf(linebuffer, "%99s dev %99s lladdr %99s",
ip, iface, hwaddr);
@ -2144,6 +2144,84 @@ char *__attribute__((malloc)) getNameFromIP(sqlite3 *db, const char *ipaddr)
return name;
}
// Get most recently seen host name of device identified by MAC address
char *__attribute__((malloc)) getNameFromMAC(const char *client)
{
// Return early if database is known to be broken
if(FTLDBerror())
return NULL;
log_debug(DEBUG_DATABASE,"Looking up host name for %s", client);
// Open pihole-FTL.db database file
sqlite3 *db = NULL;
if((db = dbopen(false, false)) == NULL)
{
log_warn("getNameFromMAC(\"%s\") - Failed to open DB", client);
return NULL;
}
// Check for a host name associated with the given client as MAC address
// COLLATE NOCASE: Case-insensitive comparison
const char *querystr = "SELECT name FROM network_addresses "
"WHERE name IS NOT NULL AND "
"network_id = (SELECT id FROM network WHERE hwaddr = ? COLLATE NOCASE) "
"ORDER BY lastSeen DESC LIMIT 1";
sqlite3_stmt *stmt = NULL;
int rc = sqlite3_prepare_v2(db, querystr, -1, &stmt, NULL);
if(rc != SQLITE_OK)
{
log_err("getNameFromMAC(\"%s\") - SQL error prepare: %s",
client, sqlite3_errstr(rc));
dbclose(&db);
return NULL;
}
// Bind client to prepared statement
if((rc = sqlite3_bind_text(stmt, 1, client, -1, SQLITE_STATIC)) != SQLITE_OK)
{
log_warn("getNameFromMAC(\"%s\"): Failed to bind ip: %s",
client, sqlite3_errstr(rc));
checkFTLDBrc(rc);
sqlite3_reset(stmt);
sqlite3_finalize(stmt);
dbclose(&db);
return NULL;
}
char *name = NULL;
rc = sqlite3_step(stmt);
if(rc == SQLITE_ROW)
{
// Database record found (result might be empty)
name = strdup((char*)sqlite3_column_text(stmt, 0));
if(config.debug.resolver.v.b)
log_debug(DEBUG_RESOLVER, "Found database host name (by MAC) %s -> %s",
client, name);
}
else if(rc == SQLITE_DONE)
{
// Not found
if(config.debug.resolver.v.b)
log_debug(DEBUG_RESOLVER, " ---> not found");
}
else
{
// Error
checkFTLDBrc(rc);
return NULL;
}
// Finalize statement and close database handle
sqlite3_reset(stmt);
sqlite3_finalize(stmt);
dbclose(&db);
return name;
}
// Get interface of device identified by IP address
char *__attribute__((malloc)) getIfaceFromIP(sqlite3 *db, const char *ipaddr)
{
@ -2347,7 +2425,7 @@ void networkTable_readIPsFinalize(sqlite3_stmt *read_stmt)
sqlite3_finalize(read_stmt);
}
bool networkTable_deleteDevice(sqlite3 *db, const int id, const char **message)
bool networkTable_deleteDevice(sqlite3 *db, const int id, int *deleted, const char **message)
{
// First step: Delete all associated IPs of this device
// Prepare SQLite statement
@ -2384,6 +2462,9 @@ bool networkTable_deleteDevice(sqlite3 *db, const int id, const char **message)
return false;
}
// Check if we deleted any rows
*deleted += sqlite3_changes(db);
// Finalize statement
sqlite3_finalize(stmt);
@ -2420,8 +2501,37 @@ bool networkTable_deleteDevice(sqlite3 *db, const int id, const char **message)
return false;
}
// Check if we deleted any rows
*deleted += sqlite3_changes(db);
// Finalize statement
sqlite3_finalize(stmt);
return true;
}
// Counting number of occurrences of a specific char in a string
static size_t __attribute__ ((pure)) count_char(const char *haystack, const char needle)
{
size_t count = 0u;
while(*haystack)
if (*haystack++ == needle)
++count;
return count;
}
// Identify MAC addresses using a set of suitable criteria
bool __attribute__ ((pure)) isMAC(const char *input)
{
if(input != NULL && // Valid input
strlen(input) == 17u && // MAC addresses are always 17 chars long (6 bytes + 5 colons)
count_char(input, ':') == 5u && // MAC addresses always have 5 colons
strstr(input, "::") == NULL) // No double-colons (IPv6 address abbreviation)
{
// This is a MAC address of the form AA:BB:CC:DD:EE:FF
return true;
}
// Not a MAC address
return false;
}

View File

@ -18,12 +18,14 @@ bool create_network_addresses_with_names_table(sqlite3 *db);
void parse_neighbor_cache(sqlite3 *db);
void updateMACVendorRecords(sqlite3 *db);
bool unify_hwaddr(sqlite3 *db);
char* __attribute__((malloc)) getMACfromIP(sqlite3 *db, const char* ipaddr);
char *getMACfromIP(sqlite3 *db, const char* ipaddr) __attribute__((malloc));
int getAliasclientIDfromIP(sqlite3 *db, const char *ipaddr);
char* __attribute__((malloc)) getNameFromIP(sqlite3 *db, const char* ipaddr);
char* __attribute__((malloc)) getIfaceFromIP(sqlite3 *db, const char* ipaddr);
char *getNameFromIP(sqlite3 *db, const char* ipaddr) __attribute__((malloc));
char *getNameFromMAC(const char *client) __attribute__((malloc));
char *getIfaceFromIP(sqlite3 *db, const char* ipaddr) __attribute__((malloc));
void resolveNetworkTableNames(void);
bool flush_network_table(void);
bool isMAC(const char *input) __attribute__ ((pure));
typedef struct {
unsigned int id;
@ -50,6 +52,6 @@ bool networkTable_readIPs(sqlite3 *db, sqlite3_stmt **read_stmt, const int id, c
bool networkTable_readIPsGetRecord(sqlite3_stmt *read_stmt, network_addresses_record *network_addresses, const char **message);
void networkTable_readIPsFinalize(sqlite3_stmt *read_stmt);
bool networkTable_deleteDevice(sqlite3 *db, const int id, const char **message);
bool networkTable_deleteDevice(sqlite3 *db, const int id, int *deleted, const char **message);
#endif //NETWORKTABLE_H

View File

@ -8,21 +8,21 @@
* This file is copyright under the latest version of the EUPL.
* Please see LICENSE file for your rights under this license. */
#include "../FTL.h"
#include "FTL.h"
#define QUERY_TABLE_PRIVATE
#include "query-table.h"
#include "sqlite3.h"
#include "../log.h"
#include "../config/config.h"
#include "../enums.h"
#include "../config/config.h"
#include "database/query-table.h"
#include "database/sqlite3.h"
#include "log.h"
#include "config/config.h"
#include "enums.h"
#include "config/config.h"
// counters
#include "../shmem.h"
#include "../overTime.h"
#include "common.h"
#include "../timers.h"
#include "shmem.h"
#include "overTime.h"
#include "database/common.h"
#include "timers.h"
static sqlite3 *memdb = NULL;
static sqlite3 *_memdb = NULL;
static double new_last_timestamp = 0;
static unsigned int new_total = 0, new_blocked = 0;
static unsigned long last_mem_db_idx = 0, last_disk_db_idx = 0;
@ -60,10 +60,10 @@ void db_counts(unsigned long *last_idx, unsigned long *mem_num, unsigned long *d
bool init_memory_database(void)
{
int rc;
const char *uri = "file:memdb?mode=memory&cache=shared";
// Try to open in-memory database
rc = sqlite3_open_v2(uri, &memdb, SQLITE_OPEN_READWRITE, NULL);
// The :memory: database always has synchronous=OFF since the content of
// it is ephemeral and is not expected to survive a power outage.
rc = sqlite3_open_v2(":memory:", &_memdb, SQLITE_OPEN_READWRITE, NULL);
if( rc != SQLITE_OK )
{
log_err("init_memory_database(): Step error while trying to open database: %s",
@ -72,12 +72,12 @@ bool init_memory_database(void)
}
// Explicitly set busy handler to value defined in FTL.h
rc = sqlite3_busy_timeout(memdb, DATABASE_BUSY_TIMEOUT);
rc = sqlite3_busy_timeout(_memdb, DATABASE_BUSY_TIMEOUT);
if( rc != SQLITE_OK )
{
log_err("init_memory_database(): Step error while trying to set busy timeout (%d ms): %s",
DATABASE_BUSY_TIMEOUT, sqlite3_errstr(rc));
sqlite3_close(memdb);
sqlite3_close(_memdb);
return false;
}
@ -85,11 +85,11 @@ bool init_memory_database(void)
for(unsigned int i = 0; i < ArraySize(table_creation); i++)
{
log_debug(DEBUG_DATABASE, "init_memory_database(): Executing %s", table_creation[i]);
rc = sqlite3_exec(memdb, table_creation[i], NULL, NULL, NULL);
rc = sqlite3_exec(_memdb, table_creation[i], NULL, NULL, NULL);
if( rc != SQLITE_OK ){
log_err("init_memory_database(\"%s\") failed: %s",
table_creation[i], sqlite3_errstr(rc));
sqlite3_close(memdb);
sqlite3_close(_memdb);
return false;
}
}
@ -99,15 +99,34 @@ bool init_memory_database(void)
for(unsigned int i = 0; i < ArraySize(index_creation); i++)
{
log_debug(DEBUG_DATABASE, "init_memory_database(): Executing %s", index_creation[i]);
rc = sqlite3_exec(memdb, index_creation[i], NULL, NULL, NULL);
rc = sqlite3_exec(_memdb, index_creation[i], NULL, NULL, NULL);
if( rc != SQLITE_OK ){
log_err("init_memory_database(\"%s\") failed: %s",
index_creation[i], sqlite3_errstr(rc));
sqlite3_close(memdb);
sqlite3_close(_memdb);
return false;
}
}
// Attach disk database
if(!attach_database(_memdb, NULL, config.files.database.v.s, "disk"))
return false;
// Change journal mode to WAL
// - WAL is significantly faster in most scenarios.
// - WAL provides more concurrency as readers do not block writers and a
// writer does not block readers. Reading and writing can proceed
// concurrently.
// - Disk I/O operations tends to be more sequential using WAL.
rc = sqlite3_exec(_memdb, "PRAGMA disk.journal_mode=WAL", NULL, NULL, NULL);
if( rc != SQLITE_OK )
{
log_err("init_memory_database(): Step error while trying to set journal mode: %s",
sqlite3_errstr(rc));
sqlite3_close(_memdb);
return false;
}
// Everything went well
return true;
}
@ -116,11 +135,15 @@ bool init_memory_database(void)
void close_memory_database(void)
{
// Return early if there is no memory database to be closed
if(memdb == NULL)
if(_memdb == NULL)
return;
// Detach disk database
if(!detach_database(_memdb, NULL, "disk"))
log_err("close_memory_database(): Failed to detach disk database");
// Close SQLite3 memory database
int ret = sqlite3_close(memdb);
int ret = sqlite3_close(_memdb);
if(ret != SQLITE_OK)
log_err("Finalizing memory database failed: %s",
sqlite3_errstr(ret));
@ -128,12 +151,13 @@ void close_memory_database(void)
log_debug(DEBUG_DATABASE, "Closed memory database");
// Set global pointer to NULL
memdb = NULL;
_memdb = NULL;
}
sqlite3 *__attribute__((pure)) get_memdb(void)
{
return memdb;
log_debug(DEBUG_DATABASE, "Accessing in-memory database");
return _memdb;
}
// Get memory usage and size of in-memory tables
@ -188,7 +212,7 @@ static bool get_memdb_size(sqlite3 *db, size_t *memsize, int *queries)
*memsize = page_count * page_size;
// Get number of queries in the memory table
if((*queries = get_number_of_queries_in_DB(db, "query_storage", false)) == DB_FAILED)
if((*queries = get_number_of_queries_in_DB(db, "query_storage")) == DB_FAILED)
return false;
return true;
@ -202,6 +226,7 @@ static void log_in_memory_usage(void)
size_t memsize = 0;
int queries = 0;
sqlite3 *memdb = get_memdb();
if(get_memdb_size(memdb, &memsize, &queries))
{
char prefix[2] = { 0 };
@ -212,11 +237,6 @@ static void log_in_memory_usage(void)
}
}
// Attach disk database to in-memory database
bool attach_disk_database(const char **message)
{
return attach_database(memdb, message, config.files.database.v.s, "disk");
}
// Attach database using specified path and alias
bool attach_database(sqlite3* db, const char **message, const char *path, const char *alias)
@ -277,12 +297,6 @@ bool attach_database(sqlite3* db, const char **message, const char *path, const
return okay;
}
// Detach disk database to in-memory database
bool detach_disk_database(const char **message)
{
return detach_database(memdb, message, "disk");
}
// Detach a previously attached database by its alias
bool detach_database(sqlite3* db, const char **message, const char *alias)
{
@ -333,13 +347,10 @@ bool detach_database(sqlite3* db, const char **message, const char *alias)
// Get number of queries either in the temp or in the on-diks database
// This routine is used by the API routines.
int get_number_of_queries_in_DB(sqlite3 *db, const char *tablename, const bool do_attach)
int get_number_of_queries_in_DB(sqlite3 *db, const char *tablename)
{
int rc = 0, num = 0;
sqlite3_stmt *stmt = NULL;
// Attach disk database if required
if(do_attach && !attach_disk_database(NULL))
return DB_FAILED;
// Count number of rows
const size_t buflen = 42 + strlen(tablename);
@ -348,7 +359,7 @@ int get_number_of_queries_in_DB(sqlite3 *db, const char *tablename, const bool d
// The database pointer may be NULL, meaning we want the memdb
if(db == NULL)
db = memdb;
db = get_memdb();
// PRAGMA page_size
rc = sqlite3_prepare_v2(db, querystr, -1, &stmt, NULL);
@ -358,8 +369,6 @@ int get_number_of_queries_in_DB(sqlite3 *db, const char *tablename, const bool d
log_err("get_number_of_queries_in_DB(%s): Prepare error: %s",
tablename, sqlite3_errstr(rc));
free(querystr);
if(do_attach)
detach_disk_database(NULL);
return false;
}
rc = sqlite3_step(stmt);
@ -371,17 +380,11 @@ int get_number_of_queries_in_DB(sqlite3 *db, const char *tablename, const bool d
tablename, sqlite3_errstr(rc));
free(querystr);
sqlite3_finalize(stmt);
if(do_attach)
detach_disk_database(NULL);
return false;
}
sqlite3_finalize(stmt);
free(querystr);
// Detach only if attached herein
if(do_attach && !detach_disk_database(NULL))
return DB_FAILED;
return num;
}
@ -395,24 +398,20 @@ bool import_queries_from_disk(void)
const double mintime = now - config.webserver.api.maxHistory.v.ui;
const char *querystr = "INSERT INTO query_storage SELECT * FROM disk.query_storage WHERE timestamp >= ?";
// Attach disk database
if(!attach_disk_database(NULL))
return false;
// Begin transaction
int rc;
sqlite3 *memdb = get_memdb();
if((rc = sqlite3_exec(memdb, "BEGIN TRANSACTION", NULL, NULL, NULL)) != SQLITE_OK)
{
log_err("import_queries_from_disk(): Cannot start transaction: %s", sqlite3_errstr(rc));
detach_disk_database(NULL);
return false;
}
// Prepare SQLite3 statement
sqlite3_stmt *stmt = NULL;
log_debug(DEBUG_DATABASE, "Accessing in-memory database");
if((rc = sqlite3_prepare_v2(memdb, querystr, -1, &stmt, NULL)) != SQLITE_OK){
log_err("import_queries_from_disk(): SQL error prepare: %s", sqlite3_errstr(rc));
detach_disk_database(NULL);
return false;
}
@ -421,7 +420,6 @@ bool import_queries_from_disk(void)
{
log_err("import_queries_from_disk(): Failed to bind type mintime: %s", sqlite3_errstr(rc));
sqlite3_finalize(stmt);
detach_disk_database(NULL);
return false;
}
@ -464,16 +462,12 @@ bool import_queries_from_disk(void)
if((rc = sqlite3_exec(memdb, "END TRANSACTION", NULL, NULL, NULL)) != SQLITE_OK)
{
log_err("import_queries_from_disk(): Cannot end transaction: %s", sqlite3_errstr(rc));
detach_disk_database(NULL);
return false;
}
// Get number of queries on disk before detaching
disk_db_num = get_number_of_queries_in_DB(memdb, "disk.query_storage", false);
mem_db_num = get_number_of_queries_in_DB(memdb, "query_storage", false);
if(!detach_disk_database(NULL))
return false;
disk_db_num = get_number_of_queries_in_DB(memdb, "disk.query_storage");
mem_db_num = get_number_of_queries_in_DB(memdb, "query_storage");
log_info("Imported %u queries from the on-disk database (it has %u rows)", mem_db_num, disk_db_num);
@ -494,19 +488,16 @@ bool export_queries_to_disk(bool final)
// Start database timer
timer_start(DATABASE_WRITE_TIMER);
// Attach disk database
if(!attach_disk_database(NULL))
return false;
// Start transaction
sqlite3 *memdb = get_memdb();
SQL_bool(memdb, "BEGIN TRANSACTION");
// Prepare SQLite3 statement
sqlite3_stmt *stmt = NULL;
log_debug(DEBUG_DATABASE, "Accessing in-memory database");
int rc = sqlite3_prepare_v2(memdb, querystr, -1, &stmt, NULL);
if( rc != SQLITE_OK ){
log_err("export_queries_to_disk(): SQL error prepare: %s", sqlite3_errstr(rc));
detach_disk_database(NULL);
return false;
}
@ -514,7 +505,6 @@ bool export_queries_to_disk(bool final)
if((rc = sqlite3_bind_int64(stmt, 1, last_disk_db_idx)) != SQLITE_OK)
{
log_err("export_queries_to_disk(): Failed to bind id: %s", sqlite3_errstr(rc));
detach_disk_database(NULL);
return false;
}
@ -525,7 +515,6 @@ bool export_queries_to_disk(bool final)
if((rc = sqlite3_bind_double(stmt, 2, time)) != SQLITE_OK)
{
log_err("export_queries_to_disk(): Failed to bind time: %s", sqlite3_errstr(rc));
detach_disk_database(NULL);
return false;
}
@ -548,6 +537,7 @@ bool export_queries_to_disk(bool final)
// Update last_disk_db_idx
// Prepare SQLite3 statement
log_debug(DEBUG_DATABASE, "Accessing in-memory database");
rc = sqlite3_prepare_v2(memdb, "SELECT MAX(id) FROM disk.query_storage;", -1, &stmt, NULL);
// Perform step
@ -589,16 +579,11 @@ bool export_queries_to_disk(bool final)
if((rc = sqlite3_exec(memdb, "END TRANSACTION", NULL, NULL, NULL)) != SQLITE_OK)
{
log_err("export_queries_to_disk(): Cannot end transaction: %s", sqlite3_errstr(rc));
detach_disk_database(NULL);
return false;
}
// Update number of queries in the disk database
disk_db_num = get_number_of_queries_in_DB(memdb, "disk.query_storage", false);
// Detach disk database
if(!detach_disk_database(NULL))
return false;
disk_db_num = get_number_of_queries_in_DB(memdb, "disk.query_storage");
// All temp queries were stored to disk, update the IDs
last_disk_db_idx += insertions;
@ -629,7 +614,7 @@ bool delete_old_queries_from_db(const bool use_memdb, const double mintime)
sqlite3 *db = NULL;
if(use_memdb)
db = memdb;
db = get_memdb();
else
db = dbopen(false, false);
@ -657,7 +642,8 @@ bool delete_old_queries_from_db(const bool use_memdb, const double mintime)
mintime, sqlite3_errstr(rc));
// Update number of queries in in-memory database
const int new_num = get_number_of_queries_in_DB(memdb, "query_storage", false);
sqlite3 *memdb = get_memdb();
const int new_num = get_number_of_queries_in_DB(memdb, "query_storage");
log_debug(DEBUG_GC, "delete_old_queries_from_db(): Deleted %i (%u) queries, new number of queries in memory: %i",
sqlite3_changes(db), (mem_db_num - new_num), new_num);
mem_db_num = new_num;
@ -784,6 +770,29 @@ bool add_ftl_table_description(sqlite3 *db)
return true;
}
bool rename_query_storage_column_regex_id(sqlite3 *db)
{
// Start transaction of database update
SQL_bool(db, "BEGIN TRANSACTION");
// Rename column regex_id to list_id
SQL_bool(db, "ALTER TABLE query_storage RENAME COLUMN regex_id TO list_id;");
// The VIEW queries is automatically updated by SQLite3
// Update database version to 17
if(!db_set_FTL_property(db, DB_VERSION, 17))
{
log_err("rename_query_storage_column_regex_id(): Failed to update database version!");
return false;
}
// Finish transaction
SQL_bool(db, "COMMIT");
return true;
}
bool optimize_queries_table(sqlite3 *db)
{
// Start transaction of database update
@ -903,6 +912,7 @@ void DB_read_queries(void)
// Prepare SQLite3 statement
sqlite3_stmt *stmt = NULL;
sqlite3 *memdb = get_memdb();
int rc = sqlite3_prepare_v2(memdb, querystr, -1, &stmt, NULL);
if( rc != SQLITE_OK )
{
@ -1064,17 +1074,21 @@ void DB_read_queries(void)
query->type = TYPE_OTHER;
query->qtype = type - 100;
}
counters->querytype[query->type]++;
log_debug(DEBUG_STATUS, "query type %d set (database), ID = %d, new count = %d", query->type, counters->queries, counters->querytype[query->type]);
// Status is set below
query->domainID = domainID;
query->clientID = clientID;
query->upstreamID = upstreamID;
query->id = 0;
query->cacheID = findCacheID(domainID, clientID, query->type, true);
query->id = counters->queries;
query->response = 0;
query->flags.response_calculated = reply_time_avail;
query->dnssec = dnssec;
query->reply = reply;
counters->reply[query->reply]++;
log_debug(DEBUG_STATUS, "reply type %d set (database), ID = %d, new count = %d", query->reply, counters->queries, counters->reply[query->reply]);
query->response = reply_time;
query->CNAME_domainID = -1;
// Initialize flags
@ -1089,15 +1103,15 @@ void DB_read_queries(void)
clientsData *client = getClient(clientID, true);
client->lastQuery = queryTimeStamp;
// Handle type counters
if(type >= TYPE_A && type < TYPE_MAX)
counters->querytype[type]++;
// Update overTime data
overTime[timeidx].total++;
// Update overTime data structure with the new client
// Update client's overTime data structure
change_clientcount(client, 0, 0, timeidx, 1);
// Get domain pointer
domainsData *domain = getDomain(domainID, true);
domain->lastQuery = queryTimeStamp;
// Increase DNS queries counter
counters->queries++;
@ -1122,21 +1136,16 @@ void DB_read_queries(void)
// Set ID of the domainlist entry that was the reason for permitting/blocking this query
// We assume the value in this field is said ID when it is not a CNAME-related domain
// (checked above) and the value of additional_info is not NULL (0 bytes storage size)
const int cacheID = findCacheID(query->domainID, query->clientID, query->type, true);
DNSCacheData *cache = getDNSCache(cacheID, true);
DNSCacheData *cache = getDNSCache(query->cacheID, true);
// Only load if
// a) we have a cache entry
// b) the value of additional_info is not NULL (0 bytes storage size)
if(cache != NULL && sqlite3_column_bytes(stmt, 7) != 0)
cache->domainlist_id = sqlite3_column_int(stmt, 7);
cache->list_id = sqlite3_column_int(stmt, 7);
}
// Increment status counters, we first have to add one to the count of
// unknown queries because query_set_status() will subtract from there
// when setting a different status
if(status != QUERY_UNKNOWN)
counters->status[QUERY_UNKNOWN]++;
query_set_status(query, status);
// Increment status counters
query_set_status_init(query, status);
// Do further processing based on the query status we read from the database
switch(status)
@ -1157,7 +1166,6 @@ void DB_read_queries(void)
case QUERY_SPECIAL_DOMAIN: // Blocked by special domain handling
query->flags.blocked = true;
// Get domain pointer
domainsData *domain = getDomain(domainID, true);
domain->blockedcount++;
change_clientcount(client, 0, 1, -1, 0);
break;
@ -1172,7 +1180,6 @@ void DB_read_queries(void)
upstreamsData *upstream = getUpstream(upstreamID, true);
if(upstream != NULL)
{
upstream->overTime[timeidx]++;
upstream->lastQuery = queryTimeStamp;
upstream->count++;
}
@ -1219,12 +1226,9 @@ void update_disk_db_idx(void)
// starting counting from zero (would result in a UNIQUE constraint violation)
const char *querystr = "SELECT MAX(id) FROM disk.query_storage";
// Attach disk database
if(!attach_disk_database(NULL))
return;
// Prepare SQLite3 statement
sqlite3_stmt *stmt = NULL;
sqlite3 *memdb = get_memdb();
int rc = sqlite3_prepare_v2(memdb, querystr, -1, &stmt, NULL);
// Perform step
@ -1239,9 +1243,6 @@ void update_disk_db_idx(void)
log_debug(DEBUG_DATABASE, "Last long-term idx is %lu", last_disk_db_idx);
if(!detach_disk_database(NULL))
return;
// Update indices so that the next call to DB_save_queries() skips the
// queries that we just imported from the database
last_mem_db_idx = last_disk_db_idx;
@ -1277,6 +1278,7 @@ bool queries_to_database(void)
}
// Start preparing query
sqlite3 *memdb = get_memdb();
rc = sqlite3_prepare_v3(memdb, "REPLACE INTO query_storage VALUES "\
"(?1," \
"?2," \
@ -1462,8 +1464,8 @@ bool queries_to_database(void)
}
// Get cache entry for this query
const int cacheID = findCacheID(query->domainID, query->clientID, query->type, false);
DNSCacheData *cache = cacheID < 0 ? NULL : getDNSCache(cacheID, true);
const int cacheID = query->cacheID >= 0 ? query->cacheID : findCacheID(query->domainID, query->clientID, query->type, false);
DNSCacheData *cache = getDNSCache(cacheID, true);
// ADDITIONAL_INFO
if(query->status == QUERY_GRAVITY_CNAME ||
@ -1488,15 +1490,15 @@ bool queries_to_database(void)
break;
}
}
else if(cache != NULL && query->status == QUERY_REGEX)
else if(cache != NULL && cache->list_id != -1)
{
// Restore regex ID if applicable
sqlite3_bind_int(query_stmt, 9, ADDINFO_REGEX_ID);
sqlite3_bind_int(query_stmt, 10, cache->domainlist_id);
sqlite3_bind_int(query_stmt, 9, ADDINFO_LIST_ID);
sqlite3_bind_int(query_stmt, 10, cache->list_id);
// Execute prepared addinfo statement and check if successful
sqlite3_bind_int(addinfo_stmt, 1, ADDINFO_REGEX_ID);
sqlite3_bind_int(addinfo_stmt, 2, cache->domainlist_id);
sqlite3_bind_int(addinfo_stmt, 1, ADDINFO_LIST_ID);
sqlite3_bind_int(addinfo_stmt, 2, cache->list_id);
rc = sqlite3_step(addinfo_stmt);
sqlite3_clear_bindings(addinfo_stmt);
sqlite3_reset(addinfo_stmt);
@ -1527,9 +1529,9 @@ bool queries_to_database(void)
// DNSSEC
sqlite3_bind_int(query_stmt, 13, query->dnssec);
// REGEX_ID
if(cache != NULL && cache->domainlist_id > -1)
sqlite3_bind_int(query_stmt, 14, cache->domainlist_id);
// LIST_ID
if(cache != NULL && cache->list_id != -1)
sqlite3_bind_int(query_stmt, 14, cache->list_id);
else
// Not applicable, setting NULL
sqlite3_bind_null(query_stmt, 14);
@ -1579,7 +1581,7 @@ bool queries_to_database(void)
}
// Update number of queries in in-memory database
mem_db_num = get_number_of_queries_in_DB(memdb, "query_storage", false);
mem_db_num = get_number_of_queries_in_DB(memdb, "query_storage");
if(config.debug.database.v.b && updated + added > 0)
{

View File

@ -11,7 +11,7 @@
#define QUERY_TABLE_PRIVATE_H
// struct queriesData
#include "../datastructure.h"
#include "datastructure.h"
#define CREATE_FTL_TABLE "CREATE TABLE ftl ( id INTEGER PRIMARY KEY NOT NULL, value BLOB NOT NULL );"
@ -23,20 +23,21 @@
"client TEXT NOT NULL, " \
"forward TEXT );"
#define CREATE_QUERY_STORAGE_TABLE_V13 "CREATE TABLE query_storage ( id INTEGER PRIMARY KEY AUTOINCREMENT, " \
"timestamp INTEGER NOT NULL, " \
"type INTEGER NOT NULL, " \
"status INTEGER NOT NULL, " \
"domain INTEGER NOT NULL, " \
"client INTEGER NOT NULL, " \
"forward INTEGER, " \
"additional_info INTEGER, " \
"reply_type INTEGER, " \
"reply_time REAL, " \
"dnssec INTEGER, " \
"regex_id INTEGER );"
#define MEMDB_VERSION 17
#define CREATE_QUERY_STORAGE_TABLE "CREATE TABLE query_storage ( id INTEGER PRIMARY KEY AUTOINCREMENT, " \
"timestamp INTEGER NOT NULL, " \
"type INTEGER NOT NULL, " \
"status INTEGER NOT NULL, " \
"domain INTEGER NOT NULL, " \
"client INTEGER NOT NULL, " \
"forward INTEGER, " \
"additional_info INTEGER, " \
"reply_type INTEGER, " \
"reply_time REAL, " \
"dnssec INTEGER, " \
"list_id INTEGER );"
#define CREATE_QUERIES_VIEW_V13 "CREATE VIEW queries AS " \
#define CREATE_QUERIES_VIEW "CREATE VIEW queries AS " \
"SELECT id, timestamp, type, status, " \
"CASE typeof(domain) " \
"WHEN 'integer' THEN (SELECT domain FROM domain_by_id d WHERE d.id = q.domain) ELSE domain END domain," \
@ -46,7 +47,7 @@
"WHEN 'integer' THEN (SELECT forward FROM forward_by_id f WHERE f.id = q.forward) ELSE forward END forward," \
"CASE typeof(additional_info) "\
"WHEN 'integer' THEN (SELECT content FROM addinfo_by_id a WHERE a.id = q.additional_info) ELSE additional_info END additional_info, " \
"reply_type, reply_time, dnssec, regex_id FROM query_storage q"
"reply_type, reply_time, dnssec, list_id FROM query_storage q"
// Version 1
#define CREATE_QUERIES_TIMESTAMP_INDEX "CREATE INDEX idx_queries_timestamp ON queries (timestamp);"
@ -62,8 +63,7 @@
#define CREATE_QUERY_STORAGE_REPLY_TYPE_INDEX "CREATE INDEX idx_query_storage_reply_type ON query_storage (reply_type);"
#define CREATE_QUERY_STORAGE_REPLY_TIME_INDEX "CREATE INDEX idx_query_storage_reply_time ON query_storage (reply_time);"
#define CREATE_QUERY_STORAGE_DNSSEC_INDEX "CREATE INDEX idx_query_storage_dnssec ON query_storage (dnssec);"
//#define CREATE_QUERY_STORAGE_TTL_INDEX "CREATE INDEX idx_query_storage_ttl ON query_storage (ttl);"
//#define CREATE_QUERY_STORAGE_REGEX_ID_INDEX "CREATE INDEX idx_query_storage_regex_id ON query_storage (regex_id);"
#define CREATE_QUERY_STORAGE_LIST_ID_INDEX "CREATE INDEX idx_query_storage_list_id ON query_storage (list_id);"
#define CREATE_DOMAINS_BY_ID "CREATE TABLE domain_by_id (id INTEGER PRIMARY KEY, domain TEXT NOT NULL);"
#define CREATE_CLIENTS_BY_ID "CREATE TABLE client_by_id (id INTEGER PRIMARY KEY, ip TEXT NOT NULL, name TEXT);"
@ -77,12 +77,12 @@
#ifdef QUERY_TABLE_PRIVATE
const char *table_creation[] = {
CREATE_QUERY_STORAGE_TABLE_V13,
CREATE_QUERY_STORAGE_TABLE,
CREATE_DOMAINS_BY_ID,
CREATE_CLIENTS_BY_ID,
CREATE_FORWARD_BY_ID,
CREATE_ADDINFO_BY_ID,
CREATE_QUERIES_VIEW_V13,
CREATE_QUERIES_VIEW,
};
const char *index_creation[] = {
CREATE_QUERY_STORAGE_ID_INDEX,
@ -96,8 +96,7 @@ const char *index_creation[] = {
CREATE_QUERY_STORAGE_REPLY_TYPE_INDEX,
CREATE_QUERY_STORAGE_REPLY_TIME_INDEX,
CREATE_QUERY_STORAGE_DNSSEC_INDEX,
// CREATE_QUERY_STORAGE_TTL_INDEX,
// CREATE_QUERY_STORAGE_REGEX_ID_INDEX
CREATE_QUERY_STORAGE_LIST_ID_INDEX
CREATE_DOMAIN_BY_ID_DOMAIN_INDEX,
CREATE_CLIENTS_BY_ID_IPNAME_INDEX,
CREATE_FORWARD_BY_ID_FORWARD_INDEX,
@ -111,11 +110,9 @@ bool init_memory_database(void);
sqlite3 *get_memdb(void) __attribute__((pure));
void close_memory_database(void);
bool import_queries_from_disk(void);
bool attach_disk_database(const char **msg);
bool attach_database(sqlite3* db, const char **message, const char *path, const char *alias);
bool detach_disk_database(const char **msg);
bool detach_database(sqlite3* db, const char **message, const char *alias);
int get_number_of_queries_in_DB(sqlite3 *db, const char *tablename, const bool do_attach);
int get_number_of_queries_in_DB(sqlite3 *db, const char *tablename);
bool export_queries_to_disk(bool final);
bool delete_old_queries_from_db(const bool use_memdb, const double mintime);
bool add_additional_info_column(sqlite3 *db);
@ -128,5 +125,6 @@ bool create_addinfo_table(sqlite3 *db);
bool add_query_storage_columns(sqlite3 *db);
bool add_query_storage_column_regex_id(sqlite3 *db);
bool add_ftl_table_description(sqlite3 *db);
bool rename_query_storage_column_regex_id(sqlite3 *db);
#endif //QUERY_TABLE_PRIVATE_H

View File

@ -12,6 +12,8 @@
#include "database/session-table.h"
#include "database/common.h"
#include "config/config.h"
// get_memdb()
#include "database/query-table.h"
bool create_session_table(sqlite3 *db)
{
@ -198,8 +200,8 @@ bool backup_db_sessions(struct session *sessions, const uint16_t max_sessions)
return false;
}
log_info("Stored %u/%u API session%s in the database",
api_sessions, max_sessions, max_sessions == 1 ? "" : "s");
log_info("Stored %u API session%s in the database",
api_sessions, api_sessions == 1 ? "" : "s");
// Close database connection
dbclose(&db);
@ -216,22 +218,17 @@ bool restore_db_sessions(struct session *sessions, const uint16_t max_sessions)
return true;
}
sqlite3 *db = dbopen(false, false);
if(db == NULL)
{
log_warn("Failed to open database in restore_db_sessions()");
return false;
}
sqlite3 *memdb = get_memdb();
// Remove expired sessions from database
SQL_bool(db, "DELETE FROM session WHERE valid_until < strftime('%%s', 'now');");
SQL_bool(memdb, "DELETE FROM disk.session WHERE valid_until < strftime('%%s', 'now');");
// Get all sessions from database
sqlite3_stmt *stmt = NULL;
if(sqlite3_prepare_v2(db, "SELECT login_at, valid_until, remote_addr, user_agent, sid, csrf, tls_login, tls_mixed, app FROM session;", -1, &stmt, 0) != SQLITE_OK)
if(sqlite3_prepare_v2(memdb, "SELECT login_at, valid_until, remote_addr, user_agent, sid, csrf, tls_login, tls_mixed, app FROM disk.session;", -1, &stmt, 0) != SQLITE_OK)
{
log_err("SQL error in restore_db_sessions(): %s (%d)",
sqlite3_errmsg(db), sqlite3_errcode(db));
sqlite3_errmsg(memdb), sqlite3_errcode(memdb));
return false;
}
@ -296,14 +293,14 @@ bool restore_db_sessions(struct session *sessions, const uint16_t max_sessions)
i++;
}
log_info("Restored %u/%u API session%s from the database",
i, max_sessions, max_sessions == 1 ? "" : "s");
log_info("Restored %u API session%s from the database",
i, i == 1 ? "" : "s");
// Finalize statement
if(sqlite3_finalize(stmt) != SQLITE_OK)
{
log_err("SQL error in restore_db_sessions(): %s (%d)",
sqlite3_errmsg(db), sqlite3_errcode(db));
sqlite3_errmsg(memdb), sqlite3_errcode(memdb));
return false;
}
@ -311,11 +308,9 @@ bool restore_db_sessions(struct session *sessions, const uint16_t max_sessions)
// We use secure_delete to make sure the sessions are really gone
// In this mode, SQLite overwrites the deleted content with zeros
// (https://www.sqlite.org/pragma.html#pragma_secure_delete)
SQL_bool(db, "PRAGMA secure_delete = ON;");
SQL_bool(db, "DELETE FROM session;");
// Close database connection
dbclose(&db);
SQL_bool(memdb, "PRAGMA secure_delete = ON;");
SQL_bool(memdb, "DELETE FROM disk.session;");
SQL_bool(memdb, "PRAGMA secure_delete = OFF;");
return true;
}

File diff suppressed because it is too large Load Diff

View File

@ -29,32 +29,6 @@
// isMAC()
#include "network-table.h"
// Counting number of occurrences of a specific char in a string
static size_t __attribute__ ((pure)) count_char(const char *haystack, const char needle)
{
size_t count = 0u;
while(*haystack)
if (*haystack++ == needle)
++count;
return count;
}
// Identify MAC addresses using a set of suitable criteria
static bool __attribute__ ((pure)) isMAC(const char *input)
{
if(input != NULL && // Valid input
strlen(input) == 17u && // MAC addresses are always 17 chars long (6 bytes + 5 colons)
count_char(input, ':') == 5u && // MAC addresses always have 5 colons
strstr(input, "::") == NULL) // No double-colons (IPv6 address abbreviation)
{
// This is a MAC address of the form AA:BB:CC:DD:EE:FF
return true;
}
// Not a MAC address
return false;
}
static void subnet_match_impl(sqlite3_context *context, int argc, sqlite3_value **argv)
{
// Exactly two arguments should be submitted to this routine

File diff suppressed because it is too large Load Diff

View File

@ -146,9 +146,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION "3.44.0"
#define SQLITE_VERSION_NUMBER 3044000
#define SQLITE_SOURCE_ID "2023-11-01 11:23:50 17129ba1ff7f0daf37100ee82d507aef7827cf38de1866e2633096ae6ad81301"
#define SQLITE_VERSION "3.45.1"
#define SQLITE_VERSION_NUMBER 3045001
#define SQLITE_SOURCE_ID "2024-01-30 16:01:20 e876e51a0ed5c5b3126f52e532044363a014bc594cfefa87ffb5b82257cc467a"
/*
** CAPI3REF: Run-Time Library Version Numbers
@ -3954,15 +3954,17 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename);
** </ul>
**
** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
** text that describes the error, as either UTF-8 or UTF-16 respectively.
** text that describes the error, as either UTF-8 or UTF-16 respectively,
** or NULL if no error message is available.
** (See how SQLite handles [invalid UTF] for exceptions to this rule.)
** ^(Memory to hold the error message string is managed internally.
** The application does not need to worry about freeing the result.
** However, the error string might be overwritten or deallocated by
** subsequent calls to other SQLite interface functions.)^
**
** ^The sqlite3_errstr() interface returns the English-language text
** that describes the [result code], as UTF-8.
** ^The sqlite3_errstr(E) interface returns the English-language text
** that describes the [result code] E, as UTF-8, or NULL if E is not an
** result code for which a text error message is available.
** ^(Memory to hold the error message string is managed internally
** and must not be freed by the application)^.
**
@ -5573,13 +5575,27 @@ SQLITE_API int sqlite3_create_window_function(
** </dd>
**
** [[SQLITE_SUBTYPE]] <dt>SQLITE_SUBTYPE</dt><dd>
** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call
** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call
** [sqlite3_value_subtype()] to inspect the sub-types of its arguments.
** Specifying this flag makes no difference for scalar or aggregate user
** functions. However, if it is not specified for a user-defined window
** function, then any sub-types belonging to arguments passed to the window
** function may be discarded before the window function is called (i.e.
** sqlite3_value_subtype() will always return 0).
** This flag instructs SQLite to omit some corner-case optimizations that
** might disrupt the operation of the [sqlite3_value_subtype()] function,
** causing it to return zero rather than the correct subtype().
** SQL functions that invokes [sqlite3_value_subtype()] should have this
** property. If the SQLITE_SUBTYPE property is omitted, then the return
** value from [sqlite3_value_subtype()] might sometimes be zero even though
** a non-zero subtype was specified by the function argument expression.
**
** [[SQLITE_RESULT_SUBTYPE]] <dt>SQLITE_RESULT_SUBTYPE</dt><dd>
** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call
** [sqlite3_result_subtype()] to cause a sub-type to be associated with its
** result.
** Every function that invokes [sqlite3_result_subtype()] should have this
** property. If it does not, then the call to [sqlite3_result_subtype()]
** might become a no-op if the function is used as term in an
** [expression index]. On the other hand, SQL functions that never invoke
** [sqlite3_result_subtype()] should avoid setting this property, as the
** purpose of this property is to disable certain optimizations that are
** incompatible with subtypes.
** </dd>
** </dl>
*/
@ -5587,6 +5603,7 @@ SQLITE_API int sqlite3_create_window_function(
#define SQLITE_DIRECTONLY 0x000080000
#define SQLITE_SUBTYPE 0x000100000
#define SQLITE_INNOCUOUS 0x000200000
#define SQLITE_RESULT_SUBTYPE 0x001000000
/*
** CAPI3REF: Deprecated Functions
@ -5783,6 +5800,12 @@ SQLITE_API int sqlite3_value_encoding(sqlite3_value*);
** information can be used to pass a limited amount of context from
** one SQL function to another. Use the [sqlite3_result_subtype()]
** routine to set the subtype for the return value of an SQL function.
**
** Every [application-defined SQL function] that invoke this interface
** should include the [SQLITE_SUBTYPE] property in the text
** encoding argument when the function is [sqlite3_create_function|registered].
** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype()
** might return zero instead of the upstream subtype in some corner cases.
*/
SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*);
@ -5913,14 +5936,22 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
** <li> ^(when sqlite3_set_auxdata() is invoked again on the same
** parameter)^, or
** <li> ^(during the original sqlite3_set_auxdata() call when a memory
** allocation error occurs.)^ </ul>
** allocation error occurs.)^
** <li> ^(during the original sqlite3_set_auxdata() call if the function
** is evaluated during query planning instead of during query execution,
** as sometimes happens with [SQLITE_ENABLE_STAT4].)^ </ul>
**
** Note the last bullet in particular. The destructor X in
** Note the last two bullets in particular. The destructor X in
** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the
** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata()
** should be called near the end of the function implementation and the
** function implementation should not make any use of P after
** sqlite3_set_auxdata() has been called.
** sqlite3_set_auxdata() has been called. Furthermore, a call to
** sqlite3_get_auxdata() that occurs immediately after a corresponding call
** to sqlite3_set_auxdata() might still return NULL if an out-of-memory
** condition occurred during the sqlite3_set_auxdata() call or if the
** function is being evaluated during query planning rather than during
** query execution.
**
** ^(In practice, auxiliary data is preserved between function calls for
** function parameters that are compile-time constants, including literal
@ -6194,6 +6225,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n);
** higher order bits are discarded.
** The number of subtype bytes preserved by SQLite might increase
** in future releases of SQLite.
**
** Every [application-defined SQL function] that invokes this interface
** should include the [SQLITE_RESULT_SUBTYPE] property in its
** text encoding argument when the SQL function is
** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE]
** property is omitted from the function that invokes sqlite3_result_subtype(),
** then in some cases the sqlite3_result_subtype() might fail to set
** the result subtype.
**
** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any
** SQL function that invokes the sqlite3_result_subtype() interface
** and that does not have the SQLITE_RESULT_SUBTYPE property will raise
** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1
** by default.
*/
SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int);
@ -7994,9 +8039,11 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*);
**
** ^(Some systems (for example, Windows 95) do not support the operation
** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try()
** will always return SQLITE_BUSY. The SQLite core only ever uses
** sqlite3_mutex_try() as an optimization so this is acceptable
** behavior.)^
** will always return SQLITE_BUSY. In most cases the SQLite core only uses
** sqlite3_mutex_try() as an optimization, so this is acceptable
** behavior. The exceptions are unix builds that set the
** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working
** sqlite3_mutex_try() is required.)^
**
** ^The sqlite3_mutex_leave() routine exits a mutex that was
** previously entered by the same thread. The behavior
@ -8255,6 +8302,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_ASSERT 12
#define SQLITE_TESTCTRL_ALWAYS 13
#define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */
#define SQLITE_TESTCTRL_JSON_SELFCHECK 14
#define SQLITE_TESTCTRL_OPTIMIZATIONS 15
#define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */
#define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */
@ -12768,8 +12816,11 @@ struct Fts5PhraseIter {
** created with the "columnsize=0" option.
**
** xColumnText:
** This function attempts to retrieve the text of column iCol of the
** current document. If successful, (*pz) is set to point to a buffer
** If parameter iCol is less than zero, or greater than or equal to the
** number of columns in the table, SQLITE_RANGE is returned.
**
** Otherwise, this function attempts to retrieve the text of column iCol of
** the current document. If successful, (*pz) is set to point to a buffer
** containing the text in utf-8 encoding, (*pn) is set to the size in bytes
** (not characters) of the buffer and SQLITE_OK is returned. Otherwise,
** if an error occurs, an SQLite error code is returned and the final values
@ -12779,8 +12830,10 @@ struct Fts5PhraseIter {
** Returns the number of phrases in the current query expression.
**
** xPhraseSize:
** Returns the number of tokens in phrase iPhrase of the query. Phrases
** are numbered starting from zero.
** If parameter iCol is less than zero, or greater than or equal to the
** number of phrases in the current query, as returned by xPhraseCount,
** 0 is returned. Otherwise, this function returns the number of tokens in
** phrase iPhrase of the query. Phrases are numbered starting from zero.
**
** xInstCount:
** Set *pnInst to the total number of occurrences of all phrases within
@ -12796,12 +12849,13 @@ struct Fts5PhraseIter {
** Query for the details of phrase match iIdx within the current row.
** Phrase matches are numbered starting from zero, so the iIdx argument
** should be greater than or equal to zero and smaller than the value
** output by xInstCount().
** output by xInstCount(). If iIdx is less than zero or greater than
** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned.
**
** Usually, output parameter *piPhrase is set to the phrase number, *piCol
** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol
** to the column in which it occurs and *piOff the token offset of the
** first token of the phrase. Returns SQLITE_OK if successful, or an error
** code (i.e. SQLITE_NOMEM) if an error occurs.
** first token of the phrase. SQLITE_OK is returned if successful, or an
** error code (i.e. SQLITE_NOMEM) if an error occurs.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
@ -12827,6 +12881,10 @@ struct Fts5PhraseIter {
** Invoking Api.xUserData() returns a copy of the pointer passed as
** the third argument to pUserData.
**
** If parameter iPhrase is less than zero, or greater than or equal to
** the number of phrases in the query, as returned by xPhraseCount(),
** this function returns SQLITE_RANGE.
**
** If the callback function returns any value other than SQLITE_OK, the
** query is abandoned and the xQueryPhrase function returns immediately.
** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
@ -12941,9 +12999,42 @@ struct Fts5PhraseIter {
**
** xPhraseNextColumn()
** See xPhraseFirstColumn above.
**
** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken)
** This is used to access token iToken of phrase iPhrase of the current
** query. Before returning, output parameter *ppToken is set to point
** to a buffer containing the requested token, and *pnToken to the
** size of this buffer in bytes.
**
** If iPhrase or iToken are less than zero, or if iPhrase is greater than
** or equal to the number of phrases in the query as reported by
** xPhraseCount(), or if iToken is equal to or greater than the number of
** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken
** are both zeroed.
**
** The output text is not a copy of the query text that specified the
** token. It is the output of the tokenizer module. For tokendata=1
** tables, this includes any embedded 0x00 and trailing data.
**
** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken)
** This is used to access token iToken of phrase hit iIdx within the
** current row. If iIdx is less than zero or greater than or equal to the
** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise,
** output variable (*ppToken) is set to point to a buffer containing the
** matching document token, and (*pnToken) to the size of that buffer in
** bytes. This API is not available if the specified token matches a
** prefix query term. In that case both output variables are always set
** to 0.
**
** The output text is not a copy of the document text that was tokenized.
** It is the output of the tokenizer module. For tokendata=1 tables, this
** includes any embedded 0x00 and trailing data.
**
** This API can be quite slow if used with an FTS5 table created with the
** "detail=none" or "detail=column" option.
*/
struct Fts5ExtensionApi {
int iVersion; /* Currently always set to 2 */
int iVersion; /* Currently always set to 3 */
void *(*xUserData)(Fts5Context*);
@ -12978,6 +13069,13 @@ struct Fts5ExtensionApi {
int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*);
void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol);
/* Below this point are iVersion>=3 only */
int (*xQueryToken)(Fts5Context*,
int iPhrase, int iToken,
const char **ppToken, int *pnToken
);
int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*);
};
/*

View File

@ -83,13 +83,13 @@ int findQueryID(const int id)
return -1;
}
int findUpstreamID(const char * upstreamString, const in_port_t port)
int _findUpstreamID(const char *upstreamString, const in_port_t port, int line, const char *func, const char *file)
{
// Go through already knows upstream servers and see if we used one of those
for(int upstreamID=0; upstreamID < counters->upstreams; upstreamID++)
for(int upstreamID = 0; upstreamID < counters->upstreams; upstreamID++)
{
// Get upstream pointer
upstreamsData* upstream = getUpstream(upstreamID, true);
upstreamsData* upstream = _getUpstream(upstreamID, false, line, func, file);
// Check if the returned pointer is valid before trying to access it
if(upstream == NULL)
@ -101,10 +101,10 @@ int findUpstreamID(const char * upstreamString, const in_port_t port)
// This upstream server is not known
// Store ID
const int upstreamID = counters->upstreams;
log_debug(DEBUG_ANY, "New upstream server: %s:%u (%i/%i)", upstreamString, port, upstreamID, counters->upstreams_MAX);
log_debug(DEBUG_GC, "New upstream server: %s:%u (ID %i)", upstreamString, port, upstreamID);
// Get upstream pointer
upstreamsData* upstream = getUpstream(upstreamID, false);
upstreamsData* upstream = _getUpstream(upstreamID, false, line, func, file);
if(upstream == NULL)
{
log_err("Encountered serious memory error in findupstreamID()");
@ -137,13 +137,34 @@ int findUpstreamID(const char * upstreamString, const in_port_t port)
return upstreamID;
}
int findDomainID(const char *domainString, const bool count)
static int get_next_free_domainID(void)
{
// Scan the existing domain table for a recyclable slot: entries removed by
// garbage collection have their magic byte cleared to 0x00 and can be reused
for(int domainID = 0; domainID < counters->domains; domainID++)
{
// Get domain pointer
domainsData* domain = getDomain(domainID, false);
// Check if the returned pointer is valid before trying to access it
if(domain == NULL)
continue;
// Check if the magic byte is UNSET (0x00) - if so, this slot is free
if(domain->magic == 0x00)
return domainID;
}
// If we did not return until here, then we need to allocate a new domain ID
return counters->domains;
}
int _findDomainID(const char *domainString, const bool count, int line, const char *func, const char *file)
{
uint32_t domainHash = hashStr(domainString);
for(int domainID = 0; domainID < counters->domains; domainID++)
{
// Get domain pointer
domainsData* domain = getDomain(domainID, true);
domainsData* domain = _getDomain(domainID, false, line, func, file);
// Check if the returned pointer is valid before trying to access it
if(domain == NULL)
@ -157,23 +178,28 @@ int findDomainID(const char *domainString, const bool count)
if(strcmp(getstr(domain->domainpos), domainString) == 0)
{
if(count)
{
domain->count++;
domain->lastQuery = double_time();
}
return domainID;
}
}
// If we did not return until here, then this domain is not known
// Store ID
const int domainID = counters->domains;
const int domainID = get_next_free_domainID();
// Get domain pointer
domainsData* domain = getDomain(domainID, false);
domainsData* domain = _getDomain(domainID, false, line, func, file);
if(domain == NULL)
{
log_err("Encountered serious memory error in findDomainID()");
return -1;
}
log_debug(DEBUG_GC, "New domain: %s (ID %d)", domainString, domainID);
// Set magic byte
domain->magic = MAGICBYTE;
// Set its counter to 1 only if this domain is to be counted
@ -185,19 +211,41 @@ int findDomainID(const char *domainString, const bool count)
domain->domainpos = addstr(domainString);
// Store pre-computed hash of domain for faster lookups later on
domain->domainhash = hashStr(domainString);
domain->lastQuery = 0.0;
// Increase counter by one
counters->domains++;
return domainID;
}
int findClientID(const char *clientIP, const bool count, const bool aliasclient)
static int get_next_free_clientID(void)
{
// Scan the existing client table for a recyclable slot: entries removed by
// garbage collection have their magic byte cleared to 0x00 and can be reused
for(int clientID = 0; clientID < counters->clients; clientID++)
{
// Get client pointer
clientsData* client = getClient(clientID, false);
// Check if the returned pointer is valid before trying to access it
if(client == NULL)
continue;
// Check if the magic byte is unset (0x00) - if so, this slot is free
if(client->magic == 0x00)
return clientID;
}
// If we did not return until here, then we need to allocate a new client ID
return counters->clients;
}
int _findClientID(const char *clientIP, const bool count, const bool aliasclient, int line, const char *func, const char *file)
{
// Compare content of client against known client IP addresses
for(int clientID=0; clientID < counters->clients; clientID++)
{
// Get client pointer
clientsData* client = getClient(clientID, true);
clientsData* client = _getClient(clientID, true, line, func, file);
// Check if the returned pointer is valid before trying to access it
if(client == NULL)
@ -223,16 +271,18 @@ int findClientID(const char *clientIP, const bool count, const bool aliasclient)
// If we did not return until here, then this client is definitely new
// Store ID
const int clientID = counters->clients;
const int clientID = get_next_free_clientID();
// Get client pointer
clientsData* client = getClient(clientID, false);
clientsData* client = _getClient(clientID, false, line, func, file);
if(client == NULL)
{
log_err("Encountered serious memory error in findClientID()");
return -1;
}
log_debug(DEBUG_GC, "New client: %s (ID %d)", clientIP, clientID);
// Set magic byte
client->magic = MAGICBYTE;
// Set its counter to 1
@ -264,7 +314,7 @@ int findClientID(const char *clientIP, const bool count, const bool aliasclient)
// Set all MAC address bytes to zero
client->hwlen = -1;
memset(client->hwaddr, 0, sizeof(client->hwaddr));
// This may be a alias-client, the ID is set elsewhere
// This may be an alias-client, the ID is set elsewhere
client->flags.aliasclient = aliasclient;
client->aliasclient_id = -1;
@ -319,6 +369,27 @@ void change_clientcount(clientsData *client, int total, int blocked, int overTim
}
}
static int get_next_free_cacheID(void)
{
// Scan the existing DNS cache table for a recyclable slot: entries removed by
// garbage collection have their magic byte cleared to 0x00 and can be reused
for(int cacheID = 0; cacheID < counters->dns_cache_size; cacheID++)
{
// Get cache pointer
DNSCacheData* cache = getDNSCache(cacheID, false);
// Check if the returned pointer is valid before trying to access it
if(cache == NULL)
continue;
// Check if the magic byte is UNSET (0x00) - if so, this slot is free
if(cache->magic == 0x00)
return cacheID;
}
// If we did not return until here, then we need to allocate a new cache ID
return counters->dns_cache_size;
}
int _findCacheID(const int domainID, const int clientID, const enum query_type query_type,
const bool create_new, const char *func, int line, const char *file)
{
@ -344,7 +415,7 @@ int _findCacheID(const int domainID, const int clientID, const enum query_type q
return -1;
// Get ID of new cache entry
const int cacheID = counters->dns_cache_size;
const int cacheID = get_next_free_cacheID();
// Get client pointer
DNSCacheData* dns_cache = _getDNSCache(cacheID, false, line, func, file);
@ -355,6 +426,9 @@ int _findCacheID(const int domainID, const int clientID, const enum query_type q
return -1;
}
log_debug(DEBUG_GC, "New cache entry: domainID %d, clientID %d, query_type %d (ID %d)",
domainID, clientID, query_type, cacheID);
// Initialize cache entry
dns_cache->magic = MAGICBYTE;
dns_cache->blocking_status = UNKNOWN_BLOCKED;
@ -362,7 +436,7 @@ int _findCacheID(const int domainID, const int clientID, const enum query_type q
dns_cache->clientID = clientID;
dns_cache->query_type = query_type;
dns_cache->force_reply = 0u;
dns_cache->domainlist_id = -1; // -1 = not set
dns_cache->list_id = -1; // -1 = not set
// Increase counter by one
counters->dns_cache_size++;
@ -441,7 +515,7 @@ const char *getClientIPString(const queriesData* query)
if(query->privacylevel < PRIVACY_HIDE_DOMAINS_CLIENTS)
{
// Get client pointer
const clientsData* client = getClient(query->clientID, false);
const clientsData* client = getClient(query->clientID, true);
// Check if the returned pointer is valid before trying to access it
if(client == NULL)
@ -484,12 +558,17 @@ void FTL_reset_per_client_domain_data(void)
for(int cacheID = 0; cacheID < counters->dns_cache_size; cacheID++)
{
// Reset all blocking yes/no fields for all domains and clients
// This forces a reprocessing of all available filters for any
// given domain and client the next time they are seen
DNSCacheData *dns_cache = getDNSCache(cacheID, true);
if(dns_cache != NULL)
dns_cache->blocking_status = UNKNOWN_BLOCKED;
// Get cache pointer
DNSCacheData* dns_cache = getDNSCache(cacheID, true);
// Check if the returned pointer is valid before trying to access it
if(dns_cache == NULL)
continue;
// Reset blocking status
dns_cache->blocking_status = UNKNOWN_BLOCKED;
// Reset domainlist ID
dns_cache->list_id = -1;
}
}
@ -926,19 +1005,27 @@ static const char* __attribute__ ((const)) query_status_str(const enum query_sta
return NULL;
}
void _query_set_status(queriesData *query, const enum query_status new_status, const char *func, const int line, const char *file)
void _query_set_status(queriesData *query, const enum query_status new_status, const bool init,
const char *func, const int line, const char *file)
{
// Debug logging
if(config.debug.status.v.b)
{
const char *oldstr = query->status < QUERY_STATUS_MAX ? query_status_str(query->status) : "INVALID";
if(query->status == new_status)
if(init)
{
const char *newstr = new_status < QUERY_STATUS_MAX ? query_status_str(new_status) : "INVALID";
log_debug(DEBUG_STATUS, "Query %i: status initialized: %s (%d) in %s() (%s:%i)",
query->id, newstr, new_status, func, short_path(file), line);
}
else if(query->status == new_status)
{
const char *oldstr = query->status < QUERY_STATUS_MAX ? query_status_str(query->status) : "INVALID";
log_debug(DEBUG_STATUS, "Query %i: status unchanged: %s (%d) in %s() (%s:%i)",
query->id, oldstr, query->status, func, short_path(file), line);
}
else
{
const char *oldstr = query->status < QUERY_STATUS_MAX ? query_status_str(query->status) : "INVALID";
const char *newstr = new_status < QUERY_STATUS_MAX ? query_status_str(new_status) : "INVALID";
log_debug(DEBUG_STATUS, "Query %i: status changed: %s (%d) -> %s (%d) in %s() (%s:%i)",
query->id, oldstr, query->status, newstr, new_status, func, short_path(file), line);
@ -949,30 +1036,40 @@ void _query_set_status(queriesData *query, const enum query_status new_status, c
if(new_status >= QUERY_STATUS_MAX)
return;
// Update counters
if(query->status != new_status)
const enum query_status old_status = query->status;
if(old_status == new_status && !init)
{
counters->status[query->status]--;
counters->status[new_status]++;
const int timeidx = getOverTimeID(query->timestamp);
if(is_blocked(query->status))
overTime[timeidx].blocked--;
if(is_blocked(new_status))
overTime[timeidx].blocked++;
if(query->status == QUERY_CACHE)
overTime[timeidx].cached--;
if(new_status == QUERY_CACHE)
overTime[timeidx].cached++;
if(query->status == QUERY_FORWARDED)
overTime[timeidx].forwarded--;
if(new_status == QUERY_FORWARDED)
overTime[timeidx].forwarded++;
// Nothing to do
return;
}
// Update status
// else: update global counters, ...
if(!init)
{
counters->status[old_status]--;
log_debug(DEBUG_STATUS, "status %d removed (!init), ID = %d, new count = %d", QUERY_UNKNOWN, query->id, counters->status[QUERY_UNKNOWN]);
}
counters->status[new_status]++;
log_debug(DEBUG_STATUS, "status %d set, ID = %d, new count = %d", new_status, query->id, counters->status[new_status]);
// ... update overTime counters, ...
const int timeidx = getOverTimeID(query->timestamp);
if(is_blocked(old_status) && !init)
overTime[timeidx].blocked--;
if(is_blocked(new_status))
overTime[timeidx].blocked++;
if(old_status == QUERY_CACHE && !init)
overTime[timeidx].cached--;
if(new_status == QUERY_CACHE)
overTime[timeidx].cached++;
if(old_status == QUERY_FORWARDED && !init)
overTime[timeidx].forwarded--;
if(new_status == QUERY_FORWARDED)
overTime[timeidx].forwarded++;
// ... and set new status
query->status = new_status;
}

View File

@ -30,6 +30,7 @@ typedef struct {
int domainID;
int clientID;
int upstreamID;
int cacheID;
int id; // the ID is a (signed) int in dnsmasq, so no need for a long int here
int CNAME_domainID; // only valid if query has a CNAME blocking status
int ede;
@ -63,7 +64,6 @@ typedef struct {
int count;
int failed;
unsigned int responses;
int overTime[OVERTIME_SLOTS];
size_t ippos;
size_t namepos;
double rtime;
@ -103,6 +103,7 @@ typedef struct {
int blockedcount;
uint32_t domainhash;
size_t domainpos;
double lastQuery;
} domainsData;
typedef struct {
@ -112,16 +113,19 @@ typedef struct {
enum query_type query_type;
int domainID;
int clientID;
int domainlist_id;
int list_id;
char *cname_target;
} DNSCacheData;
void strtolower(char *str);
uint32_t hashStr(const char *s) __attribute__((pure));
int findQueryID(const int id);
int findUpstreamID(const char * upstream, const in_port_t port);
int findDomainID(const char *domain, const bool count);
int findClientID(const char *client, const bool count, const bool aliasclient);
#define findUpstreamID(upstream, port) _findUpstreamID(upstream, port, __LINE__, __FUNCTION__, __FILE__)
int _findUpstreamID(const char *upstream, const in_port_t port, int line, const char *func, const char *file);
#define findDomainID(domain, count) _findDomainID(domain, count, __LINE__, __FUNCTION__, __FILE__)
int _findDomainID(const char *domain, const bool count, int line, const char *func, const char *file);
#define findClientID(client, count, aliasclient) _findClientID(client, count, aliasclient, __LINE__, __FUNCTION__, __FILE__)
int _findClientID(const char *client, const bool count, const bool aliasclient, int line, const char *func, const char *file);
#define findCacheID(domainID, clientID, query_type, create_new) _findCacheID(domainID, clientID, query_type, create_new, __FUNCTION__, __LINE__, __FILE__)
int _findCacheID(const int domainID, const int clientID, const enum query_type query_type, const bool create_new, const char *func, const int line, const char *file);
bool isValidIPv4(const char *addr);
@ -134,8 +138,9 @@ const char *get_cached_statuslist(void) __attribute__ ((pure));
int get_blocked_count(void) __attribute__ ((pure));
int get_forwarded_count(void) __attribute__ ((pure));
int get_cached_count(void) __attribute__ ((pure));
#define query_set_status(query, new_status) _query_set_status(query, new_status, __FUNCTION__, __LINE__, __FILE__)
void _query_set_status(queriesData *query, const enum query_status new_status, const char *func, const int line, const char *file);
#define query_set_status(query, new_status) _query_set_status(query, new_status, false, __FUNCTION__, __LINE__, __FILE__)
#define query_set_status_init(query, new_status) _query_set_status(query, new_status, true, __FUNCTION__, __LINE__, __FILE__)
void _query_set_status(queriesData *query, const enum query_status new_status, const bool init, const char *func, const int line, const char *file);
void FTL_reload_all_domainlists(void);
void FTL_reset_per_client_domain_data(void);

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -15,7 +15,8 @@
*/
#include "dnsmasq.h"
#include "../dnsmasq_interface.h"
#include "dnsmasq_interface.h"
#include "webserver/webserver.h"
static struct crec *cache_head = NULL, *cache_tail = NULL, **hash_table = NULL;
#ifdef HAVE_DHCP
@ -125,6 +126,7 @@ static const struct {
{ 258, "AVC" }, /* Application Visibility and Control [Wolfgang_Riedel] AVC/avc-completed-template 2016-02-26*/
{ 259, "DOA" }, /* Digital Object Architecture [draft-durand-doa-over-dns] DOA/doa-completed-template 2017-08-30*/
{ 260, "AMTRELAY" }, /* Automatic Multicast Tunneling Relay [RFC8777] AMTRELAY/amtrelay-completed-template 2019-02-06*/
{ 261, "RESINFO" }, /* Resolver Information as Key/Value Pairs https://datatracker.ietf.org/doc/draft-ietf-add-resolver-info/06/ */
{ 32768, "TA" }, /* DNSSEC Trust Authorities [Sam_Weiler][http://cameo.library.cmu.edu/][ Deploying DNSSEC Without a Signed Root. Technical Report 1999-19, Information Networking Institute, Carnegie Mellon University, April 2004.] 2005-12-13*/
{ 32769, "DLV" }, /* DNSSEC Lookaside Validation (OBSOLETE) [RFC8749][RFC4431] */
};
@ -440,18 +442,21 @@ unsigned int cache_remove_uid(const unsigned int uid)
{
int i;
unsigned int removed = 0;
struct crec *crecp, **up;
struct crec *crecp, *tmp, **up;
for (i = 0; i < hash_size; i++)
for (crecp = hash_table[i], up = &hash_table[i]; crecp; crecp = crecp->hash_next)
if ((crecp->flags & (F_HOSTS | F_DHCP | F_CONFIG)) && crecp->uid == uid)
{
*up = crecp->hash_next;
free(crecp);
removed++;
}
else
up = &crecp->hash_next;
for (crecp = hash_table[i], up = &hash_table[i]; crecp; crecp = tmp)
{
tmp = crecp->hash_next;
if ((crecp->flags & (F_HOSTS | F_DHCP | F_CONFIG)) && crecp->uid == uid)
{
*up = tmp;
free(crecp);
removed++;
}
else
up = &crecp->hash_next;
}
return removed;
}
@ -814,32 +819,28 @@ void cache_end_insert(void)
read_write(daemon->pipe_to_parent, (unsigned char *)name, m, 0);
read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->ttd, sizeof(new_chain->ttd), 0);
read_write(daemon->pipe_to_parent, (unsigned char *)&flags, sizeof(flags), 0);
if (flags & (F_IPV4 | F_IPV6 | F_DNSKEY | F_DS | F_RR))
read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr, sizeof(new_chain->addr), 0);
if (flags & F_RR)
{
read_write(daemon->pipe_to_parent, (unsigned char *)&new_chain->addr, sizeof(new_chain->addr), 0);
if (flags & F_RR)
{
/* A negative RR entry is possible and has no data, obviously. */
if (!(flags & F_NEG) && (flags & F_KEYTAG))
blockdata_write(new_chain->addr.rrblock.rrdata, new_chain->addr.rrblock.datalen, daemon->pipe_to_parent);
}
#ifdef HAVE_DNSSEC
if (flags & F_DNSKEY)
{
read_write(daemon->pipe_to_parent, (unsigned char *)&class, sizeof(class), 0);
blockdata_write(new_chain->addr.key.keydata, new_chain->addr.key.keylen, daemon->pipe_to_parent);
}
else if (flags & F_DS)
{
read_write(daemon->pipe_to_parent, (unsigned char *)&class, sizeof(class), 0);
/* A negative DS entry is possible and has no data, obviously. */
if (!(flags & F_NEG))
blockdata_write(new_chain->addr.ds.keydata, new_chain->addr.ds.keylen, daemon->pipe_to_parent);
}
#endif
/* A negative RR entry is possible and has no data, obviously. */
if (!(flags & F_NEG) && (flags & F_KEYTAG))
blockdata_write(new_chain->addr.rrblock.rrdata, new_chain->addr.rrblock.datalen, daemon->pipe_to_parent);
}
#ifdef HAVE_DNSSEC
if (flags & F_DNSKEY)
{
read_write(daemon->pipe_to_parent, (unsigned char *)&class, sizeof(class), 0);
blockdata_write(new_chain->addr.key.keydata, new_chain->addr.key.keylen, daemon->pipe_to_parent);
}
else if (flags & F_DS)
{
read_write(daemon->pipe_to_parent, (unsigned char *)&class, sizeof(class), 0);
/* A negative DS entry is possible and has no data, obviously. */
if (!(flags & F_NEG))
blockdata_write(new_chain->addr.ds.keydata, new_chain->addr.ds.keylen, daemon->pipe_to_parent);
}
#endif
}
}
@ -850,7 +851,18 @@ void cache_end_insert(void)
if (daemon->pipe_to_parent != -1)
{
ssize_t m = -1;
read_write(daemon->pipe_to_parent, (unsigned char *)&m, sizeof(m), 0);
#ifdef HAVE_DNSSEC
/* Sneak out possibly updated crypto HWM values. */
m = daemon->metrics[METRIC_CRYPTO_HWM];
read_write(daemon->pipe_to_parent, (unsigned char *)&m, sizeof(m), 0);
m = daemon->metrics[METRIC_SIG_FAIL_HWM];
read_write(daemon->pipe_to_parent, (unsigned char *)&m, sizeof(m), 0);
m = daemon->metrics[METRIC_WORK_HWM];
read_write(daemon->pipe_to_parent, (unsigned char *)&m, sizeof(m), 0);
#endif
}
new_chain = NULL;
@ -869,7 +881,7 @@ int cache_recv_insert(time_t now, int fd)
cache_start_insert();
while(1)
while (1)
{
if (!read_write(fd, (unsigned char *)&m, sizeof(m), 1))
@ -877,13 +889,29 @@ int cache_recv_insert(time_t now, int fd)
if (m == -1)
{
#ifdef HAVE_DNSSEC
/* Sneak in possibly updated crypto HWM. */
if (!read_write(fd, (unsigned char *)&m, sizeof(m), 1))
return 0;
if (m > daemon->metrics[METRIC_CRYPTO_HWM])
daemon->metrics[METRIC_CRYPTO_HWM] = m;
if (!read_write(fd, (unsigned char *)&m, sizeof(m), 1))
return 0;
if (m > daemon->metrics[METRIC_SIG_FAIL_HWM])
daemon->metrics[METRIC_SIG_FAIL_HWM] = m;
if (!read_write(fd, (unsigned char *)&m, sizeof(m), 1))
return 0;
if (m > daemon->metrics[METRIC_WORK_HWM])
daemon->metrics[METRIC_WORK_HWM] = m;
#endif
cache_end_insert();
return 1;
}
if (!read_write(fd, (unsigned char *)daemon->namebuff, m, 1) ||
!read_write(fd, (unsigned char *)&ttd, sizeof(ttd), 1) ||
!read_write(fd, (unsigned char *)&flags, sizeof(flags), 1))
!read_write(fd, (unsigned char *)&flags, sizeof(flags), 1) ||
!read_write(fd, (unsigned char *)&addr, sizeof(addr), 1))
return 0;
daemon->namebuff[m] = 0;
@ -914,30 +942,23 @@ int cache_recv_insert(time_t now, int fd)
{
unsigned short class = C_IN;
if (flags & (F_IPV4 | F_IPV6 | F_DNSKEY | F_DS | F_RR))
{
if (!read_write(fd, (unsigned char *)&addr, sizeof(addr), 1))
return 0;
if ((flags & F_RR) && !(flags & F_NEG) && (flags & F_KEYTAG)
&& !(addr.rrblock.rrdata = blockdata_read(fd, addr.rrblock.datalen)))
return 0;
if ((flags & F_RR) && !(flags & F_NEG) && (flags & F_KEYTAG)
&& !(addr.rrblock.rrdata = blockdata_read(fd, addr.rrblock.datalen)))
return 0;
#ifdef HAVE_DNSSEC
if (flags & F_DNSKEY)
{
if (!read_write(fd, (unsigned char *)&class, sizeof(class), 1) ||
!(addr.key.keydata = blockdata_read(fd, addr.key.keylen)))
return 0;
}
else if (flags & F_DS)
{
if (!read_write(fd, (unsigned char *)&class, sizeof(class), 1) ||
(!(flags & F_NEG) && !(addr.key.keydata = blockdata_read(fd, addr.key.keylen))))
return 0;
}
#endif
if (flags & F_DNSKEY)
{
if (!read_write(fd, (unsigned char *)&class, sizeof(class), 1) ||
!(addr.key.keydata = blockdata_read(fd, addr.key.keylen)))
return 0;
}
else if (flags & F_DS)
{
if (!read_write(fd, (unsigned char *)&class, sizeof(class), 1) ||
(!(flags & F_NEG) && !(addr.key.keydata = blockdata_read(fd, addr.key.keylen))))
return 0;
}
#endif
crecp = really_insert(daemon->namebuff, &addr, class, now, ttl, flags);
}
}
@ -1742,9 +1763,20 @@ int cache_make_stat(struct txt_record *t)
#endif
/* Pi-hole modification */
case TXT_PRIVACYLEVEL:
sprintf(buff+1, "%d", *pihole_privacylevel);
break;
case TXT_API_DOMAIN:
{
t->len = get_api_string(&buff, true);
t->txt = (unsigned char *)buff;
return 1;
}
case TXT_API_LOCAL:
{
t->len = get_api_string(&buff, false);
t->txt = (unsigned char *)buff;
return 1;
}
/* -------------------- */
case TXT_STAT_SERVERS:
@ -1827,8 +1859,18 @@ static void dump_cache_entry(struct crec *cache, time_t now)
p = buff;
*a = 0;
if (strlen(n) == 0 && !(cache->flags & F_REVERSE))
n = "<Root>";
if (cache->flags & F_REVERSE)
{
if ((cache->flags & F_NEG))
n = "";
}
else
{
if (strlen(n) == 0)
n = "<Root>";
}
p += sprintf(p, "%-30.30s ", sanitise(n));
if ((cache->flags & F_CNAME) && !is_outdated_cname_pointer(cache))
a = sanitise(cache_get_cname_target(cache));
@ -1993,9 +2035,19 @@ void dump_cache(time_t now)
#ifdef HAVE_AUTH
my_syslog(LOG_INFO, _("queries for authoritative zones %u"), daemon->metrics[METRIC_DNS_AUTH_ANSWERED]);
#endif
#ifdef HAVE_DNSSEC
my_syslog(LOG_INFO, _("DNSSEC per-query subqueries HWM %u"), daemon->metrics[METRIC_WORK_HWM]);
my_syslog(LOG_INFO, _("DNSSEC per-query crypto work HWM %u"), daemon->metrics[METRIC_CRYPTO_HWM]);
my_syslog(LOG_INFO, _("DNSSEC per-RRSet signature fails HWM %u"), daemon->metrics[METRIC_SIG_FAIL_HWM]);
#endif
blockdata_report();
my_syslog(LOG_INFO, _("child processes for TCP requests: in use %zu, highest since last SIGUSR1 %zu, max allowed %zu."),
daemon->metrics[METRIC_TCP_CONNECTIONS],
daemon->max_procs_used,
daemon->max_procs);
daemon->max_procs_used = daemon->metrics[METRIC_TCP_CONNECTIONS];
/* sum counts from different records for same server */
for (serv = daemon->servers; serv; serv = serv->next)
serv->flags &= ~SERV_MARK;
@ -2149,6 +2201,11 @@ const char *edestr(int ede)
case EDE_NO_AUTH: return "no reachable authority";
case EDE_NETERR: return "network error";
case EDE_INVALID_DATA: return "invalid data";
case EDE_SIG_E_B_V: return "signature expired before valid";
case EDE_TOO_EARLY: return "too early";
case EDE_UNS_NS3_ITER: return "unsupported NSEC3 iterations value";
case EDE_UNABLE_POLICY: return "uanble to conform to policy";
case EDE_SYNTHESIZED: return "synthesized";
default: return "unknown";
}
}

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -15,14 +15,17 @@
*/
#define FTABSIZ 150 /* max number of outstanding requests (default) */
#define MAX_PROCS 60 /* max no children for TCP requests */
#define MAX_PROCS 60 /* default max no children for TCP requests */
#define CHILD_LIFETIME 300 /* secs 'till terminated (RFC1035 suggests > 120s) */
#define TCP_MAX_QUERIES 100 /* Maximum number of queries per incoming TCP connection */
#define TCP_BACKLOG 32 /* kernel backlog limit for TCP connections */
#define EDNS_PKTSZ 1232 /* default max EDNS.0 UDP packet, from https://dnsflagday.net/2020/ */
#define SAFE_PKTSZ 1232 /* "go anywhere" UDP packet size, see https://dnsflagday.net/2020/ */
#define KEYBLOCK_LEN 40 /* choose to minimise fragmentation when storing DNSSEC keys */
#define DNSSEC_WORK 50 /* Max number of queries to validate one question */
#define DNSSEC_LIMIT_WORK 40 /* Max number of queries to validate one question */
#define DNSSEC_LIMIT_SIG_FAIL 20 /* Number of signatures that can fail to validate in one answer */
#define DNSSEC_LIMIT_CRYPTO 200 /* max no. of crypto operations to validate one query. */
#define DNSSEC_LIMIT_NSEC3_ITERS 150 /* Max. number of iterations allowed in NSEC3 record. */
#define TIMEOUT 10 /* drop UDP queries after TIMEOUT seconds */
#define SMALL_PORT_RANGE 30 /* If DNS port range is smaller than this, use different allocation. */
#define FORWARD_TEST 1000 /* try all servers every 1000 queries */
@ -205,7 +208,7 @@ RESOLVFILE
/* Pi-hole definitions */
#define HAVE_LUASCRIPT
#define HAVE_IDN
#define HAVE_LIBIDN2
#define HAVE_DNSSEC
#ifdef DNSMASQ_ALL_OPTS
#define HAVE_DBUS

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by

View File

@ -1,4 +1,4 @@
/* dnsmasq is Copyright (c) 2000-2023 Simon Kelley
/* dnsmasq is Copyright (c) 2000-2024 Simon Kelley
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -92,7 +92,7 @@ void dhcp6_packet(time_t now)
struct iface_param parm;
struct cmsghdr *cmptr;
struct msghdr msg;
int if_index = 0;
uint32_t if_index = 0;
union {
struct cmsghdr align; /* this ensures alignment */
char control6[CMSG_SPACE(sizeof(struct in6_pktinfo))];

Some files were not shown because too many files have changed in this diff Show More