TimeScale Training
== Installation ==
https://docs.timescale.com/self-hosted/latest/install/
=== Debian ===
echo "deb https://packagecloud.io/timescale/timescaledb/debian/ $(lsb_release -c -s) main" | / sudo tee /etc/apt/sources.list.d/timescaledb.list wget --quiet -O - https://packagecloud.io/timescale/timescaledb/gpgkey | / sudo gpg --dearmor -o /etc/apt/trusted.gpg.d/timescaledb.gpg sudo apt update # Get latest version sudo apt install timescaledb-2-postgresql-17 # Get specific version sudo apt-get install timescaledb-2-postgresql-15='2.14.2*' timescaledb-2-loader-postgresql-15='2.14.2*'
When running timescaledb-tune, accept the option to update shared_preload_libraries; the other settings are optional.
https://docs.timescale.com/self-hosted/latest/configuration/timescaledb-tune
<pre>
sudo timescaledb-tune
sudo systemctl restart postgresql
</pre>
<pre>
CREATE EXTENSION IF NOT EXISTS timescaledb;
</pre>
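A quick way to confirm the library is preloaded and the extension is installed (standard PostgreSQL checks; the versions reported depend on your installation):
<pre>
-- Should list "timescaledb" once timescaledb-tune has updated the setting and PostgreSQL has restarted
SHOW shared_preload_libraries;

-- Confirm the installed extension version
SELECT extname, extversion FROM pg_extension WHERE extname = 'timescaledb';
</pre>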
=== Windows ===
https://docs.timescale.com/self-hosted/latest/install/installation-windows/#install-and-configure-timescaledb-on-postgresql
== Setup ==
<pre>
SET search_path = "$user", public, "timescaledb_information";
SET timezone TO 'Europe/London';
\dv timescaledb_information.*
</pre>
<pre>
DROP TABLE IF EXISTS metrics;
CREATE TABLE metrics (
    id integer PRIMARY KEY GENERATED ALWAYS AS IDENTITY,
    device_time timestamptz NOT NULL,
    device_id integer,
    cpu_usage numeric
);
</pre>
=== Create Hypertable ===
<pre>
SELECT create_hypertable('metrics', by_range('device_time'));

ERROR:  cannot create a unique index without the column "device_time" (used in partitioning)
HINT:  If you're creating a hypertable on a table with a primary key, ensure the partitioning column is part of the primary or composite key.
</pre>
The partitioning column must be part of the primary key:
<pre>
ALTER TABLE metrics DROP CONSTRAINT metrics_pkey;
ALTER TABLE metrics ADD PRIMARY KEY (id, device_time);
SELECT create_hypertable('metrics', by_range('device_time'));
</pre>
If the existing table already contains data, you must use the "migrate_data" option.
<pre>
SELECT create_hypertable('metrics', by_range('device_time'), migrate_data => true);
</pre>
=== Chunk Interval ===
The default chunk interval is 7 days.
<pre>
SELECT create_hypertable('metrics', by_range('device_time', INTERVAL '1 month'));
SELECT set_chunk_time_interval('metrics', INTERVAL '14 days');
SELECT hypertable_name, column_name, time_interval FROM timescaledb_information.dimensions;
</pre>
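To see the time range each chunk actually covers after changing the interval, the informational views can be queried; a minimal sketch using timescaledb_information.chunks:
<pre>
-- One row per chunk of the hypertable, with the time range it covers
SELECT chunk_name, range_start, range_end
FROM timescaledb_information.chunks
WHERE hypertable_name = 'metrics'
ORDER BY range_start;
</pre>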
=== Generate Time Series Data ===
<pre>
TRUNCATE TABLE metrics RESTART IDENTITY;
INSERT INTO metrics (device_time, device_id, cpu_usage) (
    SELECT time, device_id, random()*100 AS cpu_usage
    FROM generate_series(date_trunc('hour', now()) - INTERVAL '6 months', now(), INTERVAL '5 minutes') AS time,
         generate_series(1,1000) device_id
);
</pre>
== Table Size ==
<pre>
SELECT pg_size_pretty(pg_total_relation_size('metrics'));
SELECT pg_size_pretty(hypertable_size('metrics'));
</pre>
== Check Statistics ==
<pre>
ANALYZE metrics;
SELECT tablename, attname, n_distinct FROM pg_stats WHERE tablename = 'metrics';
</pre>
== Examine Chunks ==
<pre>
\d+ metrics
SELECT MAX(device_time), MIN(device_time) FROM _timescaledb_internal.<chunk>;
</pre>
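The chunk placeholder above is the name of an internal chunk table; show_chunks lists the actual names. A sketch, where the chunk name shown is only an example:
<pre>
SELECT show_chunks('metrics');
-- e.g. _timescaledb_internal._hyper_1_4_chunk

SELECT MAX(device_time), MIN(device_time) FROM _timescaledb_internal._hyper_1_4_chunk;
</pre>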
== Compression ==
=== Old ===
<pre>
ALTER TABLE metrics SET (timescaledb.compress,
    timescaledb.compress_segmentby = 'device_id',
    timescaledb.compress_orderby = 'device_time DESC');

SELECT compress_chunk(c) FROM show_chunks('metrics') c;

SELECT pg_size_pretty(before_compression_total_bytes) AS before,
       pg_size_pretty(after_compression_total_bytes) AS after
FROM hypertable_compression_stats('metrics');

SELECT add_compression_policy('metrics', INTERVAL '1 days');
SELECT remove_compression_policy('metrics');

SELECT decompress_chunk(c, true) FROM show_chunks('metrics') c;
</pre>
=== New (HyperCore) ===
<pre>
ALTER TABLE metrics SET (
    timescaledb.enable_columnstore = true,
    timescaledb.segmentby = 'device_id');

CALL add_columnstore_policy('metrics', after => INTERVAL '1d');

SELECT pg_size_pretty(before_compression_total_bytes) AS before,
       pg_size_pretty(after_compression_total_bytes) AS after
FROM hypertable_compression_stats('metrics');

SELECT pg_size_pretty(before_compression_table_bytes) AS before_table,
       pg_size_pretty(before_compression_index_bytes) AS before_index,
       pg_size_pretty(before_compression_toast_bytes) AS before_toast,
       pg_size_pretty(before_compression_total_bytes) AS before_total,
       pg_size_pretty(after_compression_table_bytes) AS after_table,
       pg_size_pretty(after_compression_index_bytes) AS after_index,
       pg_size_pretty(after_compression_toast_bytes) AS after_toast,
       pg_size_pretty(after_compression_total_bytes) AS after_total
FROM hypertable_columnstore_stats('metrics');

SELECT * FROM timescaledb_information.jobs WHERE proc_name = 'policy_compression';
</pre>
== Tablespaces ==
https://docs.timescale.com/self-hosted/latest/manage-storage
<pre>
CREATE TABLESPACE historic_data OWNER postgres LOCATION '/home/postgres/timescale';

SELECT show_chunks('metrics', older_than => INTERVAL '5 months');

SELECT move_chunk(
    chunk => '_timescaledb_internal._hyper_1_4_chunk',
    destination_tablespace => 'historic_data',
    index_destination_tablespace => 'historic_data',
    reorder_index => '_timescaledb_internal._hyper_1_4_chunk_netdata_time_idx',
    verbose => TRUE
);
</pre>
== Limitations ==
https://docs.timescale.com/timescaledb/latest/overview/limitations
* Time dimensions (columns) used for partitioning cannot have NULL values.
* Unique indexes must include all columns that are partitioning dimensions (see the example below).
* UPDATE statements that move values between partitions (chunks) are not supported.
* Foreign key constraints from a hypertable referencing another hypertable are not supported.
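For example, the unique-index restriction means an index on device_id alone is rejected for the metrics hypertable above, while one that also includes the partitioning column is accepted (a sketch; the index names are arbitrary):
<pre>
-- Fails: the partitioning column device_time is not part of the unique index
CREATE UNIQUE INDEX metrics_device_uq ON metrics (device_id);

-- Works: the partitioning column is included
CREATE UNIQUE INDEX metrics_device_time_uq ON metrics (device_id, device_time);
</pre>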
== Parameters ==
<pre>
SELECT name, setting, short_desc FROM pg_settings WHERE name LIKE 'timescaledb.%';
</pre>
== Continuous Aggregates ==
=== Create ===
The materialised view is populated immediately on creation unless you use the <b>WITH NO DATA</b> option.
<pre>
CREATE MATERIALIZED VIEW daily_cpu_stats
WITH (timescaledb.continuous) AS
SELECT time_bucket('1 day', device_time) AS day,
       device_id,
       avg(cpu_usage) AS avg_cpu,
       max(cpu_usage) AS max_cpu
FROM metrics
GROUP BY day, device_id;
</pre>
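To create the view without materialising anything yet, the same statement can end with WITH NO DATA; the view is then populated by a later refresh call or policy:
<pre>
CREATE MATERIALIZED VIEW daily_cpu_stats
WITH (timescaledb.continuous) AS
SELECT time_bucket('1 day', device_time) AS day,
       device_id,
       avg(cpu_usage) AS avg_cpu,
       max(cpu_usage) AS max_cpu
FROM metrics
GROUP BY day, device_id
WITH NO DATA;
</pre>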
=== Refresh ===
Refresh all data between the dates specified.
<pre>
CALL refresh_continuous_aggregate('daily_cpu_stats', '2023-01-01', '2025-04-01');
CALL refresh_continuous_aggregate('daily_cpu_stats', date_trunc('month', now()), date_trunc('day', now()));
</pre>
=== Add Policy ===
Create a policy that refreshes data between 1 month and 1 day old; it runs every hour.
This means data older than 1 month will never be removed from the continuous aggregate, even if it is deleted from the underlying table.
<pre>
SELECT add_continuous_aggregate_policy('daily_cpu_stats',
    start_offset => INTERVAL '1 month',
    end_offset => INTERVAL '1 day',
    schedule_interval => INTERVAL '1 h');
</pre>
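The policy is registered as a background job, so it can be inspected through timescaledb_information.jobs in the same way as the compression policy above (assuming the job's proc name is policy_refresh_continuous_aggregate):
<pre>
SELECT job_id, schedule_interval, config
FROM timescaledb_information.jobs
WHERE proc_name = 'policy_refresh_continuous_aggregate';
</pre>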