From 5726611e1c49865bbd1cab562f139c687dd83255 Mon Sep 17 00:00:00 2001 From: Mats Kindahl Date: Fri, 27 Oct 2023 09:55:59 +0200 Subject: [PATCH] Document new hypertable API (#2732) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just make minimal changes to document the new API. Sections on using the new API will be following. Co-authored-by: Erik Nordström <819732+erimatnor@users.noreply.github.com> --- _partials/_create-hypertable-blockchain.md | 2 +- _partials/_create-hypertable-energy.md | 2 +- _partials/_create-hypertable-nyctaxis.md | 3 +- .../_create-hypertable-twelvedata-crypto.md | 2 +- .../_create-hypertable-twelvedata-stocks.md | 2 +- ...-time-previously-materialized-not-shown.md | 3 +- api/add_data_node.md | 2 +- api/add_dimension.md | 80 +++----- api/add_dimension_old.md | 170 ++++++++++++++++ api/chunks.md | 2 +- api/compression_settings.md | 2 +- api/create_distributed_hypertable.md | 18 +- api/create_hypertable.md | 84 ++------ api/create_hypertable_old.md | 185 ++++++++++++++++++ api/delete_data_node.md | 2 +- api/detach_data_node.md | 2 +- api/dimension_info.md | 99 ++++++++++ api/dimensions.md | 9 +- api/drop_chunks.md | 2 +- api/page-index/page-index.js | 8 + api/set_number_partitions.md | 2 +- api/time_bucket_ng.md | 3 +- getting-started/tables-hypertables.md | 2 +- .../dual-write-from-postgres.md | 2 +- mst/ingest-data.md | 2 +- quick-start/golang.md | 4 +- quick-start/java.md | 8 +- quick-start/node.md | 2 +- quick-start/python.md | 2 +- quick-start/ruby.md | 2 +- self-hosted/migration/same-db.md | 2 +- self-hosted/migration/schema-then-data.md | 6 +- .../multinode-grow-shrink.md | 2 +- use-timescale/continuous-aggregates/time.md | 3 +- use-timescale/extensions/postgis.md | 2 +- use-timescale/hyperfunctions/counter-aggs.md | 2 +- .../hypertables/change-chunk-intervals.md | 3 +- use-timescale/hypertables/create.md | 5 +- .../hypertables-and-unique-indexes.md | 20 +- use-timescale/ingest-data/import-csv.md | 2 +- .../integrations/data-ingest/telegraf.md | 2 +- .../query-data/advanced-analytic-queries.md | 2 +- .../schema-management/about-constraints.md | 2 +- use-timescale/schema-management/indexing.md | 4 +- 44 files changed, 578 insertions(+), 187 deletions(-) create mode 100644 api/add_dimension_old.md create mode 100644 api/create_hypertable_old.md create mode 100644 api/dimension_info.md diff --git a/_partials/_create-hypertable-blockchain.md b/_partials/_create-hypertable-blockchain.md index ac3b587e2a..1e0feba516 100644 --- a/_partials/_create-hypertable-blockchain.md +++ b/_partials/_create-hypertable-blockchain.md @@ -36,7 +36,7 @@ with Timescale tables similar to standard PostgreSQL. the timestamp data to use for partitioning: ```sql - SELECT create_hypertable('transactions', 'time'); + SELECT create_hypertable('transactions', by_range('time')); ``` 1. Create an index on the `hash` column to make queries for individual diff --git a/_partials/_create-hypertable-energy.md b/_partials/_create-hypertable-energy.md index 20b22f3ed7..8d85379430 100644 --- a/_partials/_create-hypertable-energy.md +++ b/_partials/_create-hypertable-energy.md @@ -27,7 +27,7 @@ with Timescale tables similar to standard PostgreSQL. 
the timestamp data to use for partitioning: ```sql - SELECT create_hypertable('metrics', 'created'); + SELECT create_hypertable('metrics', by_range('created')); ``` diff --git a/_partials/_create-hypertable-nyctaxis.md b/_partials/_create-hypertable-nyctaxis.md index aca7bb6105..1cd0904294 100644 --- a/_partials/_create-hypertable-nyctaxis.md +++ b/_partials/_create-hypertable-nyctaxis.md @@ -42,7 +42,8 @@ with Timescale tables similar to standard PostgreSQL. the timestamp data to use for partitioning: ```sql - SELECT create_hypertable('rides', 'pickup_datetime', 'payment_type', 2, create_default_indexes=>FALSE); + SELECT create_hypertable('rides', by_range('pickup_datetime'), create_default_indexes=>FALSE); + SELECT add_dimension('rides', by_hash('payment_type', 2)); ``` 1. Create an index to support efficient queries by vendor, rate code, and diff --git a/_partials/_create-hypertable-twelvedata-crypto.md b/_partials/_create-hypertable-twelvedata-crypto.md index 8f4d2517da..72a1f80981 100644 --- a/_partials/_create-hypertable-twelvedata-crypto.md +++ b/_partials/_create-hypertable-twelvedata-crypto.md @@ -29,7 +29,7 @@ with Timescale tables similar to standard PostgreSQL. the timestamp data to use for partitioning: ```sql - SELECT create_hypertable('crypto_ticks', 'time'); + SELECT create_hypertable('crypto_ticks', by_range('time')); ``` diff --git a/_partials/_create-hypertable-twelvedata-stocks.md b/_partials/_create-hypertable-twelvedata-stocks.md index b5ea98c921..85727a9fdc 100644 --- a/_partials/_create-hypertable-twelvedata-stocks.md +++ b/_partials/_create-hypertable-twelvedata-stocks.md @@ -29,7 +29,7 @@ with Timescale tables similar to standard PostgreSQL. the timestamp data to use for partitioning: ```sql - SELECT create_hypertable('stocks_real_time','time'); + SELECT create_hypertable('stocks_real_time', by_range('time')); ``` 1. Create an index to support efficient queries on the `symbol` and `time` diff --git a/_troubleshooting/caggs-real-time-previously-materialized-not-shown.md b/_troubleshooting/caggs-real-time-previously-materialized-not-shown.md index c93c0fc4c1..0f05e9831b 100644 --- a/_troubleshooting/caggs-real-time-previously-materialized-not-shown.md +++ b/_troubleshooting/caggs-real-time-previously-materialized-not-shown.md @@ -37,8 +37,7 @@ CREATE TABLE conditions( temperature INT NOT NULL); SELECT create_hypertable( - 'conditions', 'day', - chunk_time_interval => INTERVAL '1 day' + 'conditions', by_range('day', INTERVAL '1 day') ); INSERT INTO conditions (day, city, temperature) VALUES diff --git a/api/add_data_node.md b/api/add_data_node.md index 55ff6178ae..54e77f8784 100644 --- a/api/add_data_node.md +++ b/api/add_data_node.md @@ -102,7 +102,7 @@ TimescaleDB extension on the data node unless it is already installed. ### Sample usage If you have an existing hypertable `conditions` and want to use `time` -as the time partitioning column and `location` as the space partitioning +as the range partitioning column and `location` as the hash partitioning column. You also want to distribute the chunks of the hypertable on two data nodes `dn1.example.com` and `dn2.example.com`: diff --git a/api/add_dimension.md b/api/add_dimension.md index eef59c7a56..56a881d63f 100644 --- a/api/add_dimension.md +++ b/api/add_dimension.md @@ -13,7 +13,9 @@ api: Add an additional partitioning dimension to a Timescale hypertable. The column selected as the dimension can either use interval -partitioning (for example, for a second time partition) or hash partitioning. 
+partitioning (for example, for a second range partition) or hash partitioning.
+
+**Note: this reference describes the new generalized hypertable API. The [old interface for `add_dimension` is also available](add_dimension_old.md).**


The `add_dimension` command can only be executed after a table has been
@@ -21,15 +23,15 @@ converted to a hypertable (via `create_hypertable`), but must similarly
be run only on an empty hypertable.


-**Space partitions**: Using space partitions is highly recommended
+**Hash partitions (previously called space partitions)**: Using hash partitions is highly recommended
for [distributed hypertables][distributed-hypertables] to achieve
efficient scale-out performance. For [regular hypertables][regular-hypertables]
that exist only on a single node, additional partitioning can be used
for specialized use cases and not recommended for most users.

-Space partitions use hashing: Every distinct item is hashed to one of
-*N* buckets. Remember that we are already using (flexible) time
-intervals to manage chunk sizes; the main purpose of space
+Every distinct item in hash partitioning is hashed to one of
+*N* buckets. Remember that we are already using (flexible) range
+intervals to manage chunk sizes; the main purpose of hash
partitioning is to enable parallelization across multiple
data nodes (in the case of distributed hypertables) or
across multiple disks within the same time interval
@@ -37,14 +39,14 @@ across multiple disks within the same time interval

### Parallelizing queries across multiple data nodes

-In a distributed hypertable, space partitioning enables inserts to be
+In a distributed hypertable, hash partitioning enables inserts to be
parallelized across data nodes, even while the inserted rows share
timestamps from the same time interval, and thus increases the ingest rate.
Query performance also benefits by being able to parallelize queries
across nodes, particularly when full or partial aggregations can be
"pushed down" to data nodes (for example, as in the query
`avg(temperature) FROM conditions GROUP BY hour, location`
-when using `location` as a space partition). Please see our
+when using `location` as a hash partition). Please see our
[best practices about partitioning in distributed hypertables][distributed-hypertable-partitioning-best-practices]
for more information.

@@ -71,10 +73,10 @@ disks, single query to multiple disks in parallel). The multiple
tablespace approach only supports the former. With a RAID setup,
*no spatial partitioning is required*.

-That said, when using space partitions, we recommend using 1
-space partition per disk.
+That said, when using hash partitions, we recommend using 1
+hash partition per disk.

-Timescale does *not* benefit from a very large number of space
+Timescale does *not* benefit from a very large number of hash
partitions (such as the number of unique items you expect in partition
field). A very large number of such partitions leads both to poorer
per-partition load balancing (the mapping of items to partitions using
hashing), as well as much increased planning latency for some types of
queries.
@@ -86,7 +88,7 @@ queries.

|Name|Type|Description|
|-|-|-|
|`hypertable`|REGCLASS|Hypertable to add the dimension to|
-|`column_name`|TEXT|Column to partition by|
+|`dimension`|DIMENSION_INFO|Dimension to partition by|

### Optional arguments

@@ -102,66 +104,44 @@ queries. 
|Column|Type|Description|
|-|-|-|
|`dimension_id`|INTEGER|ID of the dimension in the TimescaleDB internal catalog|
-|`schema_name`|TEXT|Schema name of the hypertable|
-|`table_name`|TEXT|Table name of the hypertable|
-|`column_name`|TEXT|Column name of the column to partition by|
|`created`|BOOLEAN|True if the dimension was added, false when `if_not_exists` is true and no dimension was added|

-When executing this function, either `number_partitions` or
-`chunk_time_interval` must be supplied, which dictates if the
-dimension uses hash or interval partitioning.
-
-The `chunk_time_interval` should be specified as follows:
-
-* If the column to be partitioned is a TIMESTAMP, TIMESTAMPTZ, or
-DATE, this length should be specified either as an INTERVAL type or
-an integer value in *microseconds*.
-
-* If the column is some other integer type, this length
-should be an integer that reflects
-the column's underlying semantics (for example, the
-`chunk_time_interval` should be given in milliseconds if this column
-is the number of milliseconds since the UNIX epoch).
-
-
- Supporting more than **one** additional dimension is currently
- experimental. For any production environments, users are recommended
- to use at most one "space" dimension.
-
-

### Sample use

-First convert table `conditions` to hypertable with just time
-partitioning on column `time`, then add an additional partition key on `location` with four partitions:
+First convert table `conditions` to a hypertable with just range
+partitioning on column `time`, then add an additional partition key on
+`location` with four partitions:

```sql
-SELECT create_hypertable('conditions', 'time');
-SELECT add_dimension('conditions', 'location', number_partitions => 4);
+SELECT create_hypertable('conditions', by_range('time'));
+SELECT add_dimension('conditions', by_hash('location', 4));
```

-Convert table `conditions` to hypertable with time partitioning on `time` and
-space partitioning (2 partitions) on `location`, then add two additional dimensions.
+Convert table `conditions` to a hypertable with range partitioning on
+`time`, then add three additional dimensions: one hash partition on
+`location`, one range partition on `time_received`, and one hash
+partition on `device_id`.

```sql
-SELECT create_hypertable('conditions', 'time', 'location', 2);
-SELECT add_dimension('conditions', 'time_received', chunk_time_interval => INTERVAL '1 day');
-SELECT add_dimension('conditions', 'device_id', number_partitions => 2);
-SELECT add_dimension('conditions', 'device_id', number_partitions => 2, if_not_exists => true);
+SELECT create_hypertable('conditions', by_range('time'));
+SELECT add_dimension('conditions', by_hash('location', 2));
+SELECT add_dimension('conditions', by_range('time_received', INTERVAL '1 day'));
+SELECT add_dimension('conditions', by_hash('device_id', 2));
+SELECT add_dimension('conditions', by_hash('device_id', 2), if_not_exists => true);
```

Now in a multi-node example for distributed hypertables with a cluster
of one access node and two data nodes, configure the access node for
access to the two data nodes. Then, convert table `conditions` to
-a distributed hypertable with just time partitioning on column `time`,
-and finally add a space partitioning dimension on `location`
+a distributed hypertable with just range partitioning on column `time`,
+and finally add a hash partitioning dimension on `location`
with two partitions (as the number of the attached data nodes). 
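+
+As a quick check on the example above, you can list the dimensions that
+were created by querying the `timescaledb_information.dimensions` view
+(a minimal sketch; this is the same informational view used in the
+`dimensions.md` examples later in this patch):
+
+```sql
+-- Lists one row per dimension; dimension_type distinguishes the
+-- range (time) dimension from the hash (space) dimensions.
+SELECT hypertable_name, column_name, dimension_type
+  FROM timescaledb_information.dimensions
+ WHERE hypertable_name = 'conditions';
+```
+
+The commands for the multi-node setup described above are: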
```sql SELECT add_data_node('dn1', host => 'dn1.example.com'); SELECT add_data_node('dn2', host => 'dn2.example.com'); SELECT create_distributed_hypertable('conditions', 'time'); -SELECT add_dimension('conditions', 'location', number_partitions => 2); +SELECT add_dimension('conditions', by_hash('location', 2)); ``` [create_hypertable]: /api/:currentVersion:/hypertable/create_hypertable/ diff --git a/api/add_dimension_old.md b/api/add_dimension_old.md new file mode 100644 index 0000000000..c8dd42a1cd --- /dev/null +++ b/api/add_dimension_old.md @@ -0,0 +1,170 @@ +--- +api_name: add_dimension() +excerpt: Add a space-partitioning dimension to a hypertable +topics: [hypertables] +keywords: [hypertables, partitions] +tags: [dimensions, chunks] +api: + license: apache + type: function +--- + +# add_dimension() + +Add an additional partitioning dimension to a Timescale hypertable. +The column selected as the dimension can either use interval +partitioning (for example, for a second time partition) or hash partitioning. + + +The `add_dimension` command can only be executed after a table has been +converted to a hypertable (via `create_hypertable`), but must similarly +be run only on an empty hypertable. + + +**Space partitions**: Using space partitions is highly recommended +for [distributed hypertables][distributed-hypertables] to achieve +efficient scale-out performance. For [regular hypertables][regular-hypertables] +that exist only on a single node, additional partitioning can be used +for specialized use cases and not recommended for most users. + +Space partitions use hashing: Every distinct item is hashed to one of +*N* buckets. Remember that we are already using (flexible) time +intervals to manage chunk sizes; the main purpose of space +partitioning is to enable parallelization across multiple +data nodes (in the case of distributed hypertables) or +across multiple disks within the same time interval +(in the case of single-node deployments). + +### Parallelizing queries across multiple data nodes + +In a distributed hypertable, space partitioning enables inserts to be +parallelized across data nodes, even while the inserted rows share +timestamps from the same time interval, and thus increases the ingest rate. +Query performance also benefits by being able to parallelize queries +across nodes, particularly when full or partial aggregations can be +"pushed down" to data nodes (for example, as in the query +`avg(temperature) FROM conditions GROUP BY hour, location` +when using `location` as a space partition). Please see our +[best practices about partitioning in distributed hypertables][distributed-hypertable-partitioning-best-practices] +for more information. + +### Parallelizing disk I/O on a single node + +Parallel I/O can benefit in two scenarios: (a) two or more concurrent +queries should be able to read from different disks in parallel, or +(b) a single query should be able to use query parallelization to read +from multiple disks in parallel. + +Thus, users looking for parallel I/O have two options: + +1. Use a RAID setup across multiple physical disks, and expose a +single logical disk to the hypertable (that is, via a single tablespace). + +1. For each physical disk, add a separate tablespace to the +database. Timescale allows you to actually add multiple tablespaces +to a *single* hypertable (although under the covers, a hypertable's +chunks are spread across the tablespaces associated with that hypertable). 
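+
+A minimal sketch of this second, multiple-tablespace option (the
+tablespace names and disk paths are illustrative assumptions, and
+`conditions` is the example hypertable used elsewhere on this page):
+
+```sql
+-- One tablespace per physical disk; the locations are hypothetical.
+CREATE TABLESPACE disk1 LOCATION '/mnt/disk1/pgdata';
+CREATE TABLESPACE disk2 LOCATION '/mnt/disk2/pgdata';
+
+-- Attach both tablespaces to the hypertable; its chunks are then
+-- spread across them.
+SELECT attach_tablespace('disk1', 'conditions');
+SELECT attach_tablespace('disk2', 'conditions');
+```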
+ +We recommend a RAID setup when possible, as it supports both forms of +parallelization described above (that is, separate queries to separate +disks, single query to multiple disks in parallel). The multiple +tablespace approach only supports the former. With a RAID setup, +*no spatial partitioning is required*. + +That said, when using space partitions, we recommend using 1 +space partition per disk. + +Timescale does *not* benefit from a very large number of space +partitions (such as the number of unique items you expect in partition +field). A very large number of such partitions leads both to poorer +per-partition load balancing (the mapping of items to partitions using +hashing), as well as much increased planning latency for some types of +queries. + +### Required arguments + +|Name|Type|Description| +|-|-|-| +|`hypertable`|REGCLASS|Hypertable to add the dimension to| +|`column_name`|TEXT|Column to partition by| + +### Optional arguments + +|Name|Type|Description| +|-|-|-| +|`number_partitions`|INTEGER|Number of hash partitions to use on `column_name`. Must be > 0| +|`chunk_time_interval`|INTERVAL|Interval that each chunk covers. Must be > 0| +|`partitioning_func`|REGCLASS|The function to use for calculating a value's partition (see `create_hypertable` [instructions][create_hypertable])| +|`if_not_exists`|BOOLEAN|Set to true to avoid throwing an error if a dimension for the column already exists. A notice is issued instead. Defaults to false| + +### Returns + +|Column|Type|Description| +|-|-|-| +|`dimension_id`|INTEGER|ID of the dimension in the TimescaleDB internal catalog| +|`schema_name`|TEXT|Schema name of the hypertable| +|`table_name`|TEXT|Table name of the hypertable| +|`column_name`|TEXT|Column name of the column to partition by| +|`created`|BOOLEAN|True if the dimension was added, false when `if_not_exists` is true and no dimension was added| + +When executing this function, either `number_partitions` or +`chunk_time_interval` must be supplied, which dictates if the +dimension uses hash or interval partitioning. + +The `chunk_time_interval` should be specified as follows: + +* If the column to be partitioned is a TIMESTAMP, TIMESTAMPTZ, or +DATE, this length should be specified either as an INTERVAL type or +an integer value in *microseconds*. + +* If the column is some other integer type, this length +should be an integer that reflects +the column's underlying semantics (for example, the +`chunk_time_interval` should be given in milliseconds if this column +is the number of milliseconds since the UNIX epoch). + + + Supporting more than **one** additional dimension is currently + experimental. For any production environments, users are recommended + to use at most one "space" dimension. + + + +### Sample use + +First convert table `conditions` to hypertable with just time +partitioning on column `time`, then add an additional partition key on `location` with four partitions: + +```sql +SELECT create_hypertable('conditions', 'time'); +SELECT add_dimension('conditions', 'location', number_partitions => 4); +``` + +Convert table `conditions` to hypertable with time partitioning on `time` and +space partitioning (2 partitions) on `location`, then add two additional dimensions. 
+ +```sql +SELECT create_hypertable('conditions', 'time', 'location', 2); +SELECT add_dimension('conditions', 'time_received', chunk_time_interval => INTERVAL '1 day'); +SELECT add_dimension('conditions', 'device_id', number_partitions => 2); +SELECT add_dimension('conditions', 'device_id', number_partitions => 2, if_not_exists => true); +``` + +Now in a multi-node example for distributed hypertables with a cluster +of one access node and two data nodes, configure the access node for +access to the two data nodes. Then, convert table `conditions` to +a distributed hypertable with just time partitioning on column `time`, +and finally add a space partitioning dimension on `location` +with two partitions (as the number of the attached data nodes). + +```sql +SELECT add_data_node('dn1', host => 'dn1.example.com'); +SELECT add_data_node('dn2', host => 'dn2.example.com'); +SELECT create_distributed_hypertable('conditions', 'time'); +SELECT add_dimension('conditions', 'location', number_partitions => 2); +``` + +[create_hypertable]: /api/:currentVersion:/hypertable/create_hypertable_old/ +[distributed-hypertable-partitioning-best-practices]: /use-timescale/:currentVersion:/hypertables/about-hypertables/#space-partitioning +[distributed-hypertables]: /api/:currentVersion:/distributed-hypertables/create_distributed_hypertable/ +[regular-hypertables]: /api/:currentVersion:/hypertable/create_hypertable/ diff --git a/api/chunks.md b/api/chunks.md index f7eb605558..dba3d8db0f 100644 --- a/api/chunks.md +++ b/api/chunks.md @@ -47,7 +47,7 @@ Get information about the chunks of a hypertable. CREATE TABLESPACE tablespace1 location '/usr/local/pgsql/data1'; CREATE TABLE hyper_int (a_col integer, b_col integer, c integer); -SELECT table_name from create_hypertable('hyper_int', 'a_col', chunk_time_interval=> 10); +SELECT table_name from create_hypertable('hyper_int', by_range('a_col', 10)); CREATE OR REPLACE FUNCTION integer_now_hyper_int() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(a_col), 0) FROM hyper_int $$; SELECT set_integer_now_func('hyper_int', 'integer_now_hyper_int'); diff --git a/api/compression_settings.md b/api/compression_settings.md index 20fc993008..2c0952b219 100644 --- a/api/compression_settings.md +++ b/api/compression_settings.md @@ -35,7 +35,7 @@ decompressed by mutable compression. ```sql CREATE TABLE hypertab (a_col integer, b_col integer, c_col integer, d_col integer, e_col integer); -SELECT table_name FROM create_hypertable('hypertab', 'a_col', chunk_time_interval => 864000000); +SELECT table_name FROM create_hypertable('hypertab', by_range('a_col', 864000000)); ALTER TABLE hypertab SET (timescaledb.compress, timescaledb.compress_segmentby = 'a_col,b_col', timescaledb.compress_orderby = 'c_col desc, d_col asc nulls last'); diff --git a/api/create_distributed_hypertable.md b/api/create_distributed_hypertable.md index d8fed56b57..c55e18871c 100644 --- a/api/create_distributed_hypertable.md +++ b/api/create_distributed_hypertable.md @@ -15,6 +15,8 @@ Creates a TimescaleDB hypertable distributed across a multinode environment. Use this function in place of [`create_hypertable`][create-hypertable] when creating distributed hypertables. +**Note that distributed tables use the old API. 
The new generalized API is described in [`create_hypertable`](create_hypertable.md).** + ### Required arguments |Name|Type|Description| @@ -69,19 +71,19 @@ SELECT create_distributed_hypertable('conditions', 'time', 'location', #### Best practices -**Space partitions:** As opposed to the normal +**Hash partitions:** As opposed to the normal [`create_hypertable` best practices][create-hypertable], -space partitions are highly recommended for distributed hypertables. -Incoming data is divided among data nodes based upon the space -partition (the first one if multiple space partitions have been -defined). If there is no space partition, all the data for each time +hash partitions are highly recommended for distributed hypertables. +Incoming data is divided among data nodes based upon the hash +partition (the first one if multiple hash partitions have been +defined). If there is no hash partition, all the data for each time slice is written to a single data node. **Time intervals:** Follow the same guideline in setting the `chunk_time_interval` as with [`create_hypertable`][create-hypertable], bearing in mind that the calculation needs to be based on the memory capacity of the data nodes. However, one additional thing to -consider, assuming space partitioning is being used, is that the +consider, assuming hash partitioning is being used, is that the hypertable is evenly distributed across the data nodes, allowing a larger time interval. @@ -91,7 +93,7 @@ table being served by these data nodes, then you should use a time interval of 1 week (`7 * 10 GB / 5 * 64 GB ~= 22% main memory` used for most recent chunks). -If space partitioning is not being used, the `chunk_time_interval` +If hash partitioning is not being used, the `chunk_time_interval` should be the same as the non-distributed case, as all of the incoming data is handled by a single node. @@ -109,4 +111,4 @@ recommend using `replication_factor` > 1, and instead rely on physical replication of each data node if such fault-tolerance is required. [best-practices]: /use-timescale/:currentVersion:/hypertables/about-hypertables/#best-practices-for-time-partitioning -[create-hypertable]: /use-timescale/:currentVersion:/hypertables/create +[create-hypertable]: /api/:currentVersion:/hypertables/create_hypertable_old diff --git a/api/create_hypertable.md b/api/create_hypertable.md index 46306403fb..aa29057459 100644 --- a/api/create_hypertable.md +++ b/api/create_hypertable.md @@ -11,8 +11,7 @@ api: # create_hypertable() Creates a TimescaleDB hypertable from a PostgreSQL table (replacing the latter), -partitioned on time and with the option to partition on one or more other -columns. The PostgreSQL table cannot be an already partitioned table +partitioned on one dimension. The PostgreSQL table cannot be an already partitioned table (declarative partitioning or inheritance). In case of a non-empty table, it is possible to migrate the data during hypertable creation using the `migrate_data` option, although this might take a long time and has certain limitations when @@ -24,38 +23,28 @@ on the resulting hypertable. For more information about using hypertables, including chunk size partitioning, see the [hypertable section][hypertable-docs]. +**Note: this reference describes the new generalized hypertable API. 
The [old interface for `create_hypertable` is also available](create_hypertable_old.md).**
+
## Required arguments

|Name|Type|Description|
|-|-|-|
|`relation`|REGCLASS|Identifier of table to convert to hypertable.|
-|`time_column_name`|REGCLASS| Name of the column containing time values as well as the primary column to partition by.|
+| `dimension` | DIMENSION_INFO | Dimension info object for the column to partition on. |

## Optional arguments

|Name|Type|Description|
|-|-|-|
-|`partitioning_column`|REGCLASS|Name of an additional column to partition by. If provided, the `number_partitions` argument must also be provided.|
-|`number_partitions`|INTEGER|Number of [hash partitions][hash-partitions] to use for `partitioning_column`. Must be > 0.|
-|`chunk_time_interval`|INTERVAL|Event time that each chunk covers. Must be > 0. Default is 7 days.|
|`create_default_indexes`|BOOLEAN|Whether to create default indexes on time/partitioning columns. Default is TRUE.|
|`if_not_exists`|BOOLEAN|Whether to print warning if table already converted to hypertable or raise exception. Default is FALSE.|
-|`partitioning_func`|REGCLASS|The function to use for calculating a value's partition.|
-|`associated_schema_name`|REGCLASS|Name of the schema for internal hypertable tables. Default is `_timescaledb_internal`.|
-|`associated_table_prefix`|TEXT|Prefix for internal hypertable chunk names. Default is `_hyper`.|
|`migrate_data`|BOOLEAN|Set to TRUE to migrate any existing data from the `relation` table to chunks in the new hypertable. A non-empty table generates an error without this option. Large tables may take significant time to migrate. Defaults to FALSE.|
-|`time_partitioning_func`|REGCLASS| Function to convert incompatible primary time column values to compatible ones. The function must be `IMMUTABLE`.|
-|`replication_factor`|INTEGER|Replication factor to use with distributed hypertable. If not provided, value is determined by the `timescaledb.hypertable_replication_factor_default` GUC. |
-|`data_nodes`|ARRAY|This is the set of data nodes that are used for this table if it is distributed. This has no impact on non-distributed hypertables. If no data nodes are specified, a distributed hypertable uses all data nodes known by this instance.|
-|`distributed`|BOOLEAN|Set to TRUE to create distributed hypertable. If not provided, value is determined by the `timescaledb.hypertable_distributed_default` GUC. When creating a distributed hypertable, consider using [`create_distributed_hypertable`][create_distributed_hypertable] in place of `create_hypertable`. Default is NULL. |

## Returns

|Column|Type|Description|
|-|-|-|
|`hypertable_id`|INTEGER|ID of the hypertable in TimescaleDB.|
-|`schema_name`|TEXT|Schema name of the table converted to hypertable.|
-|`table_name`|TEXT|Table name of the table converted to hypertable.|
|`created`|BOOLEAN|TRUE if the hypertable was created, FALSE when `if_not_exists` is true and no hypertable was created.|


@@ -81,78 +70,43 @@ obtaining `SHARE ROW EXCLUSIVE` lock on the referenced tables before calling
[PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-lock.html)
for the syntax.

-## Units
-
-The `time` column supports the following data types:
-
-|Description|Types|
-|-|-|
-|Timestamp| TIMESTAMP, TIMESTAMPTZ|
-|Date|DATE|
-|Integer|SMALLINT, INT, BIGINT|
-
-
-The type flexibility of the 'time' column allows the use of non-time-based
-values as the primary chunk partitioning column, as long as those values can
-increment. 
- - -For incompatible data types (for example, `jsonb`) you can specify a function to -the `time_partitioning_func` argument which can extract a compatible data type. - -The units of `chunk_time_interval` should be set as follows: - -* For time columns having timestamp or DATE types, the `chunk_time_interval` - should be specified either as an `interval` type or an integral value in - *microseconds*. -* For integer types, the `chunk_time_interval` **must** be set explicitly, as - the database does not otherwise understand the semantics of what each - integer value represents (a second, millisecond, nanosecond, etc.). So if - your time column is the number of milliseconds since the UNIX epoch, and you - wish to have each chunk cover 1 day, you should specify - `chunk_time_interval => 86400000`. - -In case of hash partitioning (in other words, if `number_partitions` is greater -than zero), it is possible to optionally specify a custom partitioning function. -If no custom partitioning function is specified, the default partitioning -function is used. The default partitioning function calls PostgreSQL's internal -hash function for the given type, if one exists. Thus, a custom partitioning -function can be used for value types that do not have a native PostgreSQL hash -function. A partitioning function should take a single `anyelement` type -argument and return a positive `integer` hash value. Note that this hash value -is *not* a partition ID, but rather the inserted value's position in the -dimension's key space, which is then divided across the partitions. - The time column in `create_hypertable` must be defined as `NOT NULL`. If this is not already specified on table creation, `create_hypertable` automatically adds this constraint on the table when it is executed. +#### Dimension info + +When creating a hypertable, you need to provide dimension info using +one of the [dimension info constructors](dimension_info.md). This is +used to specify what column to partition by and in what way to +partition. + ## Sample use -Convert table `conditions` to hypertable with just time partitioning on column `time`: +Convert table `conditions` to hypertable with just range partitioning on column `time`: ```sql -SELECT create_hypertable('conditions', 'time'); +SELECT create_hypertable('conditions', by_range('time')); ``` Convert table `conditions` to hypertable, setting `chunk_time_interval` to 24 hours. ```sql -SELECT create_hypertable('conditions', 'time', chunk_time_interval => 86400000000); -SELECT create_hypertable('conditions', 'time', chunk_time_interval => INTERVAL '1 day'); +SELECT create_hypertable('conditions', by_range('time', 86400000000)); +SELECT create_hypertable('conditions', by_range('time', INTERVAL '1 day')); ``` Convert table `conditions` to hypertable. Do not raise a warning if `conditions` is already a hypertable: ```sql -SELECT create_hypertable('conditions', 'time', if_not_exists => TRUE); +SELECT create_hypertable('conditions', by_range('time'), if_not_exists => TRUE); ``` Time partition table `measurements` on a composite column type `report` using a -time partitioning function. Requires an immutable function that can convert the +range partitioning function. 
Requires an immutable function that can convert the
column value into a supported column value:

```sql
CREATE TYPE report AS (reported timestamp with time zone, contents jsonb);

CREATE FUNCTION report_reported(report)
  RETURNS timestamptz
  LANGUAGE SQL
  IMMUTABLE AS
  'SELECT $1.reported';

-SELECT create_hypertable('measurements', 'report', time_partitioning_func => 'report_reported');
+SELECT create_hypertable('measurements', by_range('report', partition_func => 'report_reported'));
```

Time partition table `events`, on a column type `jsonb` (`event`), which has
a top level key (`started`) containing an ISO 8601 formatted timestamp:

```sql
CREATE FUNCTION event_started(jsonb)
  RETURNS timestamptz
  LANGUAGE SQL
  IMMUTABLE AS
  $func$SELECT ($1->>'started')::timestamptz$func$;

-SELECT create_hypertable('events', 'event', time_partitioning_func => 'event_started');
+SELECT create_hypertable('events', by_range('event', partition_func => 'event_started'));
```

[create_distributed_hypertable]: /api/:currentVersion:/distributed-hypertables/create_distributed_hypertable
diff --git a/api/create_hypertable_old.md b/api/create_hypertable_old.md
new file mode 100644
index 0000000000..46306403fb
--- /dev/null
+++ b/api/create_hypertable_old.md
@@ -0,0 +1,185 @@
+---
+api_name: create_hypertable()
+excerpt: Create a hypertable
+topics: [hypertables]
+keywords: [hypertables, create]
+api:
+  license: apache
+  type: function
+---
+
+# create_hypertable()
+
+Creates a TimescaleDB hypertable from a PostgreSQL table (replacing the latter),
+partitioned on time and with the option to partition on one or more other
+columns. The PostgreSQL table cannot be an already partitioned table
+(declarative partitioning or inheritance). In case of a non-empty table, it is
+possible to migrate the data during hypertable creation using the `migrate_data`
+option, although this might take a long time and has certain limitations when
+the table contains foreign key constraints (see below).
+
+After creation, all actions, such as `ALTER TABLE`, `SELECT`, etc., still work
+on the resulting hypertable.
+
+For more information about using hypertables, including chunk size partitioning,
+see the [hypertable section][hypertable-docs].
+
+## Required arguments
+
+|Name|Type|Description|
+|-|-|-|
+|`relation`|REGCLASS|Identifier of table to convert to hypertable.|
+|`time_column_name`|REGCLASS| Name of the column containing time values as well as the primary column to partition by.|
+
+## Optional arguments
+
+|Name|Type|Description|
+|-|-|-|
+|`partitioning_column`|REGCLASS|Name of an additional column to partition by. If provided, the `number_partitions` argument must also be provided.|
+|`number_partitions`|INTEGER|Number of [hash partitions][hash-partitions] to use for `partitioning_column`. Must be > 0.|
+|`chunk_time_interval`|INTERVAL|Event time that each chunk covers. Must be > 0. Default is 7 days.|
+|`create_default_indexes`|BOOLEAN|Whether to create default indexes on time/partitioning columns. Default is TRUE.|
+|`if_not_exists`|BOOLEAN|Whether to print warning if table already converted to hypertable or raise exception. Default is FALSE.|
+|`partitioning_func`|REGCLASS|The function to use for calculating a value's partition.|
+|`associated_schema_name`|REGCLASS|Name of the schema for internal hypertable tables. Default is `_timescaledb_internal`.|
+|`associated_table_prefix`|TEXT|Prefix for internal hypertable chunk names. Default is `_hyper`.|
+|`migrate_data`|BOOLEAN|Set to TRUE to migrate any existing data from the `relation` table to chunks in the new hypertable. A non-empty table generates an error without this option. Large tables may take significant time to migrate. 
Defaults to FALSE.| +|`time_partitioning_func`|REGCLASS| Function to convert incompatible primary time column values to compatible ones. The function must be `IMMUTABLE`.| +|`replication_factor`|INTEGER|Replication factor to use with distributed hypertable. If not provided, value is determined by the `timescaledb.hypertable_replication_factor_default` GUC. | +|`data_nodes`|ARRAY|This is the set of data nodes that are used for this table if it is distributed. This has no impact on non-distributed hypertables. If no data nodes are specified, a distributed hypertable uses all data nodes known by this instance.| +|`distributed`|BOOLEAN|Set to TRUE to create distributed hypertable. If not provided, value is determined by the `timescaledb.hypertable_distributed_default` GUC. When creating a distributed hypertable, consider using [`create_distributed_hypertable`][create_distributed_hypertable] in place of `create_hypertable`. Default is NULL. | + +## Returns + +|Column|Type|Description| +|-|-|-| +|`hypertable_id`|INTEGER|ID of the hypertable in TimescaleDB.| +|`schema_name`|TEXT|Schema name of the table converted to hypertable.| +|`table_name`|TEXT|Table name of the table converted to hypertable.| +|`created`|BOOLEAN|TRUE if the hypertable was created, FALSE when `if_not_exists` is true and no hypertable was created.| + + +If you use `SELECT * FROM create_hypertable(...)` you get the return value +formatted as a table with column headings. + + +The use of the `migrate_data` argument to convert a non-empty table can +lock the table for a significant amount of time, depending on how much data is +in the table. It can also run into deadlock if foreign key constraints exist to +other tables. + +When converting a normal SQL table to a hypertable, pay attention to how you handle +constraints. A hypertable can contain foreign keys to normal SQL table columns, +but the reverse is not allowed. UNIQUE and PRIMARY constraints must include the +partitioning key. + +The deadlock is likely to happen when concurrent transactions simultaneously try +to insert data into tables that are referenced in the foreign key constraints +and into the converting table itself. The deadlock can be prevented by manually +obtaining `SHARE ROW EXCLUSIVE` lock on the referenced tables before calling +`create_hypertable` in the same transaction, see +[PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-lock.html) +for the syntax. + +## Units + +The `time` column supports the following data types: + +|Description|Types| +|-|-| +|Timestamp| TIMESTAMP, TIMESTAMPTZ| +|Date|DATE| +|Integer|SMALLINT, INT, BIGINT| + + +The type flexibility of the 'time' column allows the use of non-time-based +values as the primary chunk partitioning column, as long as those values can +increment. + + +For incompatible data types (for example, `jsonb`) you can specify a function to +the `time_partitioning_func` argument which can extract a compatible data type. + +The units of `chunk_time_interval` should be set as follows: + +* For time columns having timestamp or DATE types, the `chunk_time_interval` + should be specified either as an `interval` type or an integral value in + *microseconds*. +* For integer types, the `chunk_time_interval` **must** be set explicitly, as + the database does not otherwise understand the semantics of what each + integer value represents (a second, millisecond, nanosecond, etc.). 
So if + your time column is the number of milliseconds since the UNIX epoch, and you + wish to have each chunk cover 1 day, you should specify + `chunk_time_interval => 86400000`. + +In case of hash partitioning (in other words, if `number_partitions` is greater +than zero), it is possible to optionally specify a custom partitioning function. +If no custom partitioning function is specified, the default partitioning +function is used. The default partitioning function calls PostgreSQL's internal +hash function for the given type, if one exists. Thus, a custom partitioning +function can be used for value types that do not have a native PostgreSQL hash +function. A partitioning function should take a single `anyelement` type +argument and return a positive `integer` hash value. Note that this hash value +is *not* a partition ID, but rather the inserted value's position in the +dimension's key space, which is then divided across the partitions. + + +The time column in `create_hypertable` must be defined as `NOT NULL`. If this is +not already specified on table creation, `create_hypertable` automatically adds +this constraint on the table when it is executed. + + +## Sample use + +Convert table `conditions` to hypertable with just time partitioning on column `time`: + +```sql +SELECT create_hypertable('conditions', 'time'); +``` + +Convert table `conditions` to hypertable, setting `chunk_time_interval` to 24 hours. + +```sql +SELECT create_hypertable('conditions', 'time', chunk_time_interval => 86400000000); +SELECT create_hypertable('conditions', 'time', chunk_time_interval => INTERVAL '1 day'); +``` + +Convert table `conditions` to hypertable. Do not raise a warning +if `conditions` is already a hypertable: + +```sql +SELECT create_hypertable('conditions', 'time', if_not_exists => TRUE); +``` + +Time partition table `measurements` on a composite column type `report` using a +time partitioning function. Requires an immutable function that can convert the +column value into a supported column value: + +```sql +CREATE TYPE report AS (reported timestamp with time zone, contents jsonb); + +CREATE FUNCTION report_reported(report) + RETURNS timestamptz + LANGUAGE SQL + IMMUTABLE AS + 'SELECT $1.reported'; + +SELECT create_hypertable('measurements', 'report', time_partitioning_func => 'report_reported'); +``` + +Time partition table `events`, on a column type `jsonb` (`event`), which has +a top level key (`started`) containing an ISO 8601 formatted timestamp: + +```sql +CREATE FUNCTION event_started(jsonb) + RETURNS timestamptz + LANGUAGE SQL + IMMUTABLE AS + $func$SELECT ($1->>'started')::timestamptz$func$; + +SELECT create_hypertable('events', 'event', time_partitioning_func => 'event_started'); +``` + +[create_distributed_hypertable]: /api/:currentVersion:/distributed-hypertables/create_distributed_hypertable +[hash-partitions]: /use-timescale/:currentVersion:/hypertables/about-hypertables/#hypertable-partitioning +[hypertable-docs]: /use-timescale/:currentVersion:/hypertables/ diff --git a/api/delete_data_node.md b/api/delete_data_node.md index 61f0a094a9..6086dbe40c 100644 --- a/api/delete_data_node.md +++ b/api/delete_data_node.md @@ -50,7 +50,7 @@ all attached hypertables. |---|---|---| | `if_exists` | BOOLEAN | Prevent error if the data node does not exist. Defaults to false. | | `force` | BOOLEAN | Force removal of data nodes from hypertables unless that would result in data loss. Defaults to false. 
|
-| `repartition` | BOOLEAN | Make the number of space partitions equal to the new number of data nodes (if such partitioning exists). This ensures that the remaining data nodes are used evenly. Defaults to true. |
+| `repartition` | BOOLEAN | Make the number of hash partitions equal to the new number of data nodes (if such partitioning exists). This ensures that the remaining data nodes are used evenly. Defaults to true. |

### Returns
diff --git a/api/detach_data_node.md b/api/detach_data_node.md
index 63cb0e9afe..ad82046bbf 100644
--- a/api/detach_data_node.md
+++ b/api/detach_data_node.md
@@ -33,7 +33,7 @@ partition across
| `hypertable` | REGCLASS | Name of the distributed hypertable where the data node should be detached. If NULL, the data node is detached from all hypertables. |
| `if_attached` | BOOLEAN | Prevent error if the data node is not attached. Defaults to false. |
| `force` | BOOLEAN | Force detach of the data node even if that means that the replication factor is reduced below what was set. Note that it is never allowed to reduce the replication factor below 1 since that would cause data loss. |
-| `repartition` | BOOLEAN | Make the number of space partitions equal to the new number of data nodes (if such partitioning exists). This ensures that the remaining data nodes are used evenly. Defaults to true. |
+| `repartition` | BOOLEAN | Make the number of hash partitions equal to the new number of data nodes (if such partitioning exists). This ensures that the remaining data nodes are used evenly. Defaults to true. |

### Returns
diff --git a/api/dimension_info.md b/api/dimension_info.md
new file mode 100644
index 0000000000..ff3dcd93ab
--- /dev/null
+++ b/api/dimension_info.md
@@ -0,0 +1,99 @@
+# Dimension Info Constructors
+
+The `create_hypertable` and `add_dimension` functions are used together with
+dimension info constructors to specify the dimensions to partition a
+hypertable on.
+
+TimescaleDB currently supports two partition types: partitioning by
+range and partitioning by hash.
+
+
+For incompatible data types (for example, `jsonb`) you can specify a function to
+the `partition_func` argument which can extract a compatible
+data type.
+
+
+## Partition Function
+
+It is possible to specify a custom partitioning function for both
+range and hash partitioning. A partitioning function should take an
+`anyelement` argument as the only parameter and return a positive
+`integer` hash value. Note that this hash value is _not_ a partition
+identifier, but rather the inserted value's position in the
+dimension's key space, which is then divided across the partitions.
+
+If no custom partitioning function is specified, the default
+partitioning function is used, which calls PostgreSQL's internal hash
+function for the given type. Thus, a custom partitioning function can
+be used for value types that do not have a native PostgreSQL hash
+function.
+
+## by_range()
+
+Creates a by-range dimension info object that can be used with
+`create_hypertable` and `add_dimension`.
+
+### Required Arguments
+
+| Name | Type | Description |
+|---------------|------|---------------------------------|
+| `column_name` | NAME | Name of column to partition on. |
+
+
+### Optional Arguments
+
+| Name | Type | Description |
+|----------------------|------------|--------------------------------------------------------------|
+| `partition_interval` | ANYELEMENT | Interval to partition column on. |
+| `partition_func` | REGPROC | The function to use for calculating the partition of a value. 
|
+
+### Returns
+
+An instance of `_timescaledb_internal.dimension_info`, which is an
+opaque type holding the dimension information.
+
+### Notes
+
+The `partition_interval` should be specified as follows:
+
+- If the column to be partitioned is a `TIMESTAMP`, `TIMESTAMPTZ`, or
+  `DATE`, this length should be specified either as an `INTERVAL` type
+  or an integer value in *microseconds*.
+
+- If the column is some other integer type, this length should be an
+  integer that reflects the column's underlying semantics (for example, the
+  `partition_interval` should be given in milliseconds if this column
+  is the number of milliseconds since the UNIX epoch).
+
+The partition type and default partition interval for each column type
+are summarized below.
+
+| Column Type | Partition Type | Default value |
+|------------------------------|------------------|---------------|
+| `TIMESTAMP WITHOUT TIMEZONE` | INTERVAL/INTEGER | 1 week |
+| `TIMESTAMP WITH TIMEZONE` | INTERVAL/INTEGER | 1 week |
+| `DATE` | INTERVAL/INTEGER | 1 week |
+| `SMALLINT` | SMALLINT | 10000 |
+| `INT` | INT | 100000 |
+| `BIGINT` | BIGINT | 1000000 |
+
+## by_hash()
+
+Creates a by-hash dimension info object that can be used with
+`create_hypertable` and `add_dimension`.
+
+### Required Arguments
+
+| Name | Type | Description |
+|---------------------|---------|-----------------------------------------------------------------------------------------|
+| `column_name` | NAME | Name of column to partition on. |
+| `number_partitions` | INTEGER | Number of hash partitions to use for `column_name`. Must be greater than 0. |
+
+### Optional Arguments
+
+| Name | Type | Description |
+|------------------|---------|----------------------------------------------------------|
+| `partition_func` | REGPROC | The function to use for calculating a value's partition. |
+
+
+### Returns
+
+An instance of `_timescaledb_internal.dimension_info`, which is an
+opaque type holding the dimension information.
diff --git a/api/dimensions.md b/api/dimensions.md
index 71947b0807..99368bff8a 100644
--- a/api/dimensions.md
+++ b/api/dimensions.md
@@ -55,9 +55,10 @@ based dimensions.
Get information about the dimensions of hypertables.

```sql
---Create a time and space partitioned hypertable
+-- Create a range and hash partitioned hypertable
CREATE TABLE dist_table(time timestamptz, device int, temp float);
-SELECT create_hypertable('dist_table', 'time', 'device', chunk_time_interval=> INTERVAL '7 days', number_partitions=>3);
+SELECT create_hypertable('dist_table', by_range('time', INTERVAL '7 days'));
+SELECT add_dimension('dist_table', by_hash('device', 3));

SELECT * from timescaledb_information.dimensions
  ORDER BY hypertable_name, dimension_number;
@@ -90,8 +91,8 @@ Get information about dimensions of a hypertable that has two time-based dimensi

``` sql
CREATE TABLE hyper_2dim (a_col date, b_col timestamp, c_col integer);
-SELECT table_name from create_hypertable('hyper_2dim', 'a_col');
-SELECT add_dimension('hyper_2dim', 'b_col', chunk_time_interval=> '7 days');
+SELECT table_name from create_hypertable('hyper_2dim', by_range('a_col'));
+SELECT add_dimension('hyper_2dim', by_range('b_col', INTERVAL '7 days'));

SELECT * FROM timescaledb_information.dimensions
 WHERE hypertable_name = 'hyper_2dim';
diff --git a/api/drop_chunks.md b/api/drop_chunks.md
index afceef95da..e07f4938bf 100644
--- a/api/drop_chunks.md
+++ b/api/drop_chunks.md
@@ -26,7 +26,7 @@ data may still contain timestamps that are before (or after) the specified one.

Chunks can only be dropped based on their time intervals. 
They cannot be dropped -based on a space partition. +based on a hash partition. ## Required arguments diff --git a/api/page-index/page-index.js b/api/page-index/page-index.js index 239d5664d1..6e61774536 100644 --- a/api/page-index/page-index.js +++ b/api/page-index/page-index.js @@ -18,6 +18,10 @@ module.exports = [ title: "create_hypertable", href: "create_hypertable", }, + { + title: "create_hypertable (old interface)", + href: "create_hypertable_old", + }, { title: "show_chunks", href: "show_chunks", @@ -70,6 +74,10 @@ module.exports = [ title: "add_dimension", href: "add_dimension", }, + { + title: "add_dimension (old interface)", + href: "add_dimension_old", + }, { title: "create_index (transaction per chunk)", href: "create_index", diff --git a/api/set_number_partitions.md b/api/set_number_partitions.md index 7f4cc9afb2..4b09138cbc 100644 --- a/api/set_number_partitions.md +++ b/api/set_number_partitions.md @@ -1,6 +1,6 @@ --- api_name: set_number_partitions() -excerpt: Set the number of space partitions for a hypertable +excerpt: Set the number of hash partitions for a hypertable topics: [distributed hypertables, hypertables] keywords: [hypertables, partitions] api: diff --git a/api/time_bucket_ng.md b/api/time_bucket_ng.md index 996b1aac24..1ed5494f7b 100644 --- a/api/time_bucket_ng.md +++ b/api/time_bucket_ng.md @@ -140,8 +140,7 @@ CREATE TABLE conditions( temperature INT NOT NULL); SELECT create_hypertable( - 'conditions', 'day', - chunk_time_interval => INTERVAL '1 day' + 'conditions', by_range('day', INTERVAL '1 day') ); INSERT INTO conditions (day, city, temperature) VALUES diff --git a/getting-started/tables-hypertables.md b/getting-started/tables-hypertables.md index 60f282e8b3..6e6dafcc07 100644 --- a/getting-started/tables-hypertables.md +++ b/getting-started/tables-hypertables.md @@ -63,7 +63,7 @@ For the financial dataset used in this guide, create a hypertable named table that holds the timestamp data to use for partitioning (`time`): 1. Create an index to support efficient queries on the `symbol` and `time` diff --git a/migrate/dual-write-and-backfill/dual-write-from-postgres.md b/migrate/dual-write-and-backfill/dual-write-from-postgres.md index 48deae926c..4eb1761cd2 100644 --- a/migrate/dual-write-and-backfill/dual-write-from-postgres.md +++ b/migrate/dual-write-and-backfill/dual-write-from-postgres.md @@ -103,7 +103,7 @@ psql -X -d "$TARGET" \ For each table which should be converted to a hypertable in the target database, execute: ``` -SELECT create_hypertable('', '
', by_range('