This is an automated email from the ASF dual-hosted git repository.

kfaraz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git


The following commit(s) were added to refs/heads/master by this push:
     new 99e1b4efee Update default value of `inputSegmentSizeBytes` in 
configuration docs (#12678)
99e1b4efee is described below

commit 99e1b4efee3fd267e65337a31a5cfb32b548c711
Author: Tejaswini Bandlamudi <[email protected]>
AuthorDate: Wed Jun 22 09:05:03 2022 +0530

    Update default value of `inputSegmentSizeBytes` in configuration docs 
(#12678)
---
 docs/configuration/index.md                                | 2 +-
 integration-tests/docker/test-data/upgrade-sample-data.sql | 2 +-
 website/.spelling                                          | 1 +
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/configuration/index.md b/docs/configuration/index.md
index bc591a4ebb..82d6cdf4eb 100644
--- a/docs/configuration/index.md
+++ b/docs/configuration/index.md
@@ -965,7 +965,7 @@ You can configure automatic compaction through the 
following properties:
 |--------|-----------|--------|
 |`dataSource`|dataSource name to be compacted.|yes|
 |`taskPriority`|[Priority](../ingestion/tasks.md#priority) of compaction 
task.|no (default = 25)|
-|`inputSegmentSizeBytes`|Maximum number of total segment bytes processed per 
compaction task. Since a time chunk must be processed in its entirety, if the 
segments for a particular time chunk have a total size in bytes greater than 
this parameter, compaction will not run for that time chunk. Because each 
compaction task runs with a single thread, setting this value too far above 
1–2GB will result in compaction tasks taking an excessive amount of time.|no 
(default = Long.MAX_VALUE)|
+|`inputSegmentSizeBytes`|Maximum number of total segment bytes processed per 
compaction task. Since a time chunk must be processed in its entirety, if the 
segments for a particular time chunk have a total size in bytes greater than 
this parameter, compaction will not run for that time chunk. Because each 
compaction task runs with a single thread, setting this value too far above 
1–2GB will result in compaction tasks taking an excessive amount of time.|no 
(default = 100,000,000,000,000 i.e. 100TB)|
 |`maxRowsPerSegment`|Max number of rows per segment after compaction.|no|
 |`skipOffsetFromLatest`|The offset for searching segments to be compacted in 
[ISO 8601](https://en.wikipedia.org/wiki/ISO_8601) duration format. Strongly 
recommended to set for realtime dataSources. See [Data handling with 
compaction](../ingestion/compaction.md#data-handling-with-compaction).|no 
(default = "P1D")|
 |`tuningConfig`|Tuning config for compaction tasks. See below [Automatic 
compaction tuningConfig](#automatic-compaction-tuningconfig).|no|
diff --git a/integration-tests/docker/test-data/upgrade-sample-data.sql 
b/integration-tests/docker/test-data/upgrade-sample-data.sql
index 3ecad88da0..1f56f5af13 100644
--- a/integration-tests/docker/test-data/upgrade-sample-data.sql
+++ b/integration-tests/docker/test-data/upgrade-sample-data.sql
@@ -13,4 +13,4 @@
 -- See the License for the specific language governing permissions and
 -- limitations under the License.
 
-INSERT INTO druid_config (name, payload) VALUES 
('coordinator.compaction.config', 
'{"compactionConfigs":[{"dataSource":"upgradeTest","taskPriority":25,"inputSegmentSizeBytes":9223372036854775807,"maxRowsPerSegment":null,"skipOffsetFromLatest":"P1D","tuningConfig":{"maxRowsInMemory":null,"maxBytesInMemory":null,"maxTotalRows":null,"splitHintSpec":null,"partitionsSpec":{"type":"hashed","numShards":null,"partitionDimensions":[],"partitionFunction":"murmur3_32_abs","maxRowsPerSegment":500000
 [...]
+INSERT INTO druid_config (name, payload) VALUES 
('coordinator.compaction.config', 
'{"compactionConfigs":[{"dataSource":"upgradeTest","taskPriority":25,"inputSegmentSizeBytes":100000000000000,"maxRowsPerSegment":null,"skipOffsetFromLatest":"P1D","tuningConfig":{"maxRowsInMemory":null,"maxBytesInMemory":null,"maxTotalRows":null,"splitHintSpec":null,"partitionsSpec":{"type":"hashed","numShards":null,"partitionDimensions":[],"partitionFunction":"murmur3_32_abs","maxRowsPerSegment":5000000},"
 [...]
diff --git a/website/.spelling b/website/.spelling
index c9de140f80..ab4a41cd1a 100644
--- a/website/.spelling
+++ b/website/.spelling
@@ -2018,3 +2018,4 @@ protobuf
 Golang
 multiValueHandling
 _n_
+100TB


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to