-
Notifications
You must be signed in to change notification settings - Fork 50
Expand file tree
/
Copy pathproperty-overrides.json
More file actions
2272 lines (2272 loc) · 171 KB
/
property-overrides.json
File metadata and controls
2272 lines (2272 loc) · 171 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
{
"properties": {
"abort_index_segment_size": {
"description": "Capacity (in number of txns) of an abort index segment.\n\nEach partition tracks the aborted transaction offset ranges to help service client requests. If the number of transactions increases beyond this threshold, they are flushed to disk to ease memory pressure. Then they're loaded on demand. This configuration controls the maximum number of aborted transactions before they are flushed to disk.",
"config_scope": "cluster"
},
"admin": {
"example": [
"[,yaml]",
"----",
"redpanda:",
" admin:",
" - name: <admin-api-name>",
" address: <external-broker-hostname>",
" port: <admin-api-port>",
"----",
"",
"Replace the following placeholders with your values:",
"",
"* `<admin-api-name>`: Name for the Admin API listener (TLS configuration is handled separately in the <<admin_api_tls,`admin_api_tls`>> broker property)",
"* `<external-broker-hostname>`: The externally accessible hostname or IP address that clients use to connect to this broker",
"* `<admin-api-port>`: The port number for the Admin API endpoint"
],
"description": "Network address for the glossterm:Admin API[] server.",
"config_scope": "broker",
"category": "redpanda"
},
"admin_api_doc_dir": {
"config_scope": "broker",
"category": "redpanda"
},
"admin_api_tls": {
"example": [
"[,yaml]",
"----",
"redpanda:",
" admin_api_tls:",
" - name: <admin-api-tls-name>",
" enabled: true",
" cert_file: <path-to-cert-file>",
" key_file: <path-to-key-file>",
" truststore_file: <path-to-truststore-file>",
" require_client_auth: true",
"----",
"",
"Replace the following placeholders with your values:",
"",
"* `<admin-api-tls-name>`: Name that matches your Admin API listener (defined in the <<admin, `admin`>> broker property)",
"* `<path-to-cert-file>`: Full path to the TLS certificate file",
"* `<path-to-key-file>`: Full path to the TLS private key file",
"* `<path-to-truststore-file>`: Full path to the Certificate Authority file"
],
"config_scope": "broker",
"category": "redpanda"
},
"advertised_kafka_api": {
"description": "Address of the Kafka API published to the clients. If not set, the <<kafka_api, `kafka_api`>> broker property is used. When behind a load balancer or in containerized environments, this should be the externally-accessible address that clients use to connect.",
"example": [
"[,yaml]",
"----",
"redpanda:",
" advertised_kafka_api:",
" - name: <kafka-api-name>",
" address: <external-broker-hostname>",
" port: <kafka-port>",
"----",
"",
"Replace the following placeholders with your values:",
"",
"* `<kafka-api-name>`: Name that matches your Kafka API listener (defined in the <<kafka_api, `kafka_api`>> broker property)",
"* `<external-broker-hostname>`: The externally accessible hostname or IP address that clients use to connect to this broker",
"* `<kafka-port>`: The port number for the Kafka API endpoint"
],
"config_scope": "broker",
"category": "redpanda"
},
"advertised_pandaproxy_api": {
"config_scope": "broker",
"category": "pandaproxy",
"description": "Network address for the HTTP Proxy API server to publish to clients."
},
"advertised_rpc_api": {
"description": "Address of RPC endpoint published to other cluster members. If not set, the <<rpc_server, `rpc_server`>> broker property is used. This should be the address other brokers can use to communicate with this broker.",
"example": [
"[,yaml]",
"----",
"redpanda:",
" advertised_rpc_api:",
" address: <external-broker-hostname>",
" port: <rpc-port>",
"----",
"",
"Replace the following placeholders with your values:",
"",
"* `<external-broker-hostname>`: The externally accessible hostname or IP address that other brokers use to communicate with this broker",
"* `<rpc-port>`: The port number for the RPC endpoint (default is 33145)"
],
"config_scope": "broker",
"category": "redpanda"
},
"aggregate_metrics": {
"description": "Enable aggregation of metrics returned by the xref:reference:internal-metrics-reference.adoc[`/metrics`] endpoint. Aggregation can simplify monitoring by providing summarized data instead of raw, per-instance metrics. Metric aggregation is performed by summing the values of samples by labels and is done when it makes sense by the shard and/or partition labels.",
"related_topics": [
"xref:reference:internal-metrics-reference.adoc[`/metrics`]"
],
"config_scope": "cluster"
},
"api_doc_dir": {
"description": "Path to the API specifications directory. This directory contains API documentation for both the HTTP Proxy API and Schema Registry API.",
"config_scope": "broker",
"category": "pandaproxy"
},
"audit_enabled": {
"related_topics": [],
"config_scope": "cluster"
},
"audit_log_num_partitions": {
"description": "Defines the number of partitions used by a newly-created audit topic. This configuration applies only to the audit log topic and may be different from the cluster or other topic configurations. This cannot be altered for existing audit log topics.",
"config_scope": "cluster"
},
"audit_use_rpc": {
"description": "Use Redpanda's internal communication system to write audit logs. When disabled, Redpanda uses a Kafka client to write audit logs instead.",
"config_scope": "cluster"
},
"auto_create_topics_enabled": {
"description": "Allow automatic topic creation. To prevent excess topics, this property is not supported on Redpanda Cloud BYOC and Dedicated clusters. You should explicitly manage topic creation for these Redpanda Cloud clusters.\n\nIf you produce to a topic that doesn't exist, the topic will be created with defaults if this property is enabled.",
"config_scope": "cluster"
},
"broker_tls": {
"config_scope": "broker",
"category": "pandaproxy-client",
"description": "TLS configuration for the Kafka API servers to which the HTTP Proxy client should connect."
},
"brokers": {
"config_scope": "broker",
"category": "pandaproxy-client",
"description": "Network addresses of the Kafka API servers to which the HTTP Proxy client should connect."
},
"cleanup.policy": {
"description": "The cleanup policy to apply for log segments of a topic.\nWhen `cleanup.policy` is set, it overrides the cluster property xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`] for the topic.\n\n**Values**:\n\n- `delete` - Deletes data according to size-based or time-based retention limits, or both.\n- `compact` - Deletes data according to a key-based retention policy, discarding all but the latest value for each key.\n- `compact,delete` - The latest values are kept for each key, while the remaining data is deleted according to retention limits.",
"related_topics": [
"xref:cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`]",
"xref:manage:cluster-maintenance/disk-utilization.adoc#configure-segment-size[Configure segment size]",
"xref:manage:tiered-storage.adoc#compacted-topics-in-tiered-storage[Compacted topics in Tiered Storage]",
"xref:reference:properties/cluster-properties.adoc#log_cleanup_policy[`log_cleanup_policy`]"
],
"config_scope": "topic"
},
"client_cache_max_size": {
"config_scope": "broker",
"category": "pandaproxy",
"description": "The maximum number of Kafka client connections that Redpanda can cache in the LRU (least recently used) cache. The LRU cache helps optimize resource utilization by keeping the most recently used clients in memory, facilitating quicker reconnections for frequent clients while limiting memory usage."
},
"client_identifier": {
"config_scope": "broker",
"category": "pandaproxy-client",
"description": "Custom identifier to include in the Kafka request header for the HTTP Proxy client. This identifier can help debug or monitor client activities."
},
"client_keep_alive": {
"description": "Time, in milliseconds, that an idle client connection may remain open to the HTTP Proxy API.",
"config_scope": "broker",
"category": "pandaproxy"
},
"cloud_storage_access_key": {
"description": "AWS or GCP access key. This access key is part of the credentials that Redpanda requires to authenticate with object storage services for Tiered Storage. This access key is used with the <<cloud_storage_secret_key,`cloud_storage_secret_key`>> to form the complete credentials required for authentication.\nTo authenticate using IAM roles, see <<cloud_storage_credentials_source,`cloud_storage_credentials_source`>>."
},
"cloud_storage_api_endpoint": {
"description": "Optional API endpoint. The only instance in which you must set this value is when using a custom domain with your object storage service.\n\n- AWS: If not set, this is automatically generated using <<cloud_storage_region,region>> and <<cloud_storage_bucket,bucket>>. Otherwise, this uses the value assigned.\n- GCP: If not set, this is automatically generated using `storage.googleapis.com` and <<cloud_storage_bucket,bucket>>.\n- Azure: If not set, this is automatically generated using `blob.core.windows.net` and <<cloud_storage_azure_storage_account,`cloud_storage_azure_storage_account`>>. If you have enabled hierarchical namespaces for your storage account and use a custom endpoint, use <<cloud_storage_azure_adls_endpoint,`cloud_storage_azure_adls_endpoint`>>."
},
"cloud_storage_azure_adls_endpoint": {
"description": "Azure Data Lake Storage v2 endpoint override. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint.\n\nIf not set, this is automatically generated using `dfs.core.windows.net` and <<cloud_storage_azure_storage_account,`cloud_storage_azure_storage_account`>>."
},
"cloud_storage_azure_adls_port": {
"description": "Azure Data Lake Storage v2 port override. See also: <<cloud_storage_azure_adls_endpoint,`cloud_storage_azure_adls_endpoint`>>. Use when hierarchical namespaces are enabled on your storage account and you have set up a custom endpoint."
},
"cloud_storage_azure_container": {
"description": "The name of the Azure container to use with Tiered Storage. If `null`, the property is disabled."
},
"cloud_storage_azure_hierarchical_namespace_enabled": {
"description": "Force Redpanda to use or not use an Azure Data Lake Storage (ADLS) Gen2 hierarchical namespace-compliant client in <<cloud_storage_azure_storage_account,`cloud_storage_azure_storage_account`>>. \n\nWhen this property is not set, <<cloud_storage_azure_shared_key,`cloud_storage_azure_shared_key`>> must be set, and each broker checks at startup if a hierarchical namespace is enabled. \n\nWhen set to `true`, this property disables the check and assumes a hierarchical namespace is enabled. \n\nWhen set to `false`, this property disables the check and assumes a hierarchical namespace is not enabled. \n\nThis setting should be used only in emergencies where Redpanda fails to detect the correct a hierarchical namespace status."
},
"cloud_storage_azure_managed_identity_id": {
"description": "The managed identity ID to use for access to the Azure storage account. To use Azure managed identities, you must set <<cloud_storage_credentials_source,`cloud_storage_credentials_source`>> to `azure_vm_instance_metadata`. See xref:manage:security/iam-roles.adoc[IAM Roles] for more information on managed identities.\n\n*Type*: string\n\n*Default*: null\n\n*Requires restart*: No\n\n*Supported versions*: Redpanda v24.1 or later\n\n---",
"related_topics": [
"xref:manage:security/iam-roles.adoc[IAM Roles]"
],
"config_scope": "cluster"
},
"cloud_storage_azure_shared_key": {
"description": "The account access key to be used for Azure Shared Key authentication with the Azure storage account configured by <<cloud_storage_azure_storage_account,`cloud_storage_azure_storage_account`>>. If `null`, the property is disabled.\n\nNOTE: Redpanda expects this key string to be Base64 encoded.\n\n*Requires restart*: Yes",
"config_scope": "cluster"
},
"cloud_storage_backend": {
"description": "Optional object storage backend variant used to select API capabilities. If not supplied, this will be inferred from other configuration properties."
},
"cloud_storage_bucket": {
"description": "AWS or GCP bucket that should be used to store data.\n\nWARNING: Modifying this property after writing data to a bucket could cause data loss."
},
"cloud_storage_cache_directory": {
"description": "Directory for archival cache. Set when the xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`] cluster property is enabled. If not specified, Redpanda uses a default path within the data directory.",
"example": [
"[,yaml]",
"----",
"redpanda:",
" cloud_storage_cache_directory: <cache-directory-path>",
"----",
"\n",
"Replace `<cache-directory-path>` with the full path to your desired cache directory."
],
"related_topics": [
"xref:reference:properties/cluster-properties.adoc#cloud_storage_enabled[`cloud_storage_enabled`]"
],
"config_scope": "broker",
"category": "redpanda"
},
"cloud_storage_cache_max_objects": {
"description": "Maximum number of objects that may be held in the Tiered Storage cache. This applies simultaneously with <<cloud_storage_cache_size,`cloud_storage_cache_size`>>, and whichever limit is hit first will trigger trimming of the cache."
},
"cloud_storage_cache_size": {
"description": "Maximum size of the object storage cache, in bytes.\n\nThis property works together with <<cloud_storage_cache_size_percent,`cloud_storage_cache_size_percent`>> to define cache behavior:\n\n- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes.\n\n- If one of these properties is set to `0`, Redpanda uses the non-zero value.\n\n- These properties cannot both be `0`.\n\n- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`."
},
"cloud_storage_cache_size_percent": {
"related_topics": [
"xref:reference:cluster-properties.adoc#disk_reservation_percent[`disk_reservation_percent`]"
],
"description": "Maximum size of the cache as a percentage, minus the space that Redpanda avoids using defined by the xref:reference:cluster-properties.adoc#disk_reservation_percent[`disk_reservation_percent`] cluster property. This is calculated at startup and dynamically updated if either this property, `disk_reservation_percent`, or <<cloud_storage_cache_size,`cloud_storage_cache_size`>> changes.\n\nThis property works together with <<cloud_storage_cache_size,`cloud_storage_cache_size`>> to define cache behavior:\n\n- When both properties are set, Redpanda uses the smaller calculated value of the two, in bytes.\n\n- If one of these properties is set to `0`, Redpanda uses the non-zero value.\n\n- These properties cannot both be `0`.\n\n- `cloud_storage_cache_size` cannot be `0` while `cloud_storage_cache_size_percent` is `null`.",
"config_scope": "cluster"
},
"cloud_storage_cache_trim_threshold_percent_objects": {
"description": "Cache trimming is triggered when the number of objects in the cache reaches this percentage relative to its maximum object count. If unset, the default behavior is to start trimming when the cache is full.",
"version": "24.1.10"
},
"cloud_storage_cache_trim_threshold_percent_size": {
"description": "Cache trimming is triggered when the cache size reaches this percentage relative to its maximum capacity. If unset, the default behavior is to start trimming when the cache is full.",
"version": "24.1.10"
},
"cloud_storage_cache_trim_walk_concurrency": {
"description": "The maximum number of concurrent tasks launched for traversing the directory structure during cache trimming. A higher number allows cache trimming to run faster but can cause latency spikes due to increased pressure on I/O subsystem and syscall threads."
},
"cloud_storage_chunk_prefetch": {
"description": "Number of chunks to prefetch ahead of every downloaded chunk. Prefetching additional chunks can enhance read performance by reducing wait times for sequential data access. A value of `0` disables prefetching, relying solely on on-demand downloads. Adjusting this property allows for tuning the balance between improved read performance and increased network and storage I/O."
},
"cloud_storage_client_lease_timeout_ms": {
"description": "The maximum time Redpanda holds a connection to object storage before closing it. After this timeout, any active connection is immediately closed and must be re-established for subsequent operations.",
"config_scope": "cluster"
},
"cloud_storage_cluster_name": {
"related_topics": [
"xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]"
],
"description": "A unique name for this cluster's metadata in object storage. Use this when multiple clusters share the same storage bucket (for example, for xref:manage:whole-cluster-restore.adoc[Whole Cluster Restore]). The name must be unique within the bucket, 1-64 characters, and use only letters, numbers, underscores, and hyphens. Don't change this value once set.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.",
"config_scope": "cluster"
},
"cloud_storage_credentials_host": {
"description": "The hostname to connect to for retrieving role based credentials. Derived from <<cloud_storage_credentials_source,`cloud_storage_credentials_source`>> if not set. Only required when using IAM role based access. To authenticate using access keys, see <<cloud_storage_access_key,`cloud_storage_access_key`>>."
},
"cloud_storage_credentials_source": {
"description": "The source of credentials used to authenticate to object storage services.\nRequired for AWS or GCP authentication with IAM roles.\n\nTo authenticate using access keys, see <<cloud_storage_access_key,`cloud_storage_access_key`>>."
},
"cloud_storage_crl_file": {
"description": "Path to certificate revocation list for <<cloud_storage_trust_file, `cloud_storage_trust_file`>>."
},
"cloud_storage_disable_archival_stm_rw_fence": {
"description": "Disables the concurrency control mechanism in Tiered Storage. This safety feature keeps data organized and correct when multiple processes access it simultaneously. Disabling it can cause data consistency problems, so use this setting only for testing, never in production systems."
},
"cloud_storage_disable_archiver_manager": {
"description": "Use legacy upload mode and do not start archiver_manager.",
"config_scope": "cluster"
},
"cloud_storage_disable_chunk_reads": {
"description": "Disable chunk reads and switch back to legacy mode where full segments are downloaded. When set to `true`, this option disables the more efficient chunk-based reads, causing Redpanda to download entire segments. This legacy behavior might be useful in specific scenarios where chunk-based fetching is not optimal."
},
"cloud_storage_disable_read_replica_loop_for_tests": {
"description": "Begins the read replica sync loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production."
},
"cloud_storage_disable_remote_labels_for_tests": {
"description": "If `true`, Redpanda disables remote labels and falls back on the hash-based object naming scheme for new topics. \n\nCAUTION: This property exists to simplify testing and shouldn't be set in production.",
"config_scope": "cluster"
},
"cloud_storage_disable_upload_consistency_checks": {
"description": "Disable all upload consistency checks to allow Redpanda to upload logs with gaps and replicate metadata with consistency violations. Do not change the default value unless requested by Redpanda Support."
},
"cloud_storage_disable_upload_loop_for_tests": {
"description": "Begins the upload loop in topic partitions with Tiered Storage enabled. The property exists to simplify testing and shouldn't be set in production."
},
"cloud_storage_enable_compacted_topic_reupload": {
"description": "Enable re-uploading data for compacted topics.\nWhen set to `true`, Redpanda can re-upload data for compacted topics to object storage, ensuring that the most current state of compacted topics is available in the cloud. Disabling this property (`false`) may reduce storage and network overhead but at the risk of not having the latest compacted data state in object storage."
},
"cloud_storage_enable_remote_allow_gaps": {
"description": "Controls the eviction of locally stored log segments when Tiered Storage uploads are paused. Set to `false` to only evict data that has already been uploaded to object storage. If the retained data fills the local volume, Redpanda throttles producers. Set to `true` to allow the eviction of locally stored log segments, which may create gaps in offsets."
},
"cloud_storage_enable_remote_read": {
"description": "Default remote read config value for new topics.\nWhen set to `true`, new topics are by default configured to allow reading data directly from object storage, facilitating access to older data that might have been offloaded as part of Tiered Storage. With the default set to `false`, remote reads must be explicitly enabled at the topic level."
},
"cloud_storage_enable_remote_write": {
"description": "Default remote write value for new topics.\nWhen set to `true`, new topics are by default configured to upload data to object storage. With the default set to `false`, remote write must be explicitly enabled at the topic level."
},
"cloud_storage_enable_scrubbing": {
"description": "Enable routine checks (scrubbing) of object storage partitions. The scrubber validates the integrity of data and metadata uploaded to object storage."
},
"cloud_storage_enable_segment_merging": {
"related_topics": [
"xref:manage:tiered-storage.adoc#object-storage-housekeeping[Object storage housekeeping]"
]
},
"cloud_storage_enable_segment_uploads": {
"description": "Controls the upload of log segments to Tiered Storage. If set to `false`, this property temporarily pauses all log segment uploads from the Redpanda cluster. When the uploads are paused, the <<cloud_storage_enable_remote_allow_gaps, `cloud_storage_enable_remote_allow_gaps`>> cluster configuration and `redpanda.remote.allowgaps` topic properties control local retention behavior.",
"related_topics": [
"xref:properties/topic-properties.adoc#redpandaremoteallowgaps[`redpanda.remote.allowgaps`]"
]
},
"cloud_storage_enabled": {
"related_topics": []
},
"cloud_storage_full_scrub_interval_ms": {
"description": "Interval, in milliseconds, between a final scrub and the next scrub."
},
"cloud_storage_garbage_collect_timeout_ms": {
"description": "Timeout for running the cloud storage garbage collection, in milliseconds."
},
"cloud_storage_gc_max_segments_per_run": {
"version": "v25.3.11",
"description": "Maximum number of log segments to delete from object storage during each housekeeping run. This limits the rate of object deletions to prevent overwhelming the object storage API. Each segment requires 2-3 object storage delete operations (for the data file, index file, and optionally a transaction manifest), so this value directly controls API request rate. See xref:manage:tiered-storage.adoc#object-storage-housekeeping[Object storage housekeeping]."
},
"cloud_storage_graceful_transfer_timeout_ms": {
"description": "Time limit on waiting for uploads to complete before a leadership transfer. If this is `null`, leadership transfers proceed without waiting."
},
"cloud_storage_housekeeping_interval_ms": {
"description": "Interval, in milliseconds, between object storage housekeeping tasks."
},
"cloud_storage_hydrated_chunks_per_segment_ratio": {
"description": "The maximum number of chunks per segment that can be hydrated at a time. Above this number, unused chunks are trimmed.\n\nA segment is divided into chunks. Chunk hydration means downloading the chunk (which is a small part of a full segment) from cloud storage and placing it in the local disk cache. Redpanda periodically removes old, unused chunks from your local disk. This process is called chunk eviction. This property controls how many chunks can be present for a given segment in local disk at a time, before eviction is triggered, removing the oldest ones from disk. Note that this property is not used for the default eviction strategy which simply removes all unused chunks."
},
"cloud_storage_hydration_timeout_ms": {
"description": "Time to wait for a hydration request to be fulfilled. If hydration is not completed within this time, the consumer is notified with a timeout error.\n\nNegative doesn't make sense, but it may not be checked-for/enforced. Large is subjective, but a huge timeout also doesn't make sense. This particular config doesn't have a min/max bounds control, but it probably should to avoid mistakes."
},
"cloud_storage_idle_threshold_rps": {
"description": "The object storage request rate threshold for idle state detection. If the average request rate for the configured period is lower than this threshold, the object storage is considered idle."
},
"cloud_storage_idle_timeout_ms": {
"description": "The timeout, in milliseconds, used to detect the idle state of the object storage API. If the average object storage request rate is below this threshold for a configured amount of time, the object storage is considered idle and the housekeeping jobs are started."
},
"cloud_storage_initial_backoff_ms": {
"description": "Initial backoff time for exponential backoff algorithm (ms)."
},
"cloud_storage_inventory_hash_path_directory": {
"description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.",
"example": [
"[,yaml]",
"----",
"redpanda:",
" cloud_storage_inventory_hash_store: <inventory-hash-directory-path>",
"----",
"",
"Replace `<inventory-hash-directory-path>` with the full path to your desired inventory hash storage directory."
],
"config_scope": "broker"
},
"cloud_storage_inventory_hash_store": {
"description": "Directory to store inventory report hashes for use by cloud storage scrubber. If not specified, Redpanda uses a default path within the data directory.",
"example": [
"[,yaml]",
"----",
"redpanda:",
" cloud_storage_inventory_hash_store: <inventory-hash-directory-path>",
"----"
],
"config_scope": "broker",
"category": "redpanda"
},
"cloud_storage_inventory_max_hash_size_during_parse": {
"description": "Maximum bytes of hashes held in memory before writing data to disk during inventory report parsing. This affects the number of files written to disk during inventory report parsing. When this limit is reached, new files are written to disk."
},
"cloud_storage_manifest_cache_size": {
"description": "Amount of memory that can be used to handle Tiered Storage metadata."
},
"cloud_storage_manifest_max_upload_interval_sec": {
"description": "Minimum interval, in seconds, between partition manifest uploads. Actual time between uploads may be greater than this interval. If this is `null`, metadata is updated after each segment upload."
},
"cloud_storage_manifest_upload_timeout_ms": {
"description": "Manifest upload timeout, in milliseconds."
},
"cloud_storage_materialized_manifest_ttl_ms": {
"description": "The interval, in milliseconds, determines how long the materialized manifest can stay in the cache under contention. This setting is used for performance tuning. When the spillover manifest is materialized and stored in the cache, and the cache needs to evict it, it uses this value as a timeout. The cursor that uses the spillover manifest uses this value as a TTL interval, after which it stops referencing the manifest making it available for eviction. This only affects spillover manifests under contention.",
"config_scope": "cluster"
},
"cloud_storage_max_concurrent_hydrations_per_shard": {
"description": "Maximum concurrent segment hydrations of remote data per CPU core. If unset, value of `cloud_storage_max_connections / 2` is used, which means that half of available object storage bandwidth could be used to download data from object storage. If the cloud storage cache is empty every new segment reader will require a download. This will lead to 1:1 mapping between number of partitions scanned by the fetch request and number of parallel downloads. If this value is too large the downloads can affect other workloads. In case of any problem caused by the tiered-storage reads this value can be lowered. This will only affect segment hydrations (downloads) but won't affect cached segments. If fetch request is reading from the tiered-storage cache its concurrency will only be limited by available memory."
},
"cloud_storage_max_connection_idle_time_ms": {
"description": "Defines the maximum duration an HTTPS connection to object storage can stay idle, in milliseconds, before being terminated.\nThis setting reduces resource utilization by closing inactive connections. Adjust this property to balance keeping connections ready for subsequent requests and freeing resources associated with idle connections."
},
"cloud_storage_max_segment_readers_per_shard": {
"description": "Maximum concurrent I/O cursors of materialized remote segments per CPU core. If unset, the value of `topic_partitions_per_shard` is used, where one segment reader per partition is used if the shard is at its maximum partition capacity. These readers are cached across Kafka consume requests and store a readahead buffer."
},
"cloud_storage_max_segments_pending_deletion_per_partition": {
"description": "The per-partition limit for the number of segments pending deletion from the cloud. Segments can be deleted due to retention or compaction. If this limit is breached and deletion fails, then segments are orphaned in the cloud and must be removed manually."
},
"cloud_storage_max_throughput_per_shard": {
"description": "Maximum bandwidth allocated to Tiered Storage operations per shard, in bytes per second.\nThis setting limits the Tiered Storage subsystem's throughput per shard, facilitating precise control over bandwidth usage in testing scenarios. In production environments, use `cloud_storage_throughput_limit_percent` for more dynamic throughput management based on actual storage capabilities."
},
"cloud_storage_metadata_sync_timeout_ms": {
"description": "Timeout for xref:manage:tiered-storage.adoc[] metadata synchronization."
},
"cloud_storage_min_chunks_per_segment_threshold": {
"description": "The minimum number of chunks per segment for trimming to be enabled. If the number of chunks in a segment is below this threshold, the segment is small enough that all chunks in it can be hydrated at any given time."
},
"cloud_storage_prefetch_segments_max": {
"version": "v26.1.1"
},
"cloud_storage_readreplica_manifest_sync_timeout_ms": {
"description": "Timeout to check if new data is available for partitions in object storage for read replicas."
},
"cloud_storage_recovery_temporary_retention_bytes_default": {
"description": "Retention in bytes for topics created during automated recovery."
},
"cloud_storage_recovery_topic_validation_depth": {
"description": "Number of metadata segments to validate, from newest to oldest, when <<cloud_storage_recovery_topic_validation_mode,`cloud_storage_recovery_topic_validation_mode`>> is set to `check_manifest_and_segment_metadata`."
},
"cloud_storage_recovery_topic_validation_mode": {
"description": "Validation performed before recovering a topic from object storage. In case of failure, the reason for the failure appears as `ERROR` lines in the Redpanda application log. For each topic, this reports errors for all partitions, but for each partition, only the first error is reported.\n\nThis property accepts the following parameters:\n\n- `no_check`: Skips the checks for topic recovery.\n- `check_manifest_existence`: Runs an existence check on each `partition_manifest`. Fails if there are connection issues to the object storage.\n- `check_manifest_and_segment_metadata`: Downloads the manifest and runs a consistency check, comparing the metadata with the cloud storage objects. The process fails if metadata references any missing cloud storage objects.\n\nExample: Redpanda validates the topic `kafka/panda-topic-recovery-NOT-OK` and stops due to a fatal error on partition 0:\n\n```bash\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - [fiber11|0|299996ms recovery validation of {kafka/panda-topic-recovery-NOT-OK/0}/24] - manifest metadata check: missing segment, validation not ok\nERROR 2024-04-24 21:29:08,166 [shard 1:main] cluster - topics_frontend.cc:519 - Stopping recovery of {kafka/panda-topic-recovery-NOT-OK} due to validation error\n```\n\nEach failing partition error message has the following format:\n\n```bash\nERROR .... [... recovery validation of {<namespace/topic/partition>}...] - <failure-reason>, validation not ok\n```\n\nAt the end of the process, Redpanda outputs a final ERROR message: \n\n```bash\nERROR ... ... - Stopping recovery of {<namespace/topic>} due to validation error\n```"
},
"cloud_storage_roles_operation_timeout_ms": {
"description": "Timeout, in milliseconds, for IAM role-related operations."
},
"cloud_storage_scrubbing_interval_jitter_ms": {
"description": "Jitter applied to the object storage scrubbing interval."
},
"cloud_storage_segment_max_upload_interval_sec": {
"description": "Time that a segment can be kept locally without uploading it to the object storage, in seconds."
},
"cloud_storage_segment_size_min": {
"description": "Smallest acceptable segment size in the object storage. Default: `cloud_storage_segment_size_target`/2."
},
"cloud_storage_segment_size_target": {
"description": "Desired segment size in the object storage. The default is set in the topic-level `segment.bytes` property."
},
"cloud_storage_segment_upload_timeout_ms": {
"description": "Log segment upload timeout, in milliseconds."
},
"cloud_storage_spillover_manifest_max_segments": {
"description": "Maximum number of segments in the spillover manifest that can be offloaded to the object storage. This setting serves as a threshold for triggering data offload based on the number of segments, rather than the total size of the manifest. It is designed for use in testing environments to control the offload behavior more granularly. In production settings, manage offloads based on the manifest size through `cloud_storage_spillover_manifest_size` for more predictable outcomes."
},
"cloud_storage_spillover_manifest_size": {
"description": "The size of the manifest which can be offloaded to the cloud. If the size of the local manifest stored in Redpanda exceeds `cloud_storage_spillover_manifest_size` by two times, the spillover mechanism splits the manifest into two parts, and one part is uploaded to object storage."
},
"cloud_storage_throughput_limit_percent": {
"description": "Maximum throughput used by Tiered Storage per broker expressed as a percentage of the disk bandwidth. If the server has several disks, Redpanda uses the one that stores the Tiered Storage cache. Even if Tiered Storage is allowed to use the full bandwidth of the disk (100%), it won't necessarily use it in full. The actual usage depends on your workload and the state of the Tiered Storage cache. This setting is a safeguard that prevents Tiered Storage from using too many system resources: it is not a performance tuning knob."
},
"cloud_storage_topic_purge_grace_period_ms": {
"description": "Grace period during which the purger refuses to purge the topic."
},
"cloud_storage_upload_ctrl_d_coeff": {
"description": "Derivative coefficient for upload PID controller."
},
"cloud_storage_upload_ctrl_max_shares": {
"description": "Maximum number of I/O and CPU shares that archival upload can use."
},
"cloud_storage_upload_ctrl_min_shares": {
"description": "Minimum number of I/O and CPU shares that archival upload can use."
},
"cloud_storage_upload_ctrl_p_coeff": {
"description": "Proportional coefficient for upload PID controller."
},
"cloud_storage_upload_ctrl_update_interval_ms": {
"description": "The interval (in milliseconds) for updating the controller that manages the priority of Tiered Storage uploads. This property determines how frequently the system recalculates and adjusts the work scheduling for uploads to object storage.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support."
},
"cloud_storage_upload_loop_initial_backoff_ms": {
"description": "Initial backoff interval when there is nothing to upload for a partition, in milliseconds."
},
"cloud_storage_upload_loop_max_backoff_ms": {
"description": "Maximum backoff interval when there is nothing to upload for a partition, in milliseconds."
},
"cloud_storage_url_style": {
"description": "Configure the addressing style that controls how Redpanda formats bucket URLs for S3-compatible object storage.\n\nLeave this property unset (`null`) to use automatic configuration:\n\n* For AWS S3: Redpanda attempts `virtual_host` addressing first, then falls back to `path` style if needed\n* For MinIO: Redpanda automatically uses `path` style regardless of `MINIO_DOMAIN` configuration\n\nSet this property explicitly to override automatic configuration, ensure consistent behavior across deployments, or when using S3-compatible storage that requires a specific URL format.\n\nCAUTION: AWS requires virtual-hosted addressing for buckets created after September 30, 2020. If you use AWS S3 with buckets created after this date, use `virtual_host` addressing.\n\nNOTE: For MinIO deployments, Redpanda defaults to `path` style when this property is unset. To use `virtual_host` addressing with a configured `MINIO_DOMAIN`, set this property explicitly to `virtual_host`. For other S3-compatible storage backends, consult your provider's documentation to determine the required URL style.",
"config_scope": "cluster"
},
"cloud_topics_allow_materialization_failure": {
"version": "v26.1.1"
},
"cloud_topics_compaction_interval_ms": {
"version": "v26.1.1"
},
"cloud_topics_compaction_key_map_memory": {
"version": "v26.1.1"
},
"cloud_topics_compaction_max_object_size": {
"version": "v26.1.1"
},
"cloud_topics_disable_level_zero_gc_for_tests": {
"version": "v26.1.1"
},
"cloud_topics_disable_metastore_flush_loop_for_tests": {
"version": "v26.1.1"
},
"cloud_topics_disable_reconciliation_loop": {
"config_scope": "cluster"
},
"cloud_topics_enabled": {
"description": "Enable Cloud Topics for the cluster. Cloud Topics are optimized for high-throughput, cost-sensitive workloads that can tolerate higher latencies compared to standard Kafka topics.",
"related_topics": [
"self-managed-only: xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics]"
],
"config_scope": "cluster"
},
"cloud_topics_epoch_service_epoch_increment_interval": {
"description": "The interval, in milliseconds, at which the cluster epoch is incremented.\n\nThe cluster epoch is a frozen point in time of the committed offset of the controller log, used to coordinate partition creation and track changes in Tiered Storage. This property controls how frequently the epoch is refreshed. More frequent updates provide finer-grained coordination but may increase overhead.\n\nDecrease this interval if you need more frequent epoch updates for faster coordination in Tiered Storage operations, or increase it to reduce coordination overhead in stable clusters.",
"version": "v25.3.3"
},
"cloud_topics_epoch_service_local_epoch_cache_duration": {
"description": "The duration, in milliseconds, for which a cluster-wide epoch is cached locally on each broker.\n\nCaching the epoch locally reduces the need for frequent coordination with the controller. This property controls how long each broker can use a cached epoch value before fetching the latest value.\n\nIncrease this value to reduce coordination overhead in clusters with stable workloads. Decrease it if you need brokers to react more quickly to epoch changes in Tiered Storage.",
"version": "v25.3.3"
},
"cloud_topics_epoch_service_max_same_epoch_duration": {
"version": "v26.1.1"
},
"cloud_topics_fetch_debounce_enabled": {
"version": "v26.1.1"
},
"cloud_topics_gc_health_check_interval": {
"version": "v26.1.1"
},
"cloud_topics_l1_indexing_interval": {
"version": "v26.1.1"
},
"cloud_topics_long_term_file_deletion_delay": {
"version": "v26.1.1"
},
"cloud_topics_long_term_flush_interval": {
"version": "v26.1.1"
},
"cloud_topics_long_term_garbage_collection_interval": {
"config_scope": "cluster"
},
"cloud_topics_metastore_lsm_apply_timeout_ms": {
"version": "v26.1.1"
},
"cloud_topics_metastore_replication_timeout_ms": {
"version": "v26.1.1"
},
"cloud_topics_num_metastore_partitions": {
"version": "v26.1.1"
},
"cloud_topics_parallel_fetch_enabled": {
"version": "v26.1.1"
},
"cloud_topics_preregistered_object_ttl": {
"version": "v26.1.1"
},
"cloud_topics_produce_batching_size_threshold": {
"config_scope": "cluster"
},
"cloud_topics_produce_cardinality_threshold": {
"config_scope": "cluster"
},
"cloud_topics_produce_no_pid_concurrency": {
"version": "v26.1.1"
},
"cloud_topics_produce_upload_interval": {
"config_scope": "cluster"
},
"cloud_topics_produce_write_inflight_limit": {
"version": "v26.1.1"
},
"cloud_topics_reconciliation_interval": {
"config_scope": "cluster"
},
"cloud_topics_reconciliation_max_interval": {
"version": "v26.1.1"
},
"cloud_topics_reconciliation_max_object_size": {
"version": "v26.1.1"
},
"cloud_topics_reconciliation_min_interval": {
"version": "v26.1.1"
},
"cloud_topics_reconciliation_parallelism": {
"version": "v26.1.1"
},
"cloud_topics_reconciliation_slowdown_blend": {
"version": "v26.1.1"
},
"cloud_topics_reconciliation_speedup_blend": {
"version": "v26.1.1"
},
"cloud_topics_reconciliation_target_fill_ratio": {
"version": "v26.1.1"
},
"cloud_topics_short_term_gc_backoff_interval": {
"description": "The interval, in milliseconds, between invocations of the L0 garbage collection work loop when no progress is being made or errors are occurring.\n\nL0 (level-zero) objects are short-term data objects in Tiered Storage that are periodically garbage collected. When GC encounters errors or cannot make progress (for example, if there are no objects eligible for deletion), this backoff interval prevents excessive retries.\n\nIncrease this value to reduce system load when GC cannot make progress. Decrease it if you need faster retry attempts after transient errors.",
"version": "v25.3.3"
},
"cloud_topics_short_term_gc_interval": {
"description": "The interval, in milliseconds, between invocations of the L0 (level-zero) garbage collection work loop when progress is being made.\n\nL0 objects are short-term data objects in Tiered Storage associated with global epochs. This property controls how frequently GC runs when it successfully deletes objects. Lower values increase GC frequency, which can help maintain lower object counts but may increase S3 API usage.\n\nDecrease this value if L0 object counts are growing too quickly and you need more aggressive garbage collection. Increase it to reduce S3 API costs in clusters with lower ingestion rates.",
"version": "v25.3.3"
},
"cloud_topics_short_term_gc_minimum_object_age": {
"description": "The minimum age, in milliseconds, of an L0 (level-zero) object before it becomes eligible for garbage collection.\n\nThis grace period delays deletion of L0 objects even after they become eligible based on epoch. The delay provides a safety buffer that can support recovery in cases involving accidental deletion or other operational issues.\n\nIncrease this value to extend the retention window for L0 objects, providing more time for recovery from operational errors. Decrease it to free up object storage space more quickly, but with less protection against accidental deletion.",
"version": "v25.3.3"
},
"cloud_topics_upload_part_size": {
"version": "v26.1.1"
},
"cluster_id": {
"description": "Cluster identifier.",
"config_scope": "cluster"
},
"compaction.strategy": {
"description": "Specifies the strategy used to determine which records to remove during log compaction. The compaction strategy controls how Redpanda identifies and removes duplicate records while preserving the latest value for each key.",
"related_topics": [
"xref:reference:properties/cluster-properties.adoc#compaction_strategy[`compaction_strategy`]"
],
"config_scope": "topic"
},
"compaction_ctrl_update_interval_ms": {
"description": "The interval (in milliseconds) for updating the controller responsible for compaction tasks. The controller uses this interval to decide how to prioritize background compaction work, which is essential for maintaining efficient storage use.\n\nThis is an internal-only configuration and should be enabled only after consulting with Redpanda support.",
"config_scope": "cluster"
},
"compression.type": {
"description": "Redpanda ignores this property and always uses producer compression semantics. If producers send compressed data, Redpanda stores and serves it as-is. If producers send uncompressed data, Redpanda stores it uncompressed.\n\nThis property exists for Apache Kafka compatibility. Configure compression in your producers instead of using this topic property.\n\nCompression reduces message size and improves throughput, but increases CPU utilization. Enable producer batching to increase compression efficiency.\n\nWhen set, this property overrides the cluster property xref:./cluster-properties.adoc#log_compression_type[`log_compression_type`] for the topic.",
"related_topics": [
"xref:reference:properties/cluster-properties.adoc#log_compression_type[`log_compression_type`]",
"xref:develop:produce-data/configure-producers.adoc#message-batching[Message batching]",
"xref:develop:produce-data/configure-producers.adoc#commonly-used-producer-configuration-options[Common producer configuration options]"
],
"config_scope": "topic"
},
"confluent.key.schema.validation": {
"description": "Enable validation of the schema ID for keys on a record. This is a compatibility alias for `redpanda.key.schema.id.validation`. When enabled, Redpanda validates that the schema ID encoded in the record's key is registered in the Schema Registry according to the configured subject name strategy.",
"config_scope": "topic",
"related_topics": [
"xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]"
]
},
"confluent.key.subject.name.strategy": {
"description": "The subject name strategy for keys when `confluent.key.schema.validation` is enabled. This is a compatibility alias for `redpanda.key.subject.name.strategy` that determines how the topic and schema are mapped to a subject name in the Schema Registry.",
"config_scope": "topic",
"related_topics": [
"xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]"
]
},
"confluent.value.schema.validation": {
"description": "Enable validation of the schema ID for values on a record. This is a compatibility alias for <<redpandavalueschemavalidation, `redpanda.value.schema.id.validation`>>. When enabled, Redpanda validates that the schema ID encoded in the record's value is registered in the Schema Registry according to the configured subject name strategy.",
"config_scope": "topic",
"related_topics": [
"xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]"
]
},
"confluent.value.subject.name.strategy": {
"description": "The subject name strategy for values when `confluent.value.schema.validation` is enabled. This is a compatibility alias for <<redpandavaluesubjectnamestrategy, `redpanda.value.subject.name.strategy`>>. This determines how the topic and schema are mapped to a subject name in the Schema Registry.",
"config_scope": "topic",
"related_topics": [
"xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]"
]
},
"consumer_group_lag_collection_interval_sec": {
"description": "How often to run the collection loop when <<enable_consumer_group_metrics,`enable_consumer_group_metrics`>> contains `consumer_lag`.\n\nReducing the value of `consumer_group_lag_collection_interval_sec` increases the metric collection frequency, which may raise resource utilization. In most environments, this impact is minimal, but it's best practice to monitor broker resource usage in high-scale settings.",
"config_scope": "cluster"
},
"consumer_heartbeat_interval_ms": {
"description": "Interval (in milliseconds) for consumer heartbeats.",
"config_scope": "broker",
"category": "pandaproxy-client"
},
"consumer_instance_timeout_ms": {
"description": "How long to wait for an idle consumer before removing it. A consumer is considered idle when it's not making requests or heartbeats.",
"config_scope": "broker",
"category": "pandaproxy"
},
"consumer_offsets_topic_batch_cache_enabled": {
"description": "This property lets you enable the batch cache for the consumer offsets topic. By default, the cache for the consumer offsets topic is disabled. Changing this property is not recommended in production systems, as it may affect performance. The change is applied only after a restart.",
"config_scope": "cluster"
},
"consumer_rebalance_timeout_ms": {
"description": "Timeout (in milliseconds) for consumer rebalance.",
"config_scope": "broker",
"category": "pandaproxy-client"
},
"consumer_request_max_bytes": {
"description": "Maximum bytes to fetch per request.",
"config_scope": "broker",
"category": "pandaproxy-client"
},
"consumer_request_min_bytes": {
"description": "Minimum bytes to fetch per request.",
"config_scope": "broker",
"category": "pandaproxy-client"
},
"consumer_request_timeout_ms": {
"description": "Interval (in milliseconds) for consumer request timeout.",
"config_scope": "broker",
"category": "pandaproxy-client"
},
"consumer_session_timeout_ms": {
"description": "Timeout (in milliseconds) for consumer session.",
"config_scope": "broker",
"category": "pandaproxy-client"
},
"controller_backend_reconciliation_concurrency": {
"description": "The maximum number of cluster updates the controller can process at the same time. Higher values speed up cluster changes but use more resources.",
"config_scope": "cluster"
},
"controller_log_accummulation_rps_capacity_topic_operations": {
"description": "Maximum capacity of rate limit accumulation in controller topic operations limit.",
"config_scope": "cluster"
},
"core_balancing_continuous": {
"related_topics": [],
"config_scope": "cluster"
},
"core_balancing_debounce_timeout": {
"description": "Interval, in milliseconds, between trigger and invocation of core balancing.\n\n*Unit*: milliseconds",
"config_scope": "cluster"
},
"crash_loop_limit": {
"config_scope": "broker",
"category": "redpanda",
"description": "A limit on the number of consecutive times a broker can crash within one hour before its crash-tracking logic is reset. This limit prevents a broker from getting stuck in an infinite cycle of crashes.\n\nIf `null`, the property is disabled and no limit is applied.\n\nThe crash-tracking logic is reset (to zero consecutive crashes) by any of the following conditions:\n\n* The broker shuts down cleanly.\n* One hour passes since the last crash.\n* The `redpanda.yaml` broker configuration file is updated.\n* The `startup_log` file in the broker's <<data_directory, `data_directory`>> broker property is manually deleted."
},
"crash_loop_sleep_sec": {
"description": "The amount of time the broker sleeps before terminating when the limit on consecutive broker crashes (<<crash_loop_limit, `crash_loop_limit`>>) is reached. This property provides a debugging window for you to access the broker before it terminates, and is particularly useful in Kubernetes environments.\n\nIf `null`, the property is disabled, and the broker terminates immediately after reaching the crash loop limit.\n\nFor information about how to reset the crash loop limit, see the <<crash_loop_limit, `crash_loop_limit`>> broker property.",
"version": "v24.3.4",
"config_scope": "broker",
"category": "redpanda"
},
"data_directory": {
"config_scope": "broker",
"category": "redpanda"
},
"data_transforms_binary_max_size": {
"description": "The maximum size for a deployable WebAssembly binary that the broker can store.",
"config_scope": "cluster"
},
"data_transforms_per_core_memory_reservation": {
"description": "The amount of memory to reserve per core for data transform (Wasm) virtual machines. Memory is reserved on boot. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.",
"config_scope": "cluster"
},
"data_transforms_per_function_memory_limit": {
"description": "The amount of memory to give an instance of a data transform (Wasm) virtual machine. The maximum number of functions that can be deployed to a cluster is equal to `data_transforms_per_core_memory_reservation` / `data_transforms_per_function_memory_limit`.",
"config_scope": "cluster"
},
"data_transforms_read_buffer_memory_percentage": {
"description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for read buffers.",
"config_scope": "cluster"
},
"data_transforms_write_buffer_memory_percentage": {
"description": "include::reference:partial$internal-use-property.adoc[]\n\nThe percentage of available memory in the transform subsystem to use for write buffers.",
"config_scope": "cluster"
},
"datalake_coordinator_snapshot_max_delay_secs": {
"description": "Maximum amount of time the coordinator waits to snapshot after a command appears in the log.",
"config_scope": "cluster"
},
"datalake_disk_space_monitor_enable": {
"description": "Option to explicitly disable enforcement of datalake disk space usage.",
"config_scope": "cluster"
},
"datalake_scheduler_max_concurrent_translations": {
"description": "The maximum number of translations that the datalake scheduler will allow to run at a given time. If a translation is requested, but the number of running translations exceeds this value, the request will be put to sleep temporarily, polling until capacity becomes available.",
"config_scope": "cluster"
},
"datalake_scheduler_time_slice_ms": {
"description": "Time, in milliseconds, for a datalake translation as scheduled by the datalake scheduler. After a translation is scheduled, it will run until either the time specified has elapsed or all pending records on its source partition have been translated.",
"config_scope": "cluster"
},
"datalake_scratch_space_soft_limit_size_percent": {
"description": "Size of the scratch space datalake soft limit expressed as a percentage of the `datalake_scratch_space_size_bytes` configuration value.",
"config_scope": "cluster"
},
"default_leaders_preference": {
"description": "Default settings for preferred location of topic partition leaders. It can be either \"none\" (no preference), or \"racks:<rack1>,<rack2>,...\" (prefer brokers with rack ID from the list).\n\nThe list can contain one or more rack IDs. If you specify multiple IDs, Redpanda tries to distribute the partition leader locations equally across brokers in these racks.\n\nIf config_ref:enable_rack_awareness,true,properties/cluster-properties[] is set to `false`, leader pinning is disabled across the cluster.",
"related_topics": [
"xref:develop:produce-data/leader-pinning.adoc[Leader pinning]"
],
"config_scope": "cluster"
},
"default_redpanda_storage_mode": {
"description": "Set the default storage mode for new topics. This value applies to any topic created without an explicit <<redpandastoragemode,`redpanda.storage.mode`>> setting (that is, when the topic's `redpanda.storage.mode` is `unset`).\n\nAccepted values:\n\n* `unset`: Defer to the legacy <<cloud_storage_enable_remote_read,`redpanda.remote.read`>> and <<cloud_storage_enable_remote_write,`redpanda.remote.write`>> topic properties for Tiered Storage configuration.\n* `local`: Store data only on local disks, with no object storage involvement.\nifndef::env-cloud[]\n* `tiered`: Store data on local disks and replicate it to object storage using xref:manage:tiered-storage.adoc[Tiered Storage]. Equivalent to setting `redpanda.remote.read` and `redpanda.remote.write` to `true`.\n* `cloud`: Store data primarily in object storage using xref:develop:manage-topics/cloud-topics.adoc[Cloud Topics].\nendif::[]\nifdef::env-cloud[]\n* `tiered`: Store data on local disks and replicate it to object storage using Tiered Storage. Equivalent to setting `redpanda.remote.read` and `redpanda.remote.write` to `true`.\n* `cloud`: Store data primarily in object storage using Cloud Topics.\nendif::[]",
"related_topics": [
"self-managed-only: xref:manage:tiered-storage.adoc[Tiered Storage]",
"self-managed-only: xref:develop:manage-topics/cloud-topics.adoc[Manage Cloud Topics]",
"cloud-only: xref:develop:topics/cloud-topics.adoc[Manage Cloud Topics]"
],
"config_scope": "cluster",
"version": "v26.1.1"
},
"delete.retention.ms": {
"description": "The retention time for tombstone records in a compacted topic. Redpanda removes tombstone records after the retention limit is exceeded.\n\nIf you have enabled Tiered Storage and set <<redpandaremoteread,`redpanda.remote.read`>> or <<redpandaremotewrite,`redpanda.remote.write`>> for the topic, you cannot enable tombstone removal.\n\nIf both `delete.retention.ms` and the cluster property config_ref:tombstone_retention_ms,true,properties/cluster-properties[] are set, `delete.retention.ms` overrides the cluster level tombstone retention for an individual topic.\n\nThis property supports three states:\n\n* Positive value: Sets the milliseconds to retain tombstone records before removal.\n* 0: Tombstone records are immediately eligible for removal.\n* Negative value: Disables tombstone removal entirely for this topic.",
"related_topics": [
"xref:reference:properties/cluster-properties.adoc#tombstone_retention_ms[`tombstone_retention_ms`]",
"xref:manage:cluster-maintenance/compaction-settings.adoc#tombstone-record-removal[Tombstone record removal]"
],
"config_scope": "topic"
},
"delete_topic_enable": {
"version": "v26.1.1"
},
"developer_mode": {
"description": "CAUTION: Enabling `developer_mode` isn't recommended for production use.\n\nEnable developer mode, which skips most of the checks performed at startup.",
"config_scope": "broker",
"category": "redpanda"
},
"disable_cluster_recovery_loop_for_tests": {
"description": "include::reference:partial$internal-use-property.adoc[]\n\nDisables the cluster recovery loop. This property is used to simplify testing and should not be set in production.",
"config_scope": "cluster"
},
"disk_reservation_percent": {
"description": "The percentage of total disk capacity that Redpanda will avoid using. This applies both when cloud cache and log data share a disk, as well \nas when cloud cache uses a dedicated disk. \n\nIt is recommended to not run disks near capacity to avoid blocking I/O due to low disk space, as well as avoiding performance issues associated with SSD garbage collection.",
"config_scope": "cluster"
},
"election_timeout_ms": {
"description": "Raft election timeout expressed in milliseconds.",
"config_scope": "cluster"
},
"emergency_disable_data_transforms": {
"description": "Override the cluster property xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`] and disable Wasm-powered data transforms. This is an emergency shutoff button.",
"related_topics": [
"xref:reference:properties/cluster-properties.adoc#data_transforms_enabled[`data_transforms_enabled`]"
],
"config_scope": "broker",
"category": "redpanda"
},
"empty_seed_starts_cluster": {
"description": "Controls how a new cluster is formed. All brokers in a cluster must have the same value.\n\n<<seed_servers,See how the `empty_seed_starts_cluster` broker property works with the `seed_servers` broker property>> to form a cluster.\n\nTIP: For backward compatibility, `true` is the default. Redpanda recommends using `false` in production environments to prevent accidental cluster formation.",
"config_scope": "broker",
"category": "redpanda"
},
"enable_cluster_metadata_upload_loop": {
"description": "Enables cluster metadata uploads. Required for xref:manage:whole-cluster-restore.adoc[whole cluster restore].",
"related_topics": [
"xref:manage:whole-cluster-restore.adoc[whole cluster restore]"
],
"config_scope": "cluster"
},
"enable_consumer_group_metrics": {
"description": "List of enabled consumer group metrics. Accepted values include:\n\n- `group`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`] metrics.\n- `partition`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`] metric.\n- `consumer_lag`: Enables the xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`] and xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`] metrics.\n+\nEnabling `consumer_lag` may add a small amount of additional processing overhead to the brokers, especially in environments with a high number of consumer groups or partitions.",
"related_topics": [
"xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_consumers[`redpanda_kafka_consumer_group_consumers`]",
"xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_topics[`redpanda_kafka_consumer_group_topics`]",
"xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_committed_offset[`redpanda_kafka_consumer_group_committed_offset`]",
"xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_max[`redpanda_kafka_consumer_group_lag_max`]",
"xref:reference:public-metrics-reference.adoc#redpanda_kafka_consumer_group_lag_sum[`redpanda_kafka_consumer_group_lag_sum`]",
"xref:reference:properties/cluster-properties.adoc#consumer_group_lag_collection_interval_sec[`consumer_group_lag_collection_interval_sec`]",
"self-managed-only: xref:manage:monitoring.adoc#consumers[Monitor consumer group lag]",
"cloud-only: xref:manage:monitor-cloud.adoc#consumers[Monitor consumer group lag]"
],
"config_scope": "cluster"
},
"enable_developmental_unrecoverable_data_corrupting_features": {
"config_scope": "cluster",
"exclude_from_docs": true
},
"enable_host_metrics": {
"description": "Enable exporting of some host metrics like `/proc/diskstats`, `/proc/snmp` and `/proc/net/netstat`.\n\nHost metrics are prefixed with xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`] and are available on the `/metrics` endpoint.",
"related_topics": [
"xref:reference:internal-metrics-reference.adoc#vectorized_host_diskstats_discards[`vectorized_host`]"
],
"config_scope": "cluster"
},
"enable_metrics_reporter": {
"description": "Enable the cluster metrics reporter. If `true`, the metrics reporter collects and exports to Redpanda Data a set of customer usage metrics at the interval set by <<metrics_reporter_report_interval,`metrics_reporter_report_interval`>>.\n\n[NOTE]\n====\nThe cluster metrics of the metrics reporter are different from xref:manage:monitoring.adoc[monitoring metrics].\n\n* The metrics reporter exports customer usage metrics for consumption by Redpanda Data.\n* Monitoring metrics are exported for consumption by Redpanda users.\n====",
"related_topics": [
"xref:manage:monitoring.adoc[monitoring metrics]"
],
"config_scope": "cluster"
},
"enable_sasl": {
"description": "Enable SASL authentication for Kafka connections. Authorization is required to modify this property. See also <<kafka_enable_authorization,`kafka_enable_authorization`>>.",
"config_scope": "cluster"
},
"enable_schema_id_validation": {
"related_topics": [
"xref:manage:schema-reg/schema-id-validation.adoc[Server-Side Schema ID Validation]"
],
"description": "Controls whether Redpanda validates schema IDs in records and which topic properties are enforced.\n\nValues:\n\n* `none`: Schema validation is disabled (no schema ID checks are done). Associated topic properties cannot be modified.\n* `redpanda`: Schema validation is enabled. Only Redpanda topic properties are accepted.\n* `compat`: Schema validation is enabled. Both Redpanda and compatible topic properties are accepted.",
"config_scope": "cluster"
},
"enable_shadow_linking": {
"description": "Enable creating shadow links from this cluster to a remote source cluster for data replication.",
"config_scope": "cluster"
},
"fetch_max_read_concurrency": {
"version": "v25.3.3"
},
"fetch_read_strategy": {
"description": "The strategy used to fulfill fetch requests.\n\n* `polling`: If `fetch_reads_debounce_timeout` is set to its default value, then this acts exactly like `non_polling`; otherwise, it acts like `non_polling_with_debounce` (deprecated).\n* `non_polling`: The backend is signaled when a partition has new data, so Redpanda does not need to repeatedly read from every partition in the fetch. Redpanda Data recommends using this value for most workloads, because it can improve fetch latency and CPU utilization.\n* `non_polling_with_debounce`: This option behaves like `non_polling`, but it includes a debounce mechanism with a fixed delay specified by `fetch_reads_debounce_timeout` at the start of each fetch. By introducing this delay, Redpanda can accumulate more data before processing, leading to fewer fetch operations and returning larger amounts of data. Enabling this option reduces reactor utilization, but it may also increase end-to-end latency.",
"config_scope": "cluster"
},
"fips_mode": {
"config_scope": "broker",
"category": "redpanda",
"description": "Controls whether Redpanda starts in FIPS mode. This property allows for three values: \n\n* Disabled - Redpanda does not start in FIPS mode.\n\n* Permissive - Redpanda performs the same check as enabled, but a warning is logged, and Redpanda continues to run. Redpanda loads the OpenSSL FIPS provider into the OpenSSL library. After this completes, Redpanda is operating in FIPS mode, which means that the TLS cipher suites available to users are limited to the TLSv1.2 and TLSv1.3 NIST-approved cryptographic methods.\n\n* Enabled - Redpanda verifies that the operating system is enabled for FIPS by checking `/proc/sys/crypto/fips_enabled`. If the file does not exist or does not return `1`, Redpanda immediately exits."
},
"flush.bytes": {
"description": "The maximum bytes not fsynced per partition. If this configured threshold is reached, the log is automatically fsynced, even though it wasn't explicitly requested.",
"related_topics": [
"xref:reference:properties/cluster-properties.adoc#flush_bytes[`flush_bytes`]"
],
"config_scope": "topic"
},
"flush.ms": {
"description": "The maximum delay (in ms) between two subsequent fsyncs. After this delay, the log is automatically fsynced.",
"related_topics": [
"xref:reference:properties/cluster-properties.adoc#flush_ms[`flush_ms`]"
],
"config_scope": "topic"
},
"http_authentication": {
"description": "A list of supported HTTP authentication mechanisms. Accepted Values: `BASIC`, `OIDC`.",
"related_topics": [],
"config_scope": "cluster"
},
"iceberg_backlog_controller_i_coeff": {
"description": "Controls how much past backlog (unprocessed work) affects the priority of processing new data in the Iceberg system. The system accumulates backlog errors over time, and this coefficient determines how much that accumulated backlog influences the urgency of data translation.",
"config_scope": "cluster"
},
"iceberg_backlog_controller_p_coeff": {
"description": "Proportional coefficient for the Iceberg backlog controller. Number of shares assigned to the datalake scheduling group will be proportional to the backlog size error. A negative value means larger and faster changes in the number of shares in the datalake scheduling group.",
"config_scope": "cluster"
},
"iceberg_catalog_base_location": {
"description": "Base path for the object-storage-backed Iceberg catalog. After Iceberg is enabled, do not change this value.",
"config_scope": "cluster"
},
"iceberg_catalog_type": {
"description": "Iceberg catalog type that Redpanda will use to commit table metadata updates. Supported types: `rest`, `object_storage`.\nNOTE: You must set <<iceberg_rest_catalog_endpoint,`iceberg_rest_catalog_endpoint`>> at the same time that you set `iceberg_catalog_type` to `rest`.",
"config_scope": "cluster"
},
"iceberg_default_catalog_namespace": {
"description": "The default namespace (database name) for Iceberg tables. All tables created by Redpanda will be placed in this namespace within the Iceberg catalog. Supports nested namespaces as an array of strings.\n\nIMPORTANT: This value must be configured before enabling Iceberg and must not be changed afterward. Changing it will cause Redpanda to lose track of existing tables.",
"version": "v25.3.5"
},
"iceberg_default_partition_spec": {
"description": "Default value for the `redpanda.iceberg.partition.spec` topic property that determines the partition spec for the Iceberg table corresponding to the topic.",
"related_topics": [
"self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-partition-spec[`redpanda.iceberg.partition.spec`]",
"xref:manage:iceberg/about-iceberg-topics.adoc#enable-iceberg-integration[Enable Iceberg integration]"
],
"config_scope": "cluster"
},
"iceberg_delete": {
"description": "Default value for the `redpanda.iceberg.delete` topic property that determines if the corresponding Iceberg table is deleted upon deleting the topic.",
"config_scope": "cluster"
},
"iceberg_disable_automatic_snapshot_expiry": {
"description": "Whether to disable automatic Iceberg snapshot expiry. This property may be useful if the Iceberg catalog expects to perform snapshot expiry on its own.",
"config_scope": "cluster"
},
"iceberg_disable_snapshot_tagging": {
"description": "Whether to disable tagging of Iceberg snapshots. These tags are used to ensure that the snapshots that Redpanda writes are retained during snapshot removal, which in turn, helps Redpanda ensure exactly-once delivery of records. Disabling tags is therefore not recommended, but it may be useful if the Iceberg catalog does not support tags.",
"config_scope": "cluster"
},
"iceberg_dlq_table_suffix": {
"description": "The suffix added to Iceberg table names when creating dead-letter queue (DLQ) tables for invalid records. Choose a suffix that won't conflict with existing table names. This is especially important for catalogs that don't support the tilde (~) character in table names. Don't change this value after creating DLQ tables.",
"config_scope": "cluster"
},
"iceberg_enabled": {
"description": "Enables the translation of topic data into Iceberg tables. Setting `iceberg_enabled` to `true` activates the feature at the cluster level, but each topic must also set the `redpanda.iceberg.enabled` topic-level property to `true` to use it. If `iceberg_enabled` is set to `false`, then the feature is disabled for all topics in the cluster, overriding any topic-level settings.",
"related_topics": [
"self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-enabled[`redpanda.iceberg.enabled`]"
],
"config_scope": "cluster"
},
"iceberg_invalid_record_action": {
"description": "Default value for the `redpanda.iceberg.invalid.record.action` topic property.",
"related_topics": [
"self-managed-only: xref:reference:properties/topic-properties.adoc#redpanda-iceberg-invalid-record-action[`redpanda.iceberg.invalid.record.action`]",
"self-managed-only: xref:manage:iceberg/about-iceberg-topics.adoc#troubleshoot-errors[Troubleshoot errors]"
],
"config_scope": "cluster"
},
"iceberg_latest_schema_cache_ttl_ms": {
"description": "The TTL for caching the latest schema during translation when using the xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`] iceberg mode. This setting controls how long the latest schema remains cached during translation, which affects schema refresh behavior and performance.",
"related_topics": [
"xref:manage:iceberg/specify-iceberg-schema.adoc#value_schema_latest[`value_schema_latest`]"
],
"config_scope": "cluster"
},
"iceberg_rest_catalog_authentication_mode": {
"description": "The authentication mode for client requests made to the Iceberg catalog. Choose from: `none`, `bearer`, `oauth2`, and `aws_sigv4`. In `bearer` mode, the token specified in `iceberg_rest_catalog_token` is used unconditonally, and no attempts are made to refresh the token. In `oauth2` mode, the credentials specified in `iceberg_rest_catalog_client_id` and `iceberg_rest_catalog_client_secret` are used to obtain a bearer token from the URI defined by `iceberg_rest_catalog_oauth2_server_uri`. In `aws_sigv4` mode, the same AWS credentials used for cloud storage (see `cloud_storage_region`, `cloud_storage_access_key`, `cloud_storage_secret_key`, and `cloud_storage_credentials_source`) are used to sign requests to AWS Glue catalog with SigV4.",
"config_scope": "cluster"
},
"iceberg_rest_catalog_aws_access_key": {
"description": "AWS access key for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`] when using aws_sigv4 authentication mode.",
"related_topics": [
"xref:reference:properties/object-storage-properties.adoc#cloud_storage_access_key[`cloud_storage_access_key`]"
],
"config_scope": "cluster"
},
"iceberg_rest_catalog_aws_credentials_source": {
"description": "*Accepted values*: `aws_instance_metadata`, `azure_aks_oidc_federation`, `azure_vm_instance_metadata`, `config_file`, `gcp_instance_metadata`, `sts`.",
"related_topics": [
"xref:reference:properties/object-storage-properties.adoc#cloud_storage_credentials_source[`cloud_storage_credentials_source`]"
],
"config_scope": "cluster"
},
"iceberg_rest_catalog_aws_region": {
"description": "AWS region for Iceberg REST catalog SigV4 authentication. If not set, falls back to xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`] when using aws_sigv4 authentication mode.",
"related_topics": [
"xref:reference:properties/object-storage-properties.adoc#cloud_storage_region[`cloud_storage_region`]"