From 8a9df652d3c4d9fd6d00c20286fb4a39c7d8bd47 Mon Sep 17 00:00:00 2001 From: Liquan Pei Date: Wed, 18 Mar 2020 02:33:58 -0700 Subject: [PATCH 1/4] Add demo tiflash docker compose file --- config/tiflash-learner.toml | 45 +++++++++ config/tiflash.toml | 79 ++++++++++++++++ docker-compose-tiflash.yml | 183 ++++++++++++++++++++++++++++++++++++ 3 files changed, 307 insertions(+) create mode 100644 config/tiflash-learner.toml create mode 100644 config/tiflash.toml create mode 100644 docker-compose-tiflash.yml diff --git a/config/tiflash-learner.toml b/config/tiflash-learner.toml new file mode 100644 index 0000000..6f15c4c --- /dev/null +++ b/config/tiflash-learner.toml @@ -0,0 +1,45 @@ +log-file = "/logs/tiflash_tikv.log" + +[readpool] + +[readpool.coprocessor] + +[readpool.storage] + +[server] +engine-addr = "0.0.0.0:4030" +addr = "0.0.0.0:20280" +advertise-addr = "0.0.0.0:20280" +status-addr = "0.0.0.0:20292" + +[storage] +data-dir = "/data/flash" + +[pd] + +[metric] + +[raftstore] +capacity = "100GB" + +[coprocessor] + +[rocksdb] +wal-dir = "" + +[rocksdb.defaultcf] + +[rocksdb.lockcf] + +[rocksdb.writecf] + +[raftdb] + +[raftdb.defaultcf] + +[security] +ca-path = "" +cert-path = "" +key-path = "" + +[import] diff --git a/config/tiflash.toml b/config/tiflash.toml new file mode 100644 index 0000000..a28408e --- /dev/null +++ b/config/tiflash.toml @@ -0,0 +1,79 @@ +default_profile = "default" +display_name = "TiFlash" +listen_host = "0.0.0.0" +mark_cache_size = 5368709120 +tmp_path = "/data/tmp" +path = "/data" +tcp_port = 9110 +http_port = 8223 + +[flash] +tidb_status_addr = "tidb:10080" +service_addr = "0.0.0.0:4030" + +[flash.flash_cluster] +cluster_manager_path = "/tiflash/flash_cluster_manager" +log = "/logs/tiflash_cluster_manager.log" +master_ttl = 60 +refresh_interval = 20 +update_rule_interval = 5 + +[flash.proxy] +config = "/tiflash-learner.toml" + +[status] +metrics_port = 8234 + +[logger] +errorlog = "/logs/tiflash_error.log" +log = "/logs/tiflash.log" 
+count = 20 +level = "debug" +size = "1000M" + +[application] +runAsDaemon = true + +[raft] +pd_addr = "pd0:2379,pd1:2379,pd2:2379" +storage_engine = "tmt" + +[quotas] + +[quotas.default] + +[quotas.default.interval] +duration = 3600 +errors = 0 +execution_time = 0 +queries = 0 +read_rows = 0 +result_rows = 0 + +[users] + +[users.default] +password = "" +profile = "default" +quota = "default" + +[users.default.networks] +ip = "::/0" + +[users.readonly] +password = "" +profile = "readonly" +quota = "default" + +[users.readonly.networks] +ip = "::/0" + +[profiles] + +[profiles.default] +load_balancing = "random" +max_memory_usage = 10000000000 +use_uncompressed_cache = 0 + +[profiles.readonly] +readonly = 1 \ No newline at end of file diff --git a/docker-compose-tiflash.yml b/docker-compose-tiflash.yml new file mode 100644 index 0000000..d5b6d78 --- /dev/null +++ b/docker-compose-tiflash.yml @@ -0,0 +1,183 @@ +version: '2.1' + +services: + pd0: + image: pingcap/pd:latest + ports: + - "2379" + volumes: + - ./config/pd.toml:/pd.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --name=pd0 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd0:2379 + - --advertise-peer-urls=http://pd0:2380 + - --initial-cluster=pd0=http://pd0:2380,pd1=http://pd1:2380,pd2=http://pd2:2380 + - --data-dir=/data/pd0 + - --config=/pd.toml + - --log-file=/logs/pd0.log + restart: on-failure + pd1: + image: pingcap/pd:latest + ports: + - "2379" + volumes: + - ./config/pd.toml:/pd.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --name=pd1 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd1:2379 + - --advertise-peer-urls=http://pd1:2380 + - --initial-cluster=pd0=http://pd0:2380,pd1=http://pd1:2380,pd2=http://pd2:2380 + - --data-dir=/data/pd1 + - --config=/pd.toml + - --log-file=/logs/pd1.log + restart: on-failure + pd2: + image: pingcap/pd:latest + ports: + - "2379" 
+ volumes: + - ./config/pd.toml:/pd.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --name=pd2 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd2:2379 + - --advertise-peer-urls=http://pd2:2380 + - --initial-cluster=pd0=http://pd0:2380,pd1=http://pd1:2380,pd2=http://pd2:2380 + - --data-dir=/data/pd2 + - --config=/pd.toml + - --log-file=/logs/pd2.log + restart: on-failure + tikv0: + image: pingcap/tikv:latest + volumes: + - ./config/tikv.toml:/tikv.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv0:20160 + - --data-dir=/data/tikv0 + - --pd=pd0:2379,pd1:2379,pd2:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv0.log + depends_on: + - "pd0" + - "pd1" + - "pd2" + restart: on-failure + tikv1: + image: pingcap/tikv:latest + volumes: + - ./config/tikv.toml:/tikv.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv1:20160 + - --data-dir=/data/tikv1 + - --pd=pd0:2379,pd1:2379,pd2:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv1.log + depends_on: + - "pd0" + - "pd1" + - "pd2" + restart: on-failure + tikv2: + image: pingcap/tikv:latest + volumes: + - ./config/tikv.toml:/tikv.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv2:20160 + - --data-dir=/data/tikv2 + - --pd=pd0:2379,pd1:2379,pd2:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv2.log + depends_on: + - "pd0" + - "pd1" + - "pd2" + restart: on-failure + tidb: + image: pingcap/tidb:latest + ports: + - "4000:4000" + - "10080:10080" + volumes: + - ./config/tidb.toml:/tidb.toml:ro + - ./logs:/logs + command: + - --store=tikv + - --path=pd0:2379,pd1:2379,pd2:2379 + - --config=/tidb.toml + - --log-file=/logs/tidb.log + depends_on: + - "tikv0" + - "tikv1" + - "tikv2" + restart: on-failure + tiflash: + image: pingcap/tiflash:latest + volumes: + - ./config/tiflash.toml:/tiflash.toml:ro + 
- ./config/tiflash-learner.toml:/tiflash-learner.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --config=/tiflash.toml + depends_on: + - "tikv0" + - "tikv1" + - "tikv2" + - "tidb" + restart: on-failure + # monitors + pushgateway: + image: prom/pushgateway:v0.3.1 + command: + - --log.level=error + restart: on-failure + prometheus: + user: root + image: prom/prometheus:v2.2.1 + command: + - --log.level=error + - --storage.tsdb.path=/data/prometheus + - --config.file=/etc/prometheus/prometheus.yml + ports: + - "9090:9090" + volumes: + - ./config/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - ./config/pd.rules.yml:/etc/prometheus/pd.rules.yml:ro + - ./config/tikv.rules.yml:/etc/prometheus/tikv.rules.yml:ro + - ./config/tidb.rules.yml:/etc/prometheus/tidb.rules.yml:ro + - ./data:/data + restart: on-failure + grafana: + image: grafana/grafana:6.0.1 + user: "0" + environment: + GF_LOG_LEVEL: error + GF_PATHS_PROVISIONING: /etc/grafana/provisioning + GF_PATHS_CONFIG: /etc/grafana/grafana.ini + volumes: + - ./config/grafana:/etc/grafana + - ./config/dashboards:/tmp/dashboards + - ./data/grafana:/var/lib/grafana + ports: + - "3000:3000" + restart: on-failure From b0a947f1158fe539d6be215e504b837d20ab12b2 Mon Sep 17 00:00:00 2001 From: marsishandsome Date: Wed, 18 Mar 2020 22:01:29 +0800 Subject: [PATCH 2/4] support tiflash --- config/pd.toml | 1 + config/tiflash-learner.toml | 6 +++--- config/tiflash.toml | 4 ++-- docker-compose-tiflash.yml | 16 ++++++++-------- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/config/pd.toml b/config/pd.toml index b1562a5..5b44697 100644 --- a/config/pd.toml +++ b/config/pd.toml @@ -78,6 +78,7 @@ max-replicas = 3 # For example, ["zone", "rack"] means that we should place replicas to # different zones first, then to different racks if we don't have enough zones. location-labels = [] +enable-placement-rules = true [label-property] # Do not assign region leaders to stores that have these tags. 
diff --git a/config/tiflash-learner.toml b/config/tiflash-learner.toml index 6f15c4c..3ae50a5 100644 --- a/config/tiflash-learner.toml +++ b/config/tiflash-learner.toml @@ -9,8 +9,8 @@ log-file = "/logs/tiflash_tikv.log" [server] engine-addr = "0.0.0.0:4030" addr = "0.0.0.0:20280" -advertise-addr = "0.0.0.0:20280" -status-addr = "0.0.0.0:20292" +advertise-addr = "tiflash:20280" +#status-addr = "tiflash:20292" [storage] data-dir = "/data/flash" @@ -20,7 +20,7 @@ data-dir = "/data/flash" [metric] [raftstore] -capacity = "100GB" +capacity = "10GB" [coprocessor] diff --git a/config/tiflash.toml b/config/tiflash.toml index a28408e..d21d6e3 100644 --- a/config/tiflash.toml +++ b/config/tiflash.toml @@ -9,7 +9,7 @@ http_port = 8223 [flash] tidb_status_addr = "tidb:10080" -service_addr = "0.0.0.0:4030" +service_addr = "tiflash:4030" [flash.flash_cluster] cluster_manager_path = "/tiflash/flash_cluster_manager" @@ -76,4 +76,4 @@ max_memory_usage = 10000000000 use_uncompressed_cache = 0 [profiles.readonly] -readonly = 1 \ No newline at end of file +readonly = 1 diff --git a/docker-compose-tiflash.yml b/docker-compose-tiflash.yml index d5b6d78..956ff4f 100644 --- a/docker-compose-tiflash.yml +++ b/docker-compose-tiflash.yml @@ -2,7 +2,7 @@ version: '2.1' services: pd0: - image: pingcap/pd:latest + image: pingcap/pd:nightly ports: - "2379" volumes: @@ -21,7 +21,7 @@ services: - --log-file=/logs/pd0.log restart: on-failure pd1: - image: pingcap/pd:latest + image: pingcap/pd:nightly ports: - "2379" volumes: @@ -40,7 +40,7 @@ services: - --log-file=/logs/pd1.log restart: on-failure pd2: - image: pingcap/pd:latest + image: pingcap/pd:nightly ports: - "2379" volumes: @@ -59,7 +59,7 @@ services: - --log-file=/logs/pd2.log restart: on-failure tikv0: - image: pingcap/tikv:latest + image: pingcap/tikv:nightly volumes: - ./config/tikv.toml:/tikv.toml:ro - ./data:/data @@ -77,7 +77,7 @@ services: - "pd2" restart: on-failure tikv1: - image: pingcap/tikv:latest + image: pingcap/tikv:nightly 
volumes: - ./config/tikv.toml:/tikv.toml:ro - ./data:/data @@ -95,7 +95,7 @@ services: - "pd2" restart: on-failure tikv2: - image: pingcap/tikv:latest + image: pingcap/tikv:nightly volumes: - ./config/tikv.toml:/tikv.toml:ro - ./data:/data @@ -113,7 +113,7 @@ services: - "pd2" restart: on-failure tidb: - image: pingcap/tidb:latest + image: pingcap/tidb:nightly ports: - "4000:4000" - "10080:10080" @@ -131,7 +131,7 @@ services: - "tikv2" restart: on-failure tiflash: - image: pingcap/tiflash:latest + image: pingcap/tiflash:nightly volumes: - ./config/tiflash.toml:/tiflash.toml:ro - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro From d006165bca7eb121c7ef6d5ed868d79361f4ecba Mon Sep 17 00:00:00 2001 From: Liquan Pei Date: Wed, 18 Mar 2020 15:55:36 -0700 Subject: [PATCH 3/4] Use the tidb.toml from master --- config/pd-nightly.toml | 111 ++++++++++++++++++ config/tidb.toml | 225 +++++++++++++++++++++++++++++++------ docker-compose-tiflash.yml | 36 +++--- 3 files changed, 320 insertions(+), 52 deletions(-) create mode 100644 config/pd-nightly.toml diff --git a/config/pd-nightly.toml b/config/pd-nightly.toml new file mode 100644 index 0000000..37dd06c --- /dev/null +++ b/config/pd-nightly.toml @@ -0,0 +1,111 @@ +# PD Configuration. + +name = "pd" +data-dir = "default.pd" + +client-urls = "http://127.0.0.1:2379" +## if not set, use ${client-urls} +advertise-client-urls = "" + +peer-urls = "http://127.0.0.1:2380" +## if not set, use ${peer-urls} +advertise-peer-urls = "" + +initial-cluster = "pd=http://127.0.0.1:2380" +initial-cluster-state = "new" + +lease = 3 +tso-save-interval = "3s" + +enable-prevote = true + +[security] +## Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty +cacert-path = "" +## Path of file that contains X509 certificate in PEM format. +cert-path = "" +## Path of file that contains X509 key in PEM format. 
+key-path = "" + +cert-allowed-cn = ["example.com"] + +[log] +level = "info" + +## log format, one of json, text, console +# format = "text" + +## disable automatic timestamps in output +# disable-timestamp = false + +# file logging +[log.file] +# filename = "" +## max log file size in MB +# max-size = 300 +## max log file keep days +# max-days = 28 +## maximum number of old log files to retain +# max-backups = 7 + +[metric] +# prometheus client push interval, set "0s" to disable prometheus. +interval = "15s" +# prometheus pushgateway address, leaves it empty will disable prometheus. +address = "pushgateway:9091" + +[pd-server] +## the metric storage is the cluster metric storage. This is use for query metric data. +## Currently we use prometheus as metric storage, we may use PD/TiKV as metric storage later. +## For usability, recommended to temporarily set it to the prometheus address, eg: http://127.0.0.1:9090 +metric-storage = "" + +[schedule] +max-merge-region-size = 20 +max-merge-region-keys = 200000 +split-merge-interval = "1h" +max-snapshot-count = 3 +max-pending-peer-count = 16 +max-store-down-time = "30m" +leader-schedule-limit = 4 +region-schedule-limit = 2048 +replica-schedule-limit = 64 +merge-schedule-limit = 8 +hot-region-schedule-limit = 4 +## There are some policies supported: ["count", "size"], default: "count" +# leader-schedule-policy = "count" +## When the score difference between the leader or Region of the two stores is +## less than specified multiple times of the Region size, it is considered in balance by PD. +## If it equals 0.0, PD will automatically adjust it. +# tolerant-size-ratio = 0.0 + +## This three parameters control the merge scheduler behavior. +## If it is true, it means a region can only be merged into the next region of it. +# enable-one-way-merge = false +## If it is true, it means two region within different tables can be merged. +## This option only works when key type is "table". 
+# enable-cross-table-merge = false + +## customized schedulers, the format is as below +## if empty, it will use balance-leader, balance-region, hot-region as default +# [[schedule.schedulers]] +# type = "evict-leader" +# args = ["1"] + +[replication] +## The number of replicas for each region. +max-replicas = 3 +## The label keys specified the location of a store. +## The placement priorities is implied by the order of label keys. +## For example, ["zone", "rack"] means that we should place replicas to +## different zones first, then to different racks if we don't have enough zones. +location-labels = [] +## Strictly checks if the label of TiKV is matched with location labels. +# strictly-match-label = false +enable-placement-rules = true + +[label-property] +## Do not assign region leaders to stores that have these tags. +# [[label-property.reject-leader]] +# key = "zone" +# value = "cn1 \ No newline at end of file diff --git a/config/tidb.toml b/config/tidb.toml index 9f881b4..eda9aef 100644 --- a/config/tidb.toml +++ b/config/tidb.toml @@ -3,6 +3,9 @@ # TiDB server host. host = "0.0.0.0" +# tidb server advertise IP. +advertise-address = "" + # TiDB server port. port = 4000 @@ -19,7 +22,7 @@ socket = "" run-ddl = true # Schema lease duration, very dangerous to change only if you know what you do. -lease = "0" +lease = "45s" # When create table, split a separated region for it. It is recommended to # turn off this option if there will be a large number of tables created. @@ -28,40 +31,111 @@ split-table = true # The limit of concurrent executed sessions. token-limit = 1000 -# Only print a log when out of memory quota. -# Valid options: ["log", "cancel"] -oom-action = "log" +# The maximum memory available for a single SQL statement. Default: 1GB +mem-quota-query = 1073741824 -# Set the memory quota for a query in bytes. 
Default: 32GB -mem-quota-query = 34359738368 +# Controls whether to enable the temporary storage for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. +oom-use-tmp-storage = true + +# Specifies the temporary storage path for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. +# It defaults to `/tidb/tmp-storage` if it is unset. +# It only takes effect when `oom-use-tmp-storage` is `true`. +# tmp-storage-path = "/tmp/tidb/tmp-storage" + +# Specifies what operation TiDB performs when a single SQL statement exceeds the memory quota specified by mem-quota-query and cannot be spilled over to disk. +# Valid options: ["log", "cancel"] +oom-action = "cancel" # Enable coprocessor streaming. enable-streaming = false +# Enable batch commit for the DMLs. +enable-batch-dml = false + # Set system variable 'lower_case_table_names' lower-case-table-names = 2 +# Make "kill query" behavior compatible with MySQL. It's not recommend to +# turn on this option when TiDB server is behind a proxy. +compatible-kill-query = false + +# check mb4 value in utf8 is used to control whether to check the mb4 characters when the charset is utf8. +check-mb4-value-in-utf8 = true + +# treat-old-version-utf8-as-utf8mb4 use for upgrade compatibility. Set to true will treat old version table/column UTF8 charset as UTF8MB4. +treat-old-version-utf8-as-utf8mb4 = true + +# max-index-length is used to deal with compatibility issues from v3.0.7 and previous version upgrades. It can only be in [3072, 3072*4]. +max-index-length = 3072 + +# enable-table-lock is used to control table lock feature. Default is false, indicate the table lock feature is disabled. +enable-table-lock = false + +# delay-clean-table-lock is used to control the time (Milliseconds) of delay before unlock the table in the abnormal situation. 
+delay-clean-table-lock = 0 + +# Maximum number of the splitting region, which is used by the split region statement. +split-region-max-num = 1000 + +# enable the TiDB to fetch configs from PD and update itself during runtime. +# see https://github.com/pingcap/tidb/pull/13660 for more details. +enable-dynamic-config = true + +# alter-primary-key is used to control alter primary key feature. Default is false, indicate the alter primary key feature is disabled. +# If it is true, we can add the primary key by "alter table". However, if a table already exists before the switch is turned true and the data type of its primary key column is an integer, +# the primary key cannot be dropped. +alter-primary-key = false + +# server-version is used to change the version string of TiDB in the following scenarios: +# 1. the server version returned by builtin-function `VERSION()`. +# 2. the server version filled in handshake packets of MySQL Connection Protocol, see https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake for more details. +# if server-version = "", the default value(original TiDB version string) is used. +server-version = "" + +# repair mode is used to repair the broken table meta in TiKV in extreme cases. +repair-mode = false + +# Repair table list is used to list the tables in repair mode with the format like ["db.table",]. +# In repair mode, repairing table which is not in repair list will get wrong database or wrong table error. +repair-table-list = [] + +# The maximum permitted number of simultaneous client connections. +max-server-connections = 4096 + +# Whether new collations are enabled, as indicated by its name, this configuration entry take effect ONLY when a TiDB cluster bootstraps for the first time. +new_collations_enabled_on_first_bootstrap = false + [log] # Log level: debug, info, warn, error, fatal. -level = "error" +level = "info" # Log format, one of json, text, console. 
format = "text" -# Disable automatic timestamp in output -disable-timestamp = false +# Enable automatic timestamps in log output, if not set, it will be defaulted to true. +# enable-timestamp = true + +# Enable annotating logs with the full stack error message, if not set, it will be defaulted to false. +# enable-error-stack = false + +# Whether to enable slow query log. +enable-slow-log = true # Stores slow query log into separated files. -slow-query-file = "" +slow-query-file = "tidb-slow.log" # Queries with execution time greater than this value will be logged. (Milliseconds) slow-threshold = 300 +# record-plan-in-slow-log is used to enable record query plan in slow log. +# 0 is disable. 1 is enable. +record-plan-in-slow-log = 1 + # Queries with internal result greater than this value will be logged. expensive-threshold = 10000 # Maximum query length recorded in log. -query-log-max-len = 2048 +query-log-max-len = 4096 # File logging. [log.file] @@ -77,9 +151,6 @@ max-days = 0 # Maximum number of old log files to retain. No clean up by default. max-backups = 0 -# Rotate log by day -log-rotate = true - [security] # Path of file that contains list of trusted SSL CAs for connection with mysql client. ssl-ca = "" @@ -103,27 +174,37 @@ cluster-ssl-key = "" # If enable status report HTTP service. report-status = true +# TiDB status host. +status-host = "tidb" + +## status-host is the HTTP address for reporting the internal status of a TiDB server, for example: +## API for prometheus: http://${status-host}:${status_port}/metrics +## API for pprof: http://${status-host}:${status_port}/debug/pprof # TiDB status port. status-port = 10080 -# Prometheus pushgateway address, leaves it empty will disable prometheus push. -metrics-addr = "pushgateway:9091" +# Prometheus pushgateway address, leaves it empty will disable push to pushgateway. +metrics-addr = "" -# Prometheus client push interval in second, set \"0\" to disable prometheus push. 
+# Prometheus client push interval in second, set \"0\" to disable push to pushgateway. metrics-interval = 15 +# Record statements qps by database name if it is enabled. +record-db-qps = false + [performance] # Max CPUs to use, 0 use number of CPUs in the machine. max-procs = 0 + +# Max memory size to use, 0 use the total usable memory in the machine. +max-memory = 0 + # StmtCountLimit limits the max count of statement inside a transaction. stmt-count-limit = 5000 # Set keep alive option for tcp connection. tcp-keep-alive = true -# The maximum number of retries when commit a transaction. -retry-limit = 10 - # Whether support cartesian product. cross-join = true @@ -133,15 +214,28 @@ stats-lease = "3s" # Run auto analyze worker on this tidb-server. run-auto-analyze = true -# Probability to use the query feedback to update stats, 0 or 1 for always false/true. -feedback-probability = 0.0 +# Probability to use the query feedback to update stats, 0.0 or 1.0 for always false/true. +feedback-probability = 0.05 # The max number of query feedback that cache in memory. query-feedback-limit = 1024 # Pseudo stats will be used if the ratio between the modify count and # row count in statistics of a table is greater than it. -pseudo-estimate-ratio = 0.7 +pseudo-estimate-ratio = 0.8 + +# Force the priority of all statements in a specified priority. +# The value could be "NO_PRIORITY", "LOW_PRIORITY", "HIGH_PRIORITY" or "DELAYED". +force-priority = "NO_PRIORITY" + +# Bind info lease duration, which influences the duration of loading bind info and handling invalid bind. +bind-info-lease = "3s" + +# The limitation of the size in byte for the entries in one transaction. +# If using TiKV as the storage, the entry represents a key/value pair. +# NOTE: If binlog is enabled, this value should be less than 104857600(10M) because this is the maximum size that can be handled by Pumper. +# If binlog is not enabled, this value should be less than 10737418240(10G). 
+txn-total-size-limit = 104857600 [proxy-protocol] # PROXY protocol acceptable client networks. @@ -151,14 +245,10 @@ networks = "" # PROXY protocol header read timeout, unit is second header-timeout = 5 -[plan-cache] -enabled = false -capacity = 2560 -shards = 256 - [prepared-plan-cache] enabled = false capacity = 100 +memory-guard-ratio = 0.1 [opentracing] # Enable opentracing. @@ -213,7 +303,7 @@ local-agent-host-port = "" [tikv-client] # Max gRPC connections that will be established with each tikv-server. -grpc-connection-count = 16 +grpc-connection-count = 4 # After a duration of this time in seconds if the client doesn't see any activity it pings # the server to see if the transport is still alive. @@ -223,17 +313,84 @@ grpc-keepalive-time = 10 # and if no activity is seen even after that the connection is closed. grpc-keepalive-timeout = 3 -# max time for commit command, must be twice bigger than raft election timeout. +# Max time for commit command, must be twice bigger than raft election timeout. commit-timeout = "41s" -[binlog] +# Max batch size in gRPC. +max-batch-size = 128 +# Overload threshold of TiKV. +overload-threshold = 200 +# Max batch wait time in nanosecond to avoid waiting too long. 0 means disable this feature. +max-batch-wait-time = 0 +# Batch wait size, to avoid waiting too long. +batch-wait-size = 8 + +# Enable chunk encoded data for coprocessor requests. +enable-chunk-rpc = true + +# If a Region has not been accessed for more than the given duration (in seconds), it +# will be reloaded from the PD. +region-cache-ttl = 600 + +# store-limit is used to restrain TiDB from sending request to some stores which is up to the limit. +# If a store has been up to the limit, it will return error for the successive request in same store. +# default 0 means shutting off store limit. +store-limit = 0 + +[txn-local-latches] +# Enable local latches for transactions. Enable it when +# there are lots of conflicts between transactions. 
+enabled = false +capacity = 2048000 -# Socket file to write binlog. -binlog-socket = "" +[binlog] +# enable to write binlog. +# NOTE: If binlog is enabled, txn-total-size-limit should be less than 104857600(10M). +enable = false # WriteTimeout specifies how long it will wait for writing binlog to pump. write-timeout = "15s" -# If IgnoreError is true, when writting binlog meets error, TiDB would stop writting binlog, +# If IgnoreError is true, when writing binlog meets error, TiDB would stop writing binlog, # but still provide service. ignore-error = false + +# use socket file to write binlog, for compatible with kafka version tidb-binlog. +binlog-socket = "" + +# the strategy for sending binlog to pump, value can be "range" or "hash" now. +strategy = "range" + +[pessimistic-txn] +# enable pessimistic transaction. +enable = true + +# max retry count for a statement in a pessimistic transaction. +max-retry-count = 256 + +[stmt-summary] +# enable statement summary. +enable = true + +# max number of statements kept in memory. +max-stmt-count = 200 + +# max length of displayed normalized sql and sample sql. +max-sql-length = 4096 + +# the refresh interval of statement summary, it's counted in seconds. +refresh-interval = 1800 + +# the maximum history size of statement summary. +history-size = 24 + +# experimental section controls the features that are still experimental: their semantics, +# interfaces are subject to change, using these features in the production environment is not recommended. +[experimental] +# enable column attribute `auto_random` to be defined on the primary key column. +allow-auto-random = false + +# server level isolation read by engines and labels +[isolation-read] +# engines means allow the tidb server read data from which types of engines. options: "tikv", "tiflash", "tidb". 
+engines = ["tikv", "tiflash", "tidb"] \ No newline at end of file diff --git a/docker-compose-tiflash.yml b/docker-compose-tiflash.yml index 956ff4f..1c1f194 100644 --- a/docker-compose-tiflash.yml +++ b/docker-compose-tiflash.yml @@ -6,7 +6,7 @@ services: ports: - "2379" volumes: - - ./config/pd.toml:/pd.toml:ro + - ./config/pd-nightly.toml:/pd.toml:ro - ./data:/data - ./logs:/logs command: @@ -25,7 +25,7 @@ services: ports: - "2379" volumes: - - ./config/pd.toml:/pd.toml:ro + - ./config/pd-nightly.toml:/pd.toml:ro - ./data:/data - ./logs:/logs command: @@ -44,7 +44,7 @@ services: ports: - "2379" volumes: - - ./config/pd.toml:/pd.toml:ro + - ./config/pd-nightly.toml:/pd.toml:ro - ./data:/data - ./logs:/logs command: @@ -131,20 +131,20 @@ services: - "tikv2" restart: on-failure tiflash: - image: pingcap/tiflash:nightly - volumes: - - ./config/tiflash.toml:/tiflash.toml:ro - - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro - - ./data:/data - - ./logs:/logs - command: - - --config=/tiflash.toml - depends_on: - - "tikv0" - - "tikv1" - - "tikv2" - - "tidb" - restart: on-failure + image: pingcap/tiflash:nightly + volumes: + - ./config/tiflash.toml:/tiflash.toml:ro + - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --config=/tiflash.toml + depends_on: + - "tikv0" + - "tikv1" + - "tikv2" + - "tidb" + restart: on-failure # monitors pushgateway: image: prom/pushgateway:v0.3.1 @@ -159,7 +159,7 @@ services: - --storage.tsdb.path=/data/prometheus - --config.file=/etc/prometheus/prometheus.yml ports: - - "9090:9090" + - "9091:9091" volumes: - ./config/prometheus.yml:/etc/prometheus/prometheus.yml:ro - ./config/pd.rules.yml:/etc/prometheus/pd.rules.yml:ro From 9798bb8ef5d2ff3029254eb72ac6cf5c619fe82d Mon Sep 17 00:00:00 2001 From: Liquan Pei Date: Wed, 18 Mar 2020 22:08:13 -0700 Subject: [PATCH 4/4] Add docker compose for TiFlash --- config/pd-nightly.toml | 2 +- config/tidb-nightly.toml | 396 ++++++++++++++++++ 
config/tidb.toml | 225 ++-------- ...rner.toml => tiflash-learner-nightly.toml} | 2 +- config/{tiflash.toml => tiflash-nightly.toml} | 2 +- docker-compose-tiflash-nightly.yml | 68 +++ docker-compose-tiflash.yml | 183 -------- 7 files changed, 501 insertions(+), 377 deletions(-) create mode 100644 config/tidb-nightly.toml rename config/{tiflash-learner.toml => tiflash-learner-nightly.toml} (94%) rename config/{tiflash.toml => tiflash-nightly.toml} (96%) create mode 100644 docker-compose-tiflash-nightly.yml delete mode 100644 docker-compose-tiflash.yml diff --git a/config/pd-nightly.toml b/config/pd-nightly.toml index 37dd06c..4d64b50 100644 --- a/config/pd-nightly.toml +++ b/config/pd-nightly.toml @@ -94,7 +94,7 @@ hot-region-schedule-limit = 4 [replication] ## The number of replicas for each region. -max-replicas = 3 +max-replicas = 1 ## The label keys specified the location of a store. ## The placement priorities is implied by the order of label keys. ## For example, ["zone", "rack"] means that we should place replicas to diff --git a/config/tidb-nightly.toml b/config/tidb-nightly.toml new file mode 100644 index 0000000..eda9aef --- /dev/null +++ b/config/tidb-nightly.toml @@ -0,0 +1,396 @@ +# TiDB Configuration. + +# TiDB server host. +host = "0.0.0.0" + +# tidb server advertise IP. +advertise-address = "" + +# TiDB server port. +port = 4000 + +# Registered store name, [tikv, mocktikv] +store = "mocktikv" + +# TiDB storage path. +path = "/tmp/tidb" + +# The socket file to use for connection. +socket = "" + +# Run ddl worker on this tidb-server. +run-ddl = true + +# Schema lease duration, very dangerous to change only if you know what you do. +lease = "45s" + +# When create table, split a separated region for it. It is recommended to +# turn off this option if there will be a large number of tables created. +split-table = true + +# The limit of concurrent executed sessions. +token-limit = 1000 + +# The maximum memory available for a single SQL statement. 
Default: 1GB +mem-quota-query = 1073741824 + +# Controls whether to enable the temporary storage for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. +oom-use-tmp-storage = true + +# Specifies the temporary storage path for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. +# It defaults to `/tidb/tmp-storage` if it is unset. +# It only takes effect when `oom-use-tmp-storage` is `true`. +# tmp-storage-path = "/tmp/tidb/tmp-storage" + +# Specifies what operation TiDB performs when a single SQL statement exceeds the memory quota specified by mem-quota-query and cannot be spilled over to disk. +# Valid options: ["log", "cancel"] +oom-action = "cancel" + +# Enable coprocessor streaming. +enable-streaming = false + +# Enable batch commit for the DMLs. +enable-batch-dml = false + +# Set system variable 'lower_case_table_names' +lower-case-table-names = 2 + +# Make "kill query" behavior compatible with MySQL. It's not recommend to +# turn on this option when TiDB server is behind a proxy. +compatible-kill-query = false + +# check mb4 value in utf8 is used to control whether to check the mb4 characters when the charset is utf8. +check-mb4-value-in-utf8 = true + +# treat-old-version-utf8-as-utf8mb4 use for upgrade compatibility. Set to true will treat old version table/column UTF8 charset as UTF8MB4. +treat-old-version-utf8-as-utf8mb4 = true + +# max-index-length is used to deal with compatibility issues from v3.0.7 and previous version upgrades. It can only be in [3072, 3072*4]. +max-index-length = 3072 + +# enable-table-lock is used to control table lock feature. Default is false, indicate the table lock feature is disabled. +enable-table-lock = false + +# delay-clean-table-lock is used to control the time (Milliseconds) of delay before unlock the table in the abnormal situation. 
+delay-clean-table-lock = 0 + +# Maximum number of the splitting region, which is used by the split region statement. +split-region-max-num = 1000 + +# enable the TiDB to fetch configs from PD and update itself during runtime. +# see https://github.com/pingcap/tidb/pull/13660 for more details. +enable-dynamic-config = true + +# alter-primary-key is used to control alter primary key feature. Default is false, indicate the alter primary key feature is disabled. +# If it is true, we can add the primary key by "alter table". However, if a table already exists before the switch is turned true and the data type of its primary key column is an integer, +# the primary key cannot be dropped. +alter-primary-key = false + +# server-version is used to change the version string of TiDB in the following scenarios: +# 1. the server version returned by builtin-function `VERSION()`. +# 2. the server version filled in handshake packets of MySQL Connection Protocol, see https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake for more details. +# if server-version = "", the default value(original TiDB version string) is used. +server-version = "" + +# repair mode is used to repair the broken table meta in TiKV in extreme cases. +repair-mode = false + +# Repair table list is used to list the tables in repair mode with the format like ["db.table",]. +# In repair mode, repairing table which is not in repair list will get wrong database or wrong table error. +repair-table-list = [] + +# The maximum permitted number of simultaneous client connections. +max-server-connections = 4096 + +# Whether new collations are enabled, as indicated by its name, this configuration entry take effect ONLY when a TiDB cluster bootstraps for the first time. +new_collations_enabled_on_first_bootstrap = false + +[log] +# Log level: debug, info, warn, error, fatal. +level = "info" + +# Log format, one of json, text, console. 
+format = "text" + +# Enable automatic timestamps in log output, if not set, it will be defaulted to true. +# enable-timestamp = true + +# Enable annotating logs with the full stack error message, if not set, it will be defaulted to false. +# enable-error-stack = false + +# Whether to enable slow query log. +enable-slow-log = true + +# Stores slow query log into separated files. +slow-query-file = "tidb-slow.log" + +# Queries with execution time greater than this value will be logged. (Milliseconds) +slow-threshold = 300 + +# record-plan-in-slow-log is used to enable record query plan in slow log. +# 0 is disable. 1 is enable. +record-plan-in-slow-log = 1 + +# Queries with internal result greater than this value will be logged. +expensive-threshold = 10000 + +# Maximum query length recorded in log. +query-log-max-len = 4096 + +# File logging. +[log.file] +# Log file name. +filename = "" + +# Max log file size in MB (upper limit to 4096MB). +max-size = 300 + +# Max log file keep days. No clean up by default. +max-days = 0 + +# Maximum number of old log files to retain. No clean up by default. +max-backups = 0 + +[security] +# Path of file that contains list of trusted SSL CAs for connection with mysql client. +ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with mysql client. +ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with mysql client. +ssl-key = "" + +# Path of file that contains list of trusted SSL CAs for connection with cluster components. +cluster-ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with cluster components. +cluster-ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with cluster components. +cluster-ssl-key = "" + +[status] +# If enable status report HTTP service. +report-status = true + +# TiDB status host. 
+status-host = "tidb" + +## status-host is the HTTP address for reporting the internal status of a TiDB server, for example: +## API for prometheus: http://${status-host}:${status_port}/metrics +## API for pprof: http://${status-host}:${status_port}/debug/pprof +# TiDB status port. +status-port = 10080 + +# Prometheus pushgateway address, leaves it empty will disable push to pushgateway. +metrics-addr = "" + +# Prometheus client push interval in second, set \"0\" to disable push to pushgateway. +metrics-interval = 15 + +# Record statements qps by database name if it is enabled. +record-db-qps = false + +[performance] +# Max CPUs to use, 0 use number of CPUs in the machine. +max-procs = 0 + +# Max memory size to use, 0 use the total usable memory in the machine. +max-memory = 0 + +# StmtCountLimit limits the max count of statement inside a transaction. +stmt-count-limit = 5000 + +# Set keep alive option for tcp connection. +tcp-keep-alive = true + +# Whether support cartesian product. +cross-join = true + +# Stats lease duration, which influences the time of analyze and stats load. +stats-lease = "3s" + +# Run auto analyze worker on this tidb-server. +run-auto-analyze = true + +# Probability to use the query feedback to update stats, 0.0 or 1.0 for always false/true. +feedback-probability = 0.05 + +# The max number of query feedback that cache in memory. +query-feedback-limit = 1024 + +# Pseudo stats will be used if the ratio between the modify count and +# row count in statistics of a table is greater than it. +pseudo-estimate-ratio = 0.8 + +# Force the priority of all statements in a specified priority. +# The value could be "NO_PRIORITY", "LOW_PRIORITY", "HIGH_PRIORITY" or "DELAYED". +force-priority = "NO_PRIORITY" + +# Bind info lease duration, which influences the duration of loading bind info and handling invalid bind. +bind-info-lease = "3s" + +# The limitation of the size in byte for the entries in one transaction. 
+# If using TiKV as the storage, the entry represents a key/value pair. +# NOTE: If binlog is enabled, this value should be less than 104857600(10M) because this is the maximum size that can be handled by Pumper. +# If binlog is not enabled, this value should be less than 10737418240(10G). +txn-total-size-limit = 104857600 + +[proxy-protocol] +# PROXY protocol acceptable client networks. +# Empty string means disable PROXY protocol, * means all networks. +networks = "" + +# PROXY protocol header read timeout, unit is second +header-timeout = 5 + +[prepared-plan-cache] +enabled = false +capacity = 100 +memory-guard-ratio = 0.1 + +[opentracing] +# Enable opentracing. +enable = false + +# Whether to enable the rpc metrics. +rpc-metrics = false + +[opentracing.sampler] +# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote +type = "const" + +# Param is a value passed to the sampler. +# Valid values for Param field are: +# - for "const" sampler, 0 or 1 for always false/true respectively +# - for "probabilistic" sampler, a probability between 0 and 1 +# - for "rateLimiting" sampler, the number of spans per second +# - for "remote" sampler, param is the same as for "probabilistic" +# and indicates the initial sampling rate before the actual one +# is received from the mothership +param = 1.0 + +# SamplingServerURL is the address of jaeger-agent's HTTP sampling server +sampling-server-url = "" + +# MaxOperations is the maximum number of operations that the sampler +# will keep track of. If an operation is not tracked, a default probabilistic +# sampler will be used rather than the per operation specific sampler. +max-operations = 0 + +# SamplingRefreshInterval controls how often the remotely controlled sampler will poll +# jaeger-agent for the appropriate sampling strategy. +sampling-refresh-interval = 0 + +[opentracing.reporter] +# QueueSize controls how many spans the reporter can keep in memory before it starts dropping +# new spans. 
The queue is continuously drained by a background go-routine, as fast as spans +# can be sent out of process. +queue-size = 0 + +# BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. +# It is generally not useful, as it only matters for very low traffic services. +buffer-flush-interval = 0 + +# LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter +# and logs all submitted spans. Main Configuration.Logger must be initialized in the code +# for this option to have any effect. +log-spans = false + +# LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address +local-agent-host-port = "" + +[tikv-client] +# Max gRPC connections that will be established with each tikv-server. +grpc-connection-count = 4 + +# After a duration of this time in seconds if the client doesn't see any activity it pings +# the server to see if the transport is still alive. +grpc-keepalive-time = 10 + +# After having pinged for keepalive check, the client waits for a duration of Timeout in seconds +# and if no activity is seen even after that the connection is closed. +grpc-keepalive-timeout = 3 + +# Max time for commit command, must be twice bigger than raft election timeout. +commit-timeout = "41s" + +# Max batch size in gRPC. +max-batch-size = 128 +# Overload threshold of TiKV. +overload-threshold = 200 +# Max batch wait time in nanosecond to avoid waiting too long. 0 means disable this feature. +max-batch-wait-time = 0 +# Batch wait size, to avoid waiting too long. +batch-wait-size = 8 + +# Enable chunk encoded data for coprocessor requests. +enable-chunk-rpc = true + +# If a Region has not been accessed for more than the given duration (in seconds), it +# will be reloaded from the PD. +region-cache-ttl = 600 + +# store-limit is used to restrain TiDB from sending request to some stores which is up to the limit. 
+# If a store has been up to the limit, it will return error for the successive request in same store. +# default 0 means shutting off store limit. +store-limit = 0 + +[txn-local-latches] +# Enable local latches for transactions. Enable it when +# there are lots of conflicts between transactions. +enabled = false +capacity = 2048000 + +[binlog] +# enable to write binlog. +# NOTE: If binlog is enabled, txn-total-size-limit should be less than 104857600(10M). +enable = false + +# WriteTimeout specifies how long it will wait for writing binlog to pump. +write-timeout = "15s" + +# If IgnoreError is true, when writing binlog meets error, TiDB would stop writing binlog, +# but still provide service. +ignore-error = false + +# use socket file to write binlog, for compatible with kafka version tidb-binlog. +binlog-socket = "" + +# the strategy for sending binlog to pump, value can be "range" or "hash" now. +strategy = "range" + +[pessimistic-txn] +# enable pessimistic transaction. +enable = true + +# max retry count for a statement in a pessimistic transaction. +max-retry-count = 256 + +[stmt-summary] +# enable statement summary. +enable = true + +# max number of statements kept in memory. +max-stmt-count = 200 + +# max length of displayed normalized sql and sample sql. +max-sql-length = 4096 + +# the refresh interval of statement summary, it's counted in seconds. +refresh-interval = 1800 + +# the maximum history size of statement summary. +history-size = 24 + +# experimental section controls the features that are still experimental: their semantics, +# interfaces are subject to change, using these features in the production environment is not recommended. +[experimental] +# enable column attribute `auto_random` to be defined on the primary key column. +allow-auto-random = false + +# server level isolation read by engines and labels +[isolation-read] +# engines means allow the tidb server read data from which types of engines. options: "tikv", "tiflash", "tidb". 
+engines = ["tikv", "tiflash", "tidb"] \ No newline at end of file diff --git a/config/tidb.toml b/config/tidb.toml index eda9aef..9f881b4 100644 --- a/config/tidb.toml +++ b/config/tidb.toml @@ -3,9 +3,6 @@ # TiDB server host. host = "0.0.0.0" -# tidb server advertise IP. -advertise-address = "" - # TiDB server port. port = 4000 @@ -22,7 +19,7 @@ socket = "" run-ddl = true # Schema lease duration, very dangerous to change only if you know what you do. -lease = "45s" +lease = "0" # When create table, split a separated region for it. It is recommended to # turn off this option if there will be a large number of tables created. @@ -31,111 +28,40 @@ split-table = true # The limit of concurrent executed sessions. token-limit = 1000 -# The maximum memory available for a single SQL statement. Default: 1GB -mem-quota-query = 1073741824 - -# Controls whether to enable the temporary storage for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. -oom-use-tmp-storage = true - -# Specifies the temporary storage path for some operators when a single SQL statement exceeds the memory quota specified by mem-quota-query. -# It defaults to `/tidb/tmp-storage` if it is unset. -# It only takes effect when `oom-use-tmp-storage` is `true`. -# tmp-storage-path = "/tmp/tidb/tmp-storage" - -# Specifies what operation TiDB performs when a single SQL statement exceeds the memory quota specified by mem-quota-query and cannot be spilled over to disk. +# Only print a log when out of memory quota. # Valid options: ["log", "cancel"] -oom-action = "cancel" +oom-action = "log" + +# Set the memory quota for a query in bytes. Default: 32GB +mem-quota-query = 34359738368 # Enable coprocessor streaming. enable-streaming = false -# Enable batch commit for the DMLs. -enable-batch-dml = false - # Set system variable 'lower_case_table_names' lower-case-table-names = 2 -# Make "kill query" behavior compatible with MySQL. 
It's not recommend to -# turn on this option when TiDB server is behind a proxy. -compatible-kill-query = false - -# check mb4 value in utf8 is used to control whether to check the mb4 characters when the charset is utf8. -check-mb4-value-in-utf8 = true - -# treat-old-version-utf8-as-utf8mb4 use for upgrade compatibility. Set to true will treat old version table/column UTF8 charset as UTF8MB4. -treat-old-version-utf8-as-utf8mb4 = true - -# max-index-length is used to deal with compatibility issues from v3.0.7 and previous version upgrades. It can only be in [3072, 3072*4]. -max-index-length = 3072 - -# enable-table-lock is used to control table lock feature. Default is false, indicate the table lock feature is disabled. -enable-table-lock = false - -# delay-clean-table-lock is used to control the time (Milliseconds) of delay before unlock the table in the abnormal situation. -delay-clean-table-lock = 0 - -# Maximum number of the splitting region, which is used by the split region statement. -split-region-max-num = 1000 - -# enable the TiDB to fetch configs from PD and update itself during runtime. -# see https://github.com/pingcap/tidb/pull/13660 for more details. -enable-dynamic-config = true - -# alter-primary-key is used to control alter primary key feature. Default is false, indicate the alter primary key feature is disabled. -# If it is true, we can add the primary key by "alter table". However, if a table already exists before the switch is turned true and the data type of its primary key column is an integer, -# the primary key cannot be dropped. -alter-primary-key = false - -# server-version is used to change the version string of TiDB in the following scenarios: -# 1. the server version returned by builtin-function `VERSION()`. -# 2. the server version filled in handshake packets of MySQL Connection Protocol, see https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake for more details. 
-# if server-version = "", the default value(original TiDB version string) is used. -server-version = "" - -# repair mode is used to repair the broken table meta in TiKV in extreme cases. -repair-mode = false - -# Repair table list is used to list the tables in repair mode with the format like ["db.table",]. -# In repair mode, repairing table which is not in repair list will get wrong database or wrong table error. -repair-table-list = [] - -# The maximum permitted number of simultaneous client connections. -max-server-connections = 4096 - -# Whether new collations are enabled, as indicated by its name, this configuration entry take effect ONLY when a TiDB cluster bootstraps for the first time. -new_collations_enabled_on_first_bootstrap = false - [log] # Log level: debug, info, warn, error, fatal. -level = "info" +level = "error" # Log format, one of json, text, console. format = "text" -# Enable automatic timestamps in log output, if not set, it will be defaulted to true. -# enable-timestamp = true - -# Enable annotating logs with the full stack error message, if not set, it will be defaulted to false. -# enable-error-stack = false - -# Whether to enable slow query log. -enable-slow-log = true +# Disable automatic timestamp in output +disable-timestamp = false # Stores slow query log into separated files. -slow-query-file = "tidb-slow.log" +slow-query-file = "" # Queries with execution time greater than this value will be logged. (Milliseconds) slow-threshold = 300 -# record-plan-in-slow-log is used to enable record query plan in slow log. -# 0 is disable. 1 is enable. -record-plan-in-slow-log = 1 - # Queries with internal result greater than this value will be logged. expensive-threshold = 10000 # Maximum query length recorded in log. -query-log-max-len = 4096 +query-log-max-len = 2048 # File logging. [log.file] @@ -151,6 +77,9 @@ max-days = 0 # Maximum number of old log files to retain. No clean up by default. 
max-backups = 0 +# Rotate log by day +log-rotate = true + [security] # Path of file that contains list of trusted SSL CAs for connection with mysql client. ssl-ca = "" @@ -174,37 +103,27 @@ cluster-ssl-key = "" # If enable status report HTTP service. report-status = true -# TiDB status host. -status-host = "tidb" - -## status-host is the HTTP address for reporting the internal status of a TiDB server, for example: -## API for prometheus: http://${status-host}:${status_port}/metrics -## API for pprof: http://${status-host}:${status_port}/debug/pprof # TiDB status port. status-port = 10080 -# Prometheus pushgateway address, leaves it empty will disable push to pushgateway. -metrics-addr = "" +# Prometheus pushgateway address, leaves it empty will disable prometheus push. +metrics-addr = "pushgateway:9091" -# Prometheus client push interval in second, set \"0\" to disable push to pushgateway. +# Prometheus client push interval in second, set \"0\" to disable prometheus push. metrics-interval = 15 -# Record statements qps by database name if it is enabled. -record-db-qps = false - [performance] # Max CPUs to use, 0 use number of CPUs in the machine. max-procs = 0 - -# Max memory size to use, 0 use the total usable memory in the machine. -max-memory = 0 - # StmtCountLimit limits the max count of statement inside a transaction. stmt-count-limit = 5000 # Set keep alive option for tcp connection. tcp-keep-alive = true +# The maximum number of retries when commit a transaction. +retry-limit = 10 + # Whether support cartesian product. cross-join = true @@ -214,28 +133,15 @@ stats-lease = "3s" # Run auto analyze worker on this tidb-server. run-auto-analyze = true -# Probability to use the query feedback to update stats, 0.0 or 1.0 for always false/true. -feedback-probability = 0.05 +# Probability to use the query feedback to update stats, 0 or 1 for always false/true. +feedback-probability = 0.0 # The max number of query feedback that cache in memory. 
query-feedback-limit = 1024 # Pseudo stats will be used if the ratio between the modify count and # row count in statistics of a table is greater than it. -pseudo-estimate-ratio = 0.8 - -# Force the priority of all statements in a specified priority. -# The value could be "NO_PRIORITY", "LOW_PRIORITY", "HIGH_PRIORITY" or "DELAYED". -force-priority = "NO_PRIORITY" - -# Bind info lease duration, which influences the duration of loading bind info and handling invalid bind. -bind-info-lease = "3s" - -# The limitation of the size in byte for the entries in one transaction. -# If using TiKV as the storage, the entry represents a key/value pair. -# NOTE: If binlog is enabled, this value should be less than 104857600(10M) because this is the maximum size that can be handled by Pumper. -# If binlog is not enabled, this value should be less than 10737418240(10G). -txn-total-size-limit = 104857600 +pseudo-estimate-ratio = 0.7 [proxy-protocol] # PROXY protocol acceptable client networks. @@ -245,10 +151,14 @@ networks = "" # PROXY protocol header read timeout, unit is second header-timeout = 5 +[plan-cache] +enabled = false +capacity = 2560 +shards = 256 + [prepared-plan-cache] enabled = false capacity = 100 -memory-guard-ratio = 0.1 [opentracing] # Enable opentracing. @@ -303,7 +213,7 @@ local-agent-host-port = "" [tikv-client] # Max gRPC connections that will be established with each tikv-server. -grpc-connection-count = 4 +grpc-connection-count = 16 # After a duration of this time in seconds if the client doesn't see any activity it pings # the server to see if the transport is still alive. @@ -313,84 +223,17 @@ grpc-keepalive-time = 10 # and if no activity is seen even after that the connection is closed. grpc-keepalive-timeout = 3 -# Max time for commit command, must be twice bigger than raft election timeout. +# max time for commit command, must be twice bigger than raft election timeout. commit-timeout = "41s" -# Max batch size in gRPC. 
-max-batch-size = 128
-# Overload threshold of TiKV.
-overload-threshold = 200
-# Max batch wait time in nanosecond to avoid waiting too long. 0 means disable this feature.
-max-batch-wait-time = 0
-# Batch wait size, to avoid waiting too long.
-batch-wait-size = 8
-
-# Enable chunk encoded data for coprocessor requests.
-enable-chunk-rpc = true
-
-# If a Region has not been accessed for more than the given duration (in seconds), it
-# will be reloaded from the PD.
-region-cache-ttl = 600
-
-# store-limit is used to restrain TiDB from sending request to some stores which is up to the limit.
-# If a store has been up to the limit, it will return error for the successive request in same store.
-# default 0 means shutting off store limit.
-store-limit = 0
-
-[txn-local-latches]
-# Enable local latches for transactions. Enable it when
-# there are lots of conflicts between transactions.
-enabled = false
-capacity = 2048000
-
 [binlog]
-# enable to write binlog.
-# NOTE: If binlog is enabled, txn-total-size-limit should be less than 104857600(10M).
-enable = false
+
+# Socket file to write binlog.
+binlog-socket = ""
 
 # WriteTimeout specifies how long it will wait for writing binlog to pump.
 write-timeout = "15s"
 
-# If IgnoreError is true, when writing binlog meets error, TiDB would stop writing binlog,
+# If IgnoreError is true, when writing binlog meets error, TiDB would stop writing binlog,
 # but still provide service.
 ignore-error = false
-
-# use socket file to write binlog, for compatible with kafka version tidb-binlog.
-binlog-socket = ""
-
-# the strategy for sending binlog to pump, value can be "range" or "hash" now.
-strategy = "range"
-
-[pessimistic-txn]
-# enable pessimistic transaction.
-enable = true
-
-# max retry count for a statement in a pessimistic transaction.
-max-retry-count = 256
-
-[stmt-summary]
-# enable statement summary.
-enable = true
-
-# max number of statements kept in memory.
-max-stmt-count = 200 - -# max length of displayed normalized sql and sample sql. -max-sql-length = 4096 - -# the refresh interval of statement summary, it's counted in seconds. -refresh-interval = 1800 - -# the maximum history size of statement summary. -history-size = 24 - -# experimental section controls the features that are still experimental: their semantics, -# interfaces are subject to change, using these features in the production environment is not recommended. -[experimental] -# enable column attribute `auto_random` to be defined on the primary key column. -allow-auto-random = false - -# server level isolation read by engines and labels -[isolation-read] -# engines means allow the tidb server read data from which types of engines. options: "tikv", "tiflash", "tidb". -engines = ["tikv", "tiflash", "tidb"] \ No newline at end of file diff --git a/config/tiflash-learner.toml b/config/tiflash-learner-nightly.toml similarity index 94% rename from config/tiflash-learner.toml rename to config/tiflash-learner-nightly.toml index 3ae50a5..bb9cfa9 100644 --- a/config/tiflash-learner.toml +++ b/config/tiflash-learner-nightly.toml @@ -7,7 +7,7 @@ log-file = "/logs/tiflash_tikv.log" [readpool.storage] [server] -engine-addr = "0.0.0.0:4030" +engine-addr = "tiflash:4030" addr = "0.0.0.0:20280" advertise-addr = "tiflash:20280" #status-addr = "tiflash:20292" diff --git a/config/tiflash.toml b/config/tiflash-nightly.toml similarity index 96% rename from config/tiflash.toml rename to config/tiflash-nightly.toml index d21d6e3..2d87f12 100644 --- a/config/tiflash.toml +++ b/config/tiflash-nightly.toml @@ -35,7 +35,7 @@ size = "1000M" runAsDaemon = true [raft] -pd_addr = "pd0:2379,pd1:2379,pd2:2379" +pd_addr = "pd0:2379" storage_engine = "tmt" [quotas] diff --git a/docker-compose-tiflash-nightly.yml b/docker-compose-tiflash-nightly.yml new file mode 100644 index 0000000..3a174dc --- /dev/null +++ b/docker-compose-tiflash-nightly.yml @@ -0,0 +1,68 @@ +version: '2.1' + 
+services: + pd0: + image: pingcap/pd:nightly + ports: + - "2379" + volumes: + - ./config/pd-nightly.toml:/pd.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --name=pd0 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd0:2379 + - --advertise-peer-urls=http://pd0:2380 + - --initial-cluster=pd0=http://pd0:2380 + - --data-dir=/data/pd0 + - --config=/pd.toml + - --log-file=/logs/pd0.log + restart: on-failure + tikv0: + image: pingcap/tikv:nightly + volumes: + - ./config/tikv.toml:/tikv.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv0:20160 + - --data-dir=/data/tikv0 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv0.log + depends_on: + - "pd0" + restart: on-failure + tidb: + image: pingcap/tidb:nightly + ports: + - "4000:4000" + - "10080:10080" + volumes: + - ./config/tidb-nightly.toml:/tidb.toml:ro + - ./logs:/logs + command: + - --store=tikv + - --path=pd0:2379 + - --config=/tidb.toml + - --log-file=/logs/tidb.log + depends_on: + - "tikv0" + restart: on-failure + tiflash: + image: pingcap/tiflash:nightly + volumes: + - ./config/tiflash-nightly.toml:/tiflash.toml:ro + - ./config/tiflash-learner-nightly.toml:/tiflash-learner.toml:ro + - ./data:/data + - ./logs:/logs + command: + - --config=/tiflash.toml + depends_on: + - "tikv0" + - "tidb" + restart: on-failure + \ No newline at end of file diff --git a/docker-compose-tiflash.yml b/docker-compose-tiflash.yml deleted file mode 100644 index 1c1f194..0000000 --- a/docker-compose-tiflash.yml +++ /dev/null @@ -1,183 +0,0 @@ -version: '2.1' - -services: - pd0: - image: pingcap/pd:nightly - ports: - - "2379" - volumes: - - ./config/pd-nightly.toml:/pd.toml:ro - - ./data:/data - - ./logs:/logs - command: - - --name=pd0 - - --client-urls=http://0.0.0.0:2379 - - --peer-urls=http://0.0.0.0:2380 - - --advertise-client-urls=http://pd0:2379 - - --advertise-peer-urls=http://pd0:2380 - - 
--initial-cluster=pd0=http://pd0:2380,pd1=http://pd1:2380,pd2=http://pd2:2380 - - --data-dir=/data/pd0 - - --config=/pd.toml - - --log-file=/logs/pd0.log - restart: on-failure - pd1: - image: pingcap/pd:nightly - ports: - - "2379" - volumes: - - ./config/pd-nightly.toml:/pd.toml:ro - - ./data:/data - - ./logs:/logs - command: - - --name=pd1 - - --client-urls=http://0.0.0.0:2379 - - --peer-urls=http://0.0.0.0:2380 - - --advertise-client-urls=http://pd1:2379 - - --advertise-peer-urls=http://pd1:2380 - - --initial-cluster=pd0=http://pd0:2380,pd1=http://pd1:2380,pd2=http://pd2:2380 - - --data-dir=/data/pd1 - - --config=/pd.toml - - --log-file=/logs/pd1.log - restart: on-failure - pd2: - image: pingcap/pd:nightly - ports: - - "2379" - volumes: - - ./config/pd-nightly.toml:/pd.toml:ro - - ./data:/data - - ./logs:/logs - command: - - --name=pd2 - - --client-urls=http://0.0.0.0:2379 - - --peer-urls=http://0.0.0.0:2380 - - --advertise-client-urls=http://pd2:2379 - - --advertise-peer-urls=http://pd2:2380 - - --initial-cluster=pd0=http://pd0:2380,pd1=http://pd1:2380,pd2=http://pd2:2380 - - --data-dir=/data/pd2 - - --config=/pd.toml - - --log-file=/logs/pd2.log - restart: on-failure - tikv0: - image: pingcap/tikv:nightly - volumes: - - ./config/tikv.toml:/tikv.toml:ro - - ./data:/data - - ./logs:/logs - command: - - --addr=0.0.0.0:20160 - - --advertise-addr=tikv0:20160 - - --data-dir=/data/tikv0 - - --pd=pd0:2379,pd1:2379,pd2:2379 - - --config=/tikv.toml - - --log-file=/logs/tikv0.log - depends_on: - - "pd0" - - "pd1" - - "pd2" - restart: on-failure - tikv1: - image: pingcap/tikv:nightly - volumes: - - ./config/tikv.toml:/tikv.toml:ro - - ./data:/data - - ./logs:/logs - command: - - --addr=0.0.0.0:20160 - - --advertise-addr=tikv1:20160 - - --data-dir=/data/tikv1 - - --pd=pd0:2379,pd1:2379,pd2:2379 - - --config=/tikv.toml - - --log-file=/logs/tikv1.log - depends_on: - - "pd0" - - "pd1" - - "pd2" - restart: on-failure - tikv2: - image: pingcap/tikv:nightly - volumes: - - 
./config/tikv.toml:/tikv.toml:ro - - ./data:/data - - ./logs:/logs - command: - - --addr=0.0.0.0:20160 - - --advertise-addr=tikv2:20160 - - --data-dir=/data/tikv2 - - --pd=pd0:2379,pd1:2379,pd2:2379 - - --config=/tikv.toml - - --log-file=/logs/tikv2.log - depends_on: - - "pd0" - - "pd1" - - "pd2" - restart: on-failure - tidb: - image: pingcap/tidb:nightly - ports: - - "4000:4000" - - "10080:10080" - volumes: - - ./config/tidb.toml:/tidb.toml:ro - - ./logs:/logs - command: - - --store=tikv - - --path=pd0:2379,pd1:2379,pd2:2379 - - --config=/tidb.toml - - --log-file=/logs/tidb.log - depends_on: - - "tikv0" - - "tikv1" - - "tikv2" - restart: on-failure - tiflash: - image: pingcap/tiflash:nightly - volumes: - - ./config/tiflash.toml:/tiflash.toml:ro - - ./config/tiflash-learner.toml:/tiflash-learner.toml:ro - - ./data:/data - - ./logs:/logs - command: - - --config=/tiflash.toml - depends_on: - - "tikv0" - - "tikv1" - - "tikv2" - - "tidb" - restart: on-failure - # monitors - pushgateway: - image: prom/pushgateway:v0.3.1 - command: - - --log.level=error - restart: on-failure - prometheus: - user: root - image: prom/prometheus:v2.2.1 - command: - - --log.level=error - - --storage.tsdb.path=/data/prometheus - - --config.file=/etc/prometheus/prometheus.yml - ports: - - "9091:9091" - volumes: - - ./config/prometheus.yml:/etc/prometheus/prometheus.yml:ro - - ./config/pd.rules.yml:/etc/prometheus/pd.rules.yml:ro - - ./config/tikv.rules.yml:/etc/prometheus/tikv.rules.yml:ro - - ./config/tidb.rules.yml:/etc/prometheus/tidb.rules.yml:ro - - ./data:/data - restart: on-failure - grafana: - image: grafana/grafana:6.0.1 - user: "0" - environment: - GF_LOG_LEVEL: error - GF_PATHS_PROVISIONING: /etc/grafana/provisioning - GF_PATHS_CONFIG: /etc/grafana/grafana.ini - volumes: - - ./config/grafana:/etc/grafana - - ./config/dashboards:/tmp/dashboards - - ./data/grafana:/var/lib/grafana - ports: - - "3000:3000" - restart: on-failure