This repository has been archived by the owner on May 4, 2021. It is now read-only.

Update config to InfluxDB 0.10.x #83

Open · wants to merge 1 commit into base: master
213 changes: 142 additions & 71 deletions 0.10/config.toml
@@ -8,6 +8,11 @@
# Change this option to true to disable reporting.
reporting-disabled = false

# We'll try to get the hostname automatically, but if the OS returns something
# that isn't resolvable by other servers in the cluster, use this option to
# manually set the hostname.
hostname = "localhost"

###
### [meta]
###
@@ -19,14 +24,16 @@ reporting-disabled = false
# Controls if this node should run the metaservice and participate in the Raft group
enabled = true

# Where the metadata/raft database is stored
dir = "/data/meta"
hostname = "localhost"

bind-address = ":8088"
retention-autocreate = true
election-timeout = "1s"
heartbeat-timeout = "1s"
leader-lease-timeout = "500ms"
commit-timeout = "50ms"
cluster-tracing = false

###
### [data]
@@ -40,17 +47,18 @@ reporting-disabled = false
[data]
# Controls if this node holds time series data shards in the cluster
enabled = true

dir = "/data/db"

# The following WAL settings are for the b1 storage engine used in 0.9.2. They won't
# apply to any new shards created after upgrading to a version > 0.9.3.
max-wal-size = 104857600 # Maximum size the WAL can reach before a flush. Defaults to 100MB.
wal-flush-interval = "10m0s" # Maximum time data can sit in WAL before a flush.
wal-flush-interval = "10m" # Maximum time data can sit in WAL before a flush.
wal-partition-flush-delay = "2s" # The delay time between each WAL partition being flushed.

# These are the WAL settings for the storage engine >= 0.9.3
wal-dir = "/data/wal"
wal-enable-logging = true
wal-logging-enabled = true
data-logging-enabled = true

# When a series in the WAL in-memory cache reaches this size in bytes it is marked as ready to
@@ -73,24 +81,87 @@ reporting-disabled = false
# The more memory you have, the bigger this can be.
# wal-partition-size-threshold = 20971520

# Whether queries should be logged before execution. Very useful for troubleshooting, but will
# log any sensitive data contained within a query.
# query-log-enabled = true

# Settings for the TSM engine

# CacheMaxMemorySize is the maximum size a shard's cache can
# reach before it starts rejecting writes.
# cache-max-memory-size = 524288000

# CacheSnapshotMemorySize is the size at which the engine will
# snapshot the cache and write it to a TSM file, freeing up memory
# cache-snapshot-memory-size = 26214400

# CacheSnapshotWriteColdDuration is how long a shard can go without
# receiving writes or deletes before the engine snapshots the cache
# and writes it to a new TSM file
# cache-snapshot-write-cold-duration = "1h"

# MinCompactionFileCount is the minimum number of TSM files
# that need to exist before a compaction cycle will run
# compact-min-file-count = 3

# CompactFullWriteColdDuration is how long a shard can go without
# receiving writes or deletes before the engine compacts all of the
# shard's TSM files
# compact-full-write-cold-duration = "24h"

# MaxPointsPerBlock is the maximum number of points in an encoded
# block in a TSM file. Larger numbers may yield better compression
# but could incur a performance penalty when querying
# max-points-per-block = 1000
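
# For a sense of scale, a hedged sizing sketch (the values below are
# illustrative assumptions, not shipped defaults): on a host with ample RAM
# you might raise the cache ceiling while snapshotting early and often:
# cache-max-memory-size = 1073741824    # let each shard's cache reach ~1GB
# cache-snapshot-memory-size = 52428800 # snapshot to a TSM file at ~50MB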

###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###

[hinted-handoff]
enabled = true
dir = "/data/hh"
max-size = 1073741824
max-age = "168h"
retry-rate-limit = 0

# Hinted handoff will start retrying writes to down nodes at a rate of once per second.
# If any error occurs, it will back off exponentially until the interval
# reaches retry-max-interval. Once writes to all nodes complete successfully,
# the interval resets to retry-interval.
retry-interval = "1s"
retry-max-interval = "1m"

# Interval between running checks for data that should be purged. Data is purged from
# hinted-handoff queues for two reasons: 1) the data is older than the max age, or
# 2) the target node has been dropped from the cluster. In either case, data is
# never dropped until it has reached max-age.
purge-interval = "1h"

###
### [cluster]
###
### Controls non-Raft cluster behavior, which generally includes how data is
### shared across shards.
###

[cluster]
write-timeout = "5s" # The time within which a write operation must complete on the cluster.
shard-writer-timeout = "5s" # The time within which a shard must respond to write.
shard-writer-timeout = "5s" # The time within which a remote shard must respond to a write request.
write-timeout = "10s" # The time within which a write request must complete on the cluster.

###
### [retention]
###
### Controls the enforcement of retention policies for evicting old data.
###

[retention]
enabled = true
check-interval = "10m0s"
check-interval = "30m"

###
### [shard-precreation]
@@ -105,12 +176,27 @@ reporting-disabled = false
check-interval = "10m"
advance-period = "30m"

###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically
### if it does not already exist. The target retention policy within this
### database is called 'monitor' and, if it does not exist, is created with
### a retention period of 7 days and a replication factor of 1. In all cases
### this retention policy is configured as the default for the database.

[monitor]
store-enabled = true # Whether to record statistics internally.
store-database = "_internal" # The destination database for recorded statistics
store-interval = "10s" # The interval at which to record statistics

###
### [admin]
###
### Controls the availability of the built-in, web-based admin interface. If HTTPS is
### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
###

[admin]
enabled = true
bind-address = ":8083"
@@ -123,6 +209,7 @@ reporting-disabled = false
### Controls how the HTTP endpoints are configured. These are the primary
### mechanism for getting data into and out of InfluxDB.
###

[http]
enabled = true
bind-address = ":8086"
@@ -138,63 +225,84 @@ reporting-disabled = false
###
### Controls one or many listeners for Graphite data.
###

[[graphite]]
enabled = false
bind-address = ":2003"
protocol = "tcp"
consistency-level = "one"
separator = "."
database = "graphitedb"
# database = "graphite"
# bind-address = ":2003"
# protocol = "tcp"
# consistency-level = "one"

# These next lines control how batching works. You should have this enabled;
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in; see the worked
# sketch below the batch settings.

# batch-size = 1000 # will flush if this many points get buffered
# batch-size = 5000 # will flush if this many points get buffered
# batch-pending = 10 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
batch-size = 1000
batch-timeout = "1s"
templates = [
# filter + template
#"*.app env.service.resource.measurement",
# udp-read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.

### This string joins multiple matching 'measurement' values, providing more control over the final measurement name.
# separator = "."

# filter + template + extra tag
#"stats.* .host.measurement* region=us-west,agent=sensu",
### Default tags that will be added to all metrics. These can be overridden at the template level
### or by tags extracted from the metric
# tags = ["region=us-east", "zone=1c"]

# default template. Ignore the first graphite component "servers"
"instance.profile.measurement*"
]
### Each template line requires a template pattern. It can have an optional
### filter before the template, separated by spaces. It can also have optional extra
### tags following the template. Multiple tags should be separated by commas with no
### spaces, similar to the line protocol format. There can be only one default template.
# templates = [
# "*.app env.service.resource.measurement",
# # Default template
# "server.*",
# ]
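
# For illustration, a sketch of how a template applies, using the filter +
# template + extra-tags example shown above (the incoming metric name here is
# made up): with the template
#   "stats.* .host.measurement* region=us-west,agent=sensu"
# a Graphite metric named
#   stats.web01.cpu.load
# would be stored as measurement "cpu.load" (the remaining components joined
# by the separator) with tags host=web01, region=us-west, agent=sensu.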

###
### [collectd]
###
### Controls the listener for collectd data.
###

[collectd]
enabled = false
# bind-address = ":25826"
# database = "collectd"
# retention-policy = ""
# typesdb = "/usr/share/collectd/types.db"
# bind-address = ""
# database = ""
# typesdb = ""

# These next lines control how batching works. You should have this enabled;
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.

###
### [opentsdb]
###
### Controls the listener for OpenTSDB data.
###

[opentsdb]
enabled = false
# bind-address = ":4242"
# database = "opentsdb"
# retention-policy = ""
# consistency-level = "one"
# tls-enabled = false
# certificate= ""
# log-point-errors = true # Log an error for every malformed point.

# These next lines control how batching works. You should have this enabled;
# otherwise you could get dropped metrics or poor performance. Only
# metrics received over the telnet protocol undergo batching.

# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit

###
### [[udp]]
@@ -204,39 +312,21 @@ reporting-disabled = false

[[udp]]
enabled = false
bind-address = ":4444"
database = "udpdb"
# bind-address = ""
# database = "udp"
# retention-policy = ""

# These next lines control how batching works. You should have this enabled;
# otherwise you could get dropped metrics or poor performance. Batching
# will buffer points in memory if you have many coming in.

# batch-size = 1000 # will flush if this many points get buffered
# batch-pending = 5 # number of batches that may be pending in memory
# batch-timeout = "1s" # will flush at least this often even if we haven't hit buffer limit
# read-buffer = 0 # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max.
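
# A general note (not specific to this image): on Linux the OS maximum for
# the UDP read buffer is governed by the net.core.rmem_max sysctl; setting
# read-buffer above that value will cause the listener to fail, as noted above.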

###
### [monitoring]
###
### Send anonymous usage statistics to m.influxdb.com?
###
[monitoring]
enabled = false
write-interval = "24h"

###
### Controls the system self-monitoring, statistics and diagnostics.
###
### The internal database for monitoring data is created automatically
### if it does not already exist. The target retention policy within this
### database is called 'monitor' and, if it does not exist, is created with
### a retention period of 7 days and a replication factor of 1. In all cases
### this retention policy is configured as the default for the database.

[monitor]
store-enabled = true # Whether to record statistics internally.
store-database = "_internal" # The destination database for recorded statistics
store-interval = "10s" # The interval at which to record statistics

# set the expected UDP payload size; lower values tend to yield better performance, default is max UDP size 65536
# udp-payload-size = 65536

###
### [continuous_queries]
@@ -247,23 +337,4 @@ reporting-disabled = false
[continuous_queries]
log-enabled = true
enabled = true
recompute-previous-n = 2
recompute-no-older-than = "10m0s"
compute-runs-per-interval = 10
compute-no-more-than = "2m0s"

###
### [hinted-handoff]
###
### Controls the hinted handoff feature, which allows nodes to temporarily
### store queued data when one node of a cluster is down for a short period
### of time.
###

[hinted-handoff]
enabled = true
dir = "/data/hh"
max-size = 1073741824
max-age = "168h"
retry-rate-limit = 0
retry-interval = "1s"
# run-interval = "1s" # how often continuous queries are checked to see whether they need to run