
first commit

lawrencesystems committed 2 years ago
commit 3ad572558a
3 files changed with 476 additions and 0 deletions
  1. README.md (+11, -0)
  2. config/graylog/graylog.conf (+385, -0)
  3. docker-compose.yml (+80, -0)

+ 11 - 0
README.md

@@ -0,0 +1,11 @@
+# Overview
+
+This setup is a mix of the [Graylog open-core docker-compose example](https://github.com/Graylog2/docker-compose/blob/main/open-core/docker-compose.yml) and the [Graylog Docker installation docs](https://go2docs.graylog.org/5-0/downloading_and_installing_graylog/docker_installation.htm).
+
+# Usage
+
+```
+docker compose up
+```
+
+Access Graylog [here](http://localhost:9000).
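
Once the containers are up, a quick sanity check is to tail the Graylog service logs and hit its load-balancer status endpoint. This is a minimal sketch assuming the port mappings from docker-compose.yml and Graylog's standard `/api/system/lbstatus` endpoint:

```
# run detached and watch Graylog start up
docker compose up -d
docker compose logs -f graylog

# should answer ALIVE once the node is ready (assumes the default
# load-balancer status endpoint and the 9000 port mapping)
curl http://localhost:9000/api/system/lbstatus
```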

+ 385 - 0
config/graylog/graylog.conf

@@ -0,0 +1,385 @@
+# If you are running more than one instance of Graylog server, you have to select one of these
+# instances as master. The master will perform some periodic tasks that non-master nodes won't perform.
+is_master = true
+
+# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
+# to use an absolute file path here if you are starting Graylog server from init scripts or similar.
+node_id_file = /usr/share/graylog/data/config/node-id
+
+root_username = admin
+root_timezone = UTC
+bin_dir = /usr/share/graylog/bin
+data_dir = /usr/share/graylog/data
+plugin_dir = /usr/share/graylog/plugin
+
+
+# List of Elasticsearch hosts Graylog should connect to.
+# Need to be specified as a comma-separated list of valid URIs for the http ports of your elasticsearch nodes.
+# If one or more of your elasticsearch hosts require authentication, include the credentials in each node URI that
+# requires authentication.
+
+# Maximum number of retries to connect to elasticsearch on boot for the version probe.
+#
+# Default: 0, retry indefinitely with the given delay until a connection can be established
+elasticsearch_version_probe_attempts = 5
+
+# Waiting time in between connection attempts for elasticsearch_version_probe_attempts
+#
+# Default: 5s
+elasticsearch_version_probe_delay = 5s
+
+# Maximum amount of time to wait for successful connection to Elasticsearch HTTP port.
+#
+# Default: 10 Seconds
+elasticsearch_connect_timeout = 10s
+
+# Maximum amount of time to wait for reading back a response from an Elasticsearch server.
+# (e. g. during search, index creation, or index time-range calculations)
+#
+# Default: 60 seconds
+elasticsearch_socket_timeout = 60s
+
+# Maximum idle time for an Elasticsearch connection. If this is exceeded, the connection will
+# be torn down.
+#
+# Default: inf
+#elasticsearch_idle_timeout = -1s
+
+# Maximum number of total connections to Elasticsearch.
+#
+# Default: 200
+#elasticsearch_max_total_connections = 200
+
+# Maximum number of total connections per Elasticsearch route (normally this means per
+# elasticsearch server).
+#
+# Default: 20
+#elasticsearch_max_total_connections_per_route = 20
+
+# Maximum number of times Graylog will retry failed requests to Elasticsearch.
+#
+# Default: 2
+#elasticsearch_max_retries = 2
+
+# Enable automatic Elasticsearch node discovery through Nodes Info,
+# see https://www.elastic.co/guide/en/elasticsearch/reference/5.4/cluster-nodes-info.html
+#
+# WARNING: Automatic node discovery does not work if Elasticsearch requires authentication, e. g. with Shield.
+#
+# Default: false
+#elasticsearch_discovery_enabled = true
+
+# Filter for including/excluding Elasticsearch nodes in discovery according to their custom attributes,
+# see https://www.elastic.co/guide/en/elasticsearch/reference/5.4/cluster.html#cluster-nodes
+#
+# Default: empty
+#elasticsearch_discovery_filter = rack:42
+
+# Frequency of the Elasticsearch node discovery.
+#
+# Default: 30s
+# elasticsearch_discovery_frequency = 30s
+
+# Set the default scheme when connecting to Elasticsearch discovered nodes
+#
+# Default: http (available options: http, https)
+#elasticsearch_discovery_default_scheme = http
+
+# Enable payload compression for Elasticsearch requests.
+#
+# Default: false
+#elasticsearch_compression_enabled = true
+
+# Enable use of "Expect: 100-continue" Header for Elasticsearch index requests.
+# If this is disabled, Graylog cannot properly handle HTTP 413 Request Entity Too Large errors.
+#
+# Default: true
+#elasticsearch_use_expect_continue = true
+
+# Graylog will use multiple indices to store documents in. You can configure the strategy it uses to determine
+# when to rotate the currently active write index.
+# It supports multiple rotation strategies:
+#   - "count" of messages per index, use elasticsearch_max_docs_per_index below to configure
+#   - "size" per index, use elasticsearch_max_size_per_index below to configure
+# valid values are "count", "size" and "time", default is "count"
+#
+# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these
+#            to your previous 1.x settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+rotation_strategy = count
+
+# (Approximate) maximum number of documents in an Elasticsearch index before a new index
+# is being created, also see no_retention and elasticsearch_max_number_of_indices.
+# Configure this if you used 'rotation_strategy = count' above.
+#
+# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these
+#            to your previous 1.x settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+elasticsearch_max_docs_per_index = 20000000
+
+# (Approximate) maximum size in bytes per Elasticsearch index on disk before a new index is being created, also see
+# no_retention and elasticsearch_max_number_of_indices. Default is 1GB.
+# Configure this if you used 'rotation_strategy = size' above.
+#
+# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these
+#            to your previous 1.x settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+#elasticsearch_max_size_per_index = 1073741824
+
+# (Approximate) maximum time before a new Elasticsearch index is being created, also see
+# no_retention and elasticsearch_max_number_of_indices. Default is 1 day.
+# Configure this if you used 'rotation_strategy = time' above.
+# Please note that this rotation period does not look at the time specified in the received messages, but is
+# using the real clock value to decide when to rotate the index!
+# Specify the time using a duration and a suffix indicating which unit you want:
+#  1w  = 1 week
+#  1d  = 1 day
+#  12h = 12 hours
+# Permitted suffixes are: d for day, h for hour, m for minute, s for second.
+#
+# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these
+#            to your previous 1.x settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+#elasticsearch_max_time_per_index = 1d
+
+# Disable checking the version of Elasticsearch for being compatible with this Graylog release.
+# WARNING: Using Graylog with unsupported and untested versions of Elasticsearch may lead to data loss!
+#elasticsearch_disable_version_check = true
+
+# Disable message retention on this node, i. e. disable Elasticsearch index rotation.
+#no_retention = false
+
+# How many indices do you want to keep?
+#
+# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these
+#            to your previous 1.x settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+elasticsearch_max_number_of_indices = 20
+
+# Decide what happens with the oldest indices when the maximum number of indices is reached.
+# The following strategies are available:
+#   - delete # Deletes the index completely (Default)
+#   - close # Closes the index and hides it from the system. Can be re-opened later.
+#
+# ATTENTION: These settings have been moved to the database in 2.0. When you upgrade, make sure to set these
+#            to your previous 1.x settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+retention_strategy = delete
+
+# How many Elasticsearch shards and replicas should be used per index? Note that this only applies to newly created indices.
+# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these
+#            to your previous settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+elasticsearch_shards = 4
+elasticsearch_replicas = 0
+
+# Prefix for all Elasticsearch indices and index aliases managed by Graylog.
+#
+# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these
+#            to your previous settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+elasticsearch_index_prefix = graylog
+
+# Name of the Elasticsearch index template used by Graylog to apply the mandatory index mapping.
+# Default: graylog-internal
+#
+# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these
+#            to your previous settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+#elasticsearch_template_name = graylog-internal
+
+# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
+# be enabled with care. See also: https://docs.graylog.org/docs/query-language
+allow_leading_wildcard_searches = false
+
+# Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
+# should only be enabled after making sure your Elasticsearch cluster has enough memory.
+allow_highlighting = false
+
+# Analyzer (tokenizer) to use for message and full_message field. The "standard" filter usually is a good idea.
+# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
+# Elasticsearch documentation: https://www.elastic.co/guide/en/elasticsearch/reference/2.3/analysis.html
+# Note that this setting only takes effect on newly created indices.
+#
+# ATTENTION: These settings have been moved to the database in Graylog 2.2.0. When you upgrade, make sure to set these
+#            to your previous settings so they will be migrated to the database!
+#            This configuration setting is only used on the first start of Graylog. After that,
+#            index related settings can be changed in the Graylog web interface on the 'System / Indices' page.
+#            Also see https://docs.graylog.org/docs/index-model#index-set-configuration
+elasticsearch_analyzer = standard
+
+# Global timeout for index optimization (force merge) requests.
+# Default: 1h
+#elasticsearch_index_optimization_timeout = 1h
+
+# Maximum number of concurrently running index optimization (force merge) jobs.
+# If you are using lots of different index sets, you might want to increase that number.
+# Default: 20
+#elasticsearch_index_optimization_jobs = 20
+
+# Mute the logging-output of ES deprecation warnings during REST calls in the ES RestClient
+#elasticsearch_mute_deprecation_warnings = true
+
+# Time interval for index range information cleanups. This setting defines how often stale index range information
+# is being purged from the database.
+# Default: 1h
+#index_ranges_cleanup_interval = 1h
+
+# Time interval for the job that runs index field type maintenance tasks like cleaning up stale entries. This doesn't
+# need to run very often.
+# Default: 1h
+#index_field_type_periodical_interval = 1h
+
+# Batch size for the Elasticsearch output. This is the maximum (!) number of messages the Elasticsearch output
+# module will get at once and write to Elasticsearch in a batch call. If the configured batch size has not been
+# reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember
+# that every outputbuffer processor manages its own batch and performs its own batch write calls.
+# ("outputbuffer_processors" variable)
+output_batch_size = 500
+
+# Flush interval (in seconds) for the Elasticsearch output. This is the maximum amount of time between two
+# batches of messages written to Elasticsearch. It is only effective at all if your minimum number of messages
+# for this time period is less than output_batch_size * outputbuffer_processors.
+output_flush_interval = 1
+
+# As stream outputs are loaded only on demand, an output which is failing to initialize will be tried over and
+# over again. To prevent this, the following configuration options define how many faults are tolerated before
+# an output is disabled, and for how many seconds it will not be retried.
+output_fault_count_threshold = 5
+output_fault_penalty_seconds = 30
+
+# The number of parallel running processors.
+# Raise this number if your buffers are filling up.
+processbuffer_processors = 5
+outputbuffer_processors = 3
+
+# The following settings (outputbuffer_processor_*) configure the thread pools backing each output buffer processor.
+# See https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ThreadPoolExecutor.html for technical details
+
+# When the number of threads is greater than the core (see outputbuffer_processor_threads_core_pool_size),
+# this is the maximum time in milliseconds that excess idle threads will wait for new tasks before terminating.
+# Default: 5000
+#outputbuffer_processor_keep_alive_time = 5000
+
+# The number of threads to keep in the pool, even if they are idle, unless allowCoreThreadTimeOut is set
+# Default: 3
+#outputbuffer_processor_threads_core_pool_size = 3
+
+# The maximum number of threads to allow in the pool
+# Default: 30
+#outputbuffer_processor_threads_max_pool_size = 30
+
+# UDP receive buffer size for all message inputs (e. g. SyslogUDPInput).
+#udp_recvbuffer_sizes = 1048576
+
+# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
+# Possible types:
+#  - yielding
+#     Compromise between performance and CPU usage.
+#  - sleeping
+#     Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
+#  - blocking
+#     High throughput, low latency, higher CPU usage.
+#  - busy_spinning
+#     Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
+processor_wait_strategy = blocking
+
+# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
+# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
+# Must be a power of 2. (512, 1024, 2048, ...)
+ring_size = 65536
+
+inputbuffer_ring_size = 65536
+inputbuffer_processors = 2
+inputbuffer_wait_strategy = blocking
+
+# Enable the message journal.
+message_journal_enabled = true
+
+# The directory which will be used to store the message journal. The directory must be exclusively used by Graylog and
+# must not contain any other files than the ones created by Graylog itself.
+#
+# ATTENTION:
+#   If you create a separate partition for the journal files and use a file system that creates directories like 'lost+found'
+#   in the root directory, you need to create a sub directory for your journal.
+#   Otherwise Graylog will log an error message that the journal is corrupt and Graylog will not start.
+message_journal_dir = data/journal
+
+# The journal holds messages before they can be written to Elasticsearch,
+# for a maximum of 12 hours or 5 GB, whichever happens first.
+# During normal operation the journal will be smaller.
+#message_journal_max_age = 12h
+#message_journal_max_size = 5gb
+
+#message_journal_flush_age = 1m
+#message_journal_flush_interval = 1000000
+#message_journal_segment_age = 1h
+#message_journal_segment_size = 100mb
+
+# Number of threads used exclusively for dispatching internal events. Default is 2.
+#async_eventbus_processors = 2
+
+# How many seconds to wait between marking node as DEAD for possible load balancers and starting the actual
+# shutdown process. Set to 0 if you have no status checking load balancers in front.
+lb_recognition_period_seconds = 3
+
+# MongoDB connection string
+# See https://docs.mongodb.com/manual/reference/connection-string/ for details
+#mongodb_uri = mongodb://localhost/graylog
+mongodb_uri = mongodb://mongodb/graylog
+
+# Authenticate against the MongoDB server
+# '+'-signs in the username or password need to be replaced by '%2B'
+#mongodb_uri = mongodb://grayloguser:secret@localhost:27017/graylog
+
+# Use a replica set instead of a single host
+#mongodb_uri = mongodb://grayloguser:secret@localhost:27017,localhost:27018,localhost:27019/graylog?replicaSet=rs01
+
+# DNS Seedlist https://docs.mongodb.com/manual/reference/connection-string/#dns-seedlist-connection-format
+#mongodb_uri = mongodb+srv://server.example.org/graylog
+
+# Increase this value according to the maximum connections your MongoDB server can handle from a single client
+# if you encounter MongoDB connection problems.
+mongodb_max_connections = 1000
+
+# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5
+# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5,
+# then 500 threads can block. More than that and an exception will be thrown.
+# http://api.mongodb.com/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
+mongodb_threads_allowed_to_block_multiplier = 5
+
+# For some cluster-related REST requests, the node must query all other nodes in the cluster. This is the maximum number
+# of threads available for this. Increase it, if '/cluster/*' requests take long to complete.
+# Should be http_thread_pool_size * average_cluster_size if you have a high number of concurrent users.
+proxied_requests_thread_pool_size = 32
+
+# The allowed TLS protocols for system-wide TLS-enabled servers (e.g. message inputs, http interface).
+# Setting this to an empty value leaves it up to system libraries and the JDK in use to choose a default.
+# Default: TLSv1.2,TLSv1.3  (might be automatically adjusted to protocols supported by the JDK)
+enabled_tls_protocols = TLSv1.2,TLSv1.3
+
+# Enable Prometheus exporter HTTP server.
+# Default: false
+prometheus_exporter_enabled = true
+
+# IP address and port for the Prometheus exporter HTTP server.
+# Default: 127.0.0.1:9833
+prometheus_exporter_bind_address = 127.0.0.1:9833
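
This file is bind-mounted into the container by the compose file below, and any key in it can also be overridden with a `GRAYLOG_`-prefixed, upper-cased environment variable (which is how the compose file sets `mongodb_uri` and `elasticsearch_hosts`). A minimal sketch for confirming the mount and spot-checking effective values, assuming a running stack and the `graylog` container name from docker-compose.yml:

```
# check that the bind-mounted config is in place and spot-check a few keys
docker exec graylog grep -E '^(is_master|mongodb_uri|prometheus_exporter_enabled)' \
  /usr/share/graylog/config/graylog.conf

# the same settings can be overridden per container, e.g.
#   GRAYLOG_ROOT_TIMEZONE=America/Chicago   # overrides root_timezone
```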

+ 80 - 0
docker-compose.yml

@@ -0,0 +1,80 @@
+version: '3'
+
+networks:
+  graynet:
+    driver: bridge
+
+# This is how you persist data between container restarts
+volumes:
+  mongo_data:
+    driver: local
+  os_data:
+    driver: local
+  graylog_data:
+    driver: local
+
+services:
+  # Graylog stores configuration in MongoDB
+  mongo:
+    image: mongo:6.0.5-jammy
+    container_name: mongodb
+    volumes:
+      - "mongo_data:/data/db"
+    networks:
+      - graynet
+
+  # The logs themselves are stored in Opensearch
+  opensearch:
+    image: opensearchproject/opensearch:2
+    container_name: opensearch
+    environment:
+      - "OPENSEARCH_JAVA_OPTS=-Xms1g -Xmx1g"
+      - "bootstrap.memory_lock=true"
+      - "discovery.type=single-node"
+      - "action.auto_create_index=false"
+      - "plugins.security.ssl.http.enabled=false"
+      - "plugins.security.disabled=true"
+    volumes:
+      - "os_data:/usr/share/opensearch/data"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536
+        hard: 65536
+    ports:
+      - 9200:9200/tcp
+    networks:
+      - graynet
+
+  graylog:
+    image: graylog/graylog:5.0
+    container_name: graylog
+    environment:
+      # CHANGE ME (must be at least 16 characters)!
+      GRAYLOG_PASSWORD_SECRET: "somepasswordpepper"
+      # Password: admin
+      GRAYLOG_ROOT_PASSWORD_SHA2: "8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918"
+      GRAYLOG_HTTP_BIND_ADDRESS: "0.0.0.0:9000"
+      GRAYLOG_HTTP_EXTERNAL_URI: "http://localhost:9000/"
+      GRAYLOG_ELASTICSEARCH_HOSTS: "http://opensearch:9200"
+      GRAYLOG_MONGODB_URI: "mongodb://mongodb:27017/graylog"
+    entrypoint: /usr/bin/tini -- wait-for-it opensearch:9200 -- /docker-entrypoint.sh
+    volumes:
+      - "${PWD}/config/graylog/graylog.conf:/usr/share/graylog/config/graylog.conf"
+      - "graylog_data:/usr/share/graylog/data"
+    networks:
+      - graynet
+    restart: always
+    depends_on:
+      opensearch:
+        condition: "service_started"
+      mongo:
+        condition: "service_started"
+    ports:
+      - 9000:9000/tcp   # Graylog web interface and REST API
+      - 1514:1514/tcp   # Syslog
+      - 1514:1514/udp   # Syslog
+      - 12201:12201/tcp # GELF
+      - 12201:12201/udp # GELF
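
The two credential values above are placeholders (the SHA-256 hash shown corresponds to the password `admin`). One hedged way to generate replacements before the first start; `openssl` and `sha256sum` are assumed to be available (on macOS, `shasum -a 256` works instead):

```
# random pepper for GRAYLOG_PASSWORD_SECRET (must be at least 16 characters)
openssl rand -base64 32

# SHA-256 hex digest of the desired admin password for GRAYLOG_ROOT_PASSWORD_SHA2
echo -n 'your-admin-password' | sha256sum | cut -d ' ' -f1
```

After updating the values in docker-compose.yml, restart the stack with `docker compose up -d` so the new credentials take effect.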