How to delete all logs on a Graylog server container

How to delete all logs from a Graylog container running on Docker. Of course, you must first rotate the active write index and recalculate the index ranges.

Step 1:
System => Indices
Maintenance => Rotate active write index
Maintenance => Recalculate index ranges
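
If you prefer the command line, the same two maintenance actions can, as far as I know, also be triggered through the Graylog REST API. This is only a sketch, assuming Graylog 3.x, the 192.168.x.x:9000 address from the compose file below, and admin:yourpassword as a placeholder credential:

# rotate the active write index of the default index set (assumed endpoint)
curl -u admin:yourpassword -H 'X-Requested-By: cli' -X POST 'http://192.168.x.x:9000/api/system/deflector/cycle'
# recalculate (rebuild) the index ranges (assumed endpoint)
curl -u admin:yourpassword -H 'X-Requested-By: cli' -X POST 'http://192.168.x.x:9000/api/system/indices/ranges/rebuild'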

==================================
version: '3.3'

services:
#
# monitoring
#
  prom:
    image: quay.io/prometheus/prometheus:v2.0.0
    restart: always
    volumes:
     - ./monitor/prometheus.yml:/etc/prometheus/prometheus.yml
    command: "--config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus"
    ports:
     - 9090:9090
    depends_on:
     - exporter
    logging:
      driver: gelf
      options:
        gelf-address: udp://192.168.100.100:12201

  exporter:
    image: prom/node-exporter:latest
    restart: always
    ports:
     - "9100:9100"
    # network_mode: host
    logging:
      driver: gelf
      options:
        gelf-address: udp://192.168.100.100:12201

  grafana:
    restart: always
    image: grafana/grafana
    ports:
     - "3000:3000"
    depends_on:
    - prom
    volumes:
    - ./grafana_data:/var/lib/grafana
    logging:
      driver: gelf
      options:
        gelf-address: udp://192.168.100.100:12201


  cadvisor:
    image: google/cadvisor:latest
    container_name: monitoring_cadvisor
    restart: unless-stopped
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    expose:
      - 8080

#
# logserver
#
  graylog-mongo:
    image: "mongo:3"
    restart: always
    volumes:
    - ./graylog/mongodb:/data/db
    environment:
      - TZ=Asia/Tehran
      - AUTO_UPDATES_ON=true

  graylog-elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.8.2
    restart: always
    #command: "elasticsearch -Des.cluster.name='graylog'"
    volumes:
    - ./graylog/elasticsearchdata:/usr/share/elasticsearch/data
    ports:
    - 9200:9200
    environment:
      - TZ=Asia/Tehran
      - AUTO_UPDATES_ON=true
      - http.host=0.0.0.0
      - transport.host=localhost
      - network.host=0.0.0.0
      - node.max_local_storage_nodes=4
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # config added by da.na
      - elasticsearch_max_time_per_index=1d # limit 
      - elasticsearch_max_number_of_indices=8
      - indices.fielddata.cache.size=20%
      - elasticsearch_shards=1
      - elasticsearch_replicas=1
    ulimits:
      memlock:
        soft: -1
        hard: -1

  graylog:
    image: graylog/graylog:3.1
    restart: always
    volumes:
       - ./graylog/journal:/usr/share/graylog/data/journal
       - ./graylog/config:/usr/share/graylog/data/config
    environment:
      - TZ=Asia/Tehran
      - AUTO_UPDATES_ON=true
      - GRAYLOG_PASSWORD_SECRET=xxxxxxxxxx
      - GRAYLOG_ROOT_PASSWORD_SHA2=<.....>
      - GRAYLOG_WEB_ENDPOINT_URI=http://192.168.x.x:9000/api/
      - GRAYLOG_HTTP_BIND_ADDRESS=0.0.0.0:9000
      - GRAYLOG_HTTP_EXTERNAL_URI=http://192.168.x.x:9000/
      - GRAYLOG_TRANSPORT_EMAIL_WEB_INTERFACE_URL=http://192.168.x.x:9000
      - GRAYLOG_TRANSPORT_EMAIL_HOSTNAME=mail.eniac-tech.local
      - GRAYLOG_TRANSPORT_EMAIL_ENABLED=true
      - GRAYLOG_TRANSPORT_EMAIL_PORT=25
      - GRAYLOG_TRANSPORT_EMAIL_USE_AUTH=false
      - GRAYLOG_TRANSPORT_EMAIL_USE_TLS=false
      - GRAYLOG_TRANSPORT_EMAIL_USE_SSL=false
      - GRAYLOG_TRANSPORT_FROM_EMAIL=administrator@bahram.com
      - GRAYLOG_TRANSPORT_SUBJECT_PREFIX=[graylog]
    links:
      - graylog-mongo:mongo
      - graylog-elasticsearch:elasticsearch
    depends_on:
      - graylog-mongo
      - graylog-elasticsearch
    ports:
      # Graylog web interface and REST API
      - 9000:9000
      # Syslog TCP
      - 8514:8514
      # Syslog UDP
      - 8514:8514/udp
      # GELF TCP
      - 12201:12201
      # GELF UDP
      - 12201:12201/udp

=========================================

Step 2: recreate the containers and check that the disk space has been freed:

docker-compose down

docker-compose up -d

df -h
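
A quick way to confirm the space was actually freed is to check the size of the bind-mounted data directories and Docker's own disk usage; just a sketch, assuming the ./graylog/* paths from the compose file above:

du -sh ./graylog/elasticsearchdata ./graylog/journal   # size of the Elasticsearch data and Graylog journal
docker system df                                       # overall Docker disk usage (images, containers, volumes)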

Hey @bahram

Just checking to see if I got this correct:
when you execute docker logs -f <container_id>, are those the logs you're referring to?

EDIT: If you're referring to the logs that were ingested by Graylog/Elasticsearch, and not the system logs, then here is how to remove (i.e. delete) them.
First stop the input(s), then navigate to System/Indices, click the index set you want, and delete all the indices (shards) in that index set. You may need to set the index rotation to something small until you have removed all the messages from that index set. This depends on what you want to do. Personally, I would be careful about deleting the whole "Default index set"; maybe only remove the individual indices.

Hi smith,
Thanks a lot for the guide.
All container logs are sent to Graylog via the GELF log driver, configured as follows:

{
   "log-driver": "gelf",
   "log-opts": {
     "gelf-address": "udp://1.2.3.4:12201"
   }
}
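
(The snippet above uses the keys of Docker's daemon-wide /etc/docker/daemon.json; the per-service form inside docker-compose.yml, as already used by the services in the file above, looks like this:)

logging:
  driver: gelf
  options:
    gelf-address: udp://1.2.3.4:12201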

I would like to correct one point: none of the indices are removed by rotation alone. With the following command you can delete all indices, or only a specific one:
curl -X DELETE 'http://localhost:9200/_all'
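
For example, to first list the indices and then delete only a specific one (the index name graylog_0 here is just an assumed example):

curl 'http://localhost:9200/_cat/indices?v'        # list all indices with their sizes
curl -X DELETE 'http://localhost:9200/graylog_0'   # delete only this one index
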
The index metadata should also be removed from the MongoDB database with the following commands:

# mongo
> show dbs
> use graylog                 (or your database name, if different)
> show collections
> db.index_failures.find()    (to find the messages)
> db.index_failures.drop()    (to delete the messages)

So if you manually deleted the indices from Elasticsearch (may I add, that is probably not a good idea, but if you have to do it), then yes, the metadata would still be in MongoDB, since you did not delete the indices from Graylog's web UI:

> use graylog;
switched to db graylog
> show collections
> db.index_sets.find().pretty();

That is for index failures, not your index set.

Be warned: the following commands may or may not help, and they might make things worse, but I have had to use them before.

db.index_sets.drop()

OR

db.index_sets.drop( { writeConcern: <document> } )
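
A minimal sketch of the write-concern form; the values here are only illustrative, not a recommendation:

db.index_sets.drop( { writeConcern: { w: "majority", wtimeout: 5000 } } )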
