ELK Stack Logstash Indexer Error

I am attempting to run the Rancher ELK stack for:

  • Docker Logs
  • Logspout Logs
  • rsyslog logs for Nginx and PHP
  • AWS ELB Logs

However, I have a recurring Logstash indexer error:

Failed to flush outgoing items {:outgoing_count=>5, :exception=>#<Elasticsearch::Transport::Transport::Errors::InternalServerError: [500] {"error":"IllegalArgumentException[Malformed action/metadata line [3], expected a simple value for field [_type] but found [START_ARRAY]]","status":500}>, :backtrace=>["/opt/logstash/vendor/bundle/jruby/1.9/gems/elasticsearch-transport-1.0.12/lib/elasticsearch/transport/transport/base.rb:135:in `__raise_transport_error'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/elasticsearch-transport-1.0.12/lib/elasticsearch/transport/transport/base.rb:227:in `perform_request'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/elasticsearch-transport-1.0.12/lib/elasticsearch/transport/transport/http/manticore.rb:54:in `perform_request'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/elasticsearch-transport-1.0.12/lib/elasticsearch/transport/client.rb:119:in `perform_request'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/elasticsearch-api-1.0.12/lib/elasticsearch/api/actions/bulk.rb:80:in `bulk'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-1.0.5-java/lib/logstash/outputs/elasticsearch/protocol.rb:104:in `bulk'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-1.0.5-java/lib/logstash/outputs/elasticsearch.rb:519:in `submit'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-1.0.5-java/lib/logstash/outputs/elasticsearch.rb:518:in `submit'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-1.0.5-java/lib/logstash/outputs/elasticsearch.rb:543:in `flush'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/logstash-output-elasticsearch-1.0.5-java/lib/logstash/outputs/elasticsearch.rb:542:in `flush'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/stud-0.0.20/lib/stud/buffer.rb:219:in `buffer_flush'", "org/jruby/RubyHash.java:1341:in `each'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/stud-0.0.20/lib/stud/buffer.rb:216:in `buffer_flush'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/stud-0.0.20/lib/stud/buffer.rb:193:in `buffer_flush'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/stud-0.0.20/lib/stud/buffer.rb:112:in `buffer_initialize'", "org/jruby/RubyKernel.java:1511:in `loop'", "/opt/logstash/vendor/bundle/jruby/1.9/gems/stud-0.0.20/lib/stud/buffer.rb:110:in `buffer_initialize'"], :level=>:warn}
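
For context, the [500] comes from the Elasticsearch bulk API: every document in a bulk request is preceded by a one-line JSON action, and the error says the _type on that line arrived as a JSON array instead of a plain string. A sketch of what such a malformed action line would look like (index name and values are illustrative only):

{"index":{"_index":"logstash-2016.04.19","_type":["rsyslog","http-error"]}}

Logstash maps the event's type field to _type, so this usually means some filter turned type into a multi-valued field before the output stage.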

There was a similar question on Stack Overflow about this, but it was no real help to me, because my Logstash indexer keeps failing regularly.

Within the containers, all PHP and Nginx access and error logs are sent via an rsyslog JSON template.
Docker logs are sent using the syslog log-driver option.
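
Concretely, the containers are started along these lines (the collector hostname is an assumption; the port matches the container-syslogs udp input in the collector config below):

docker run \
  --log-driver=syslog \
  --log-opt syslog-address=udp://logstash-collector:12201 \
  nginx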

My Logstash config is as follows (or rather part of it, as I am currently debugging):

logstash-indexer:
  metadata:
    logstash:
      inputs: |
        redis {
          host => "redis"
          port => "6379"
          data_type => "list"
          key => "logstash"
        }
      filters: |
        if [docker.name] == "/rancher-server" {
          json {
            source => "message"
          }

          kv {}

          if [@message] {
            mutate {
              replace => { "message" => "%{@message}" }
            }
          }
        }
      outputs: |
        elasticsearch {
          host => "elasticsearch"
          protocol => "http"
        }
logstash-collector:
  metadata:
    logstash:
      inputs: |
        udp {
          port => 5000
          codec => "json"
          type => "logspout"
        }
        udp {
          port => 5544
          type => "rsyslog"
          codec => "json"
        }
        udp {
          port => 12201
          type => "container-syslogs"
        }

      filters: |
        if [syslogtag] == "http-error" {
          grok {
            match => [ "message" , "(?<timestamp>%{YEAR}[./-]%{MONTHNUM}[./-]%{MONTHDAY}[- ]%{TIME}) \[%{LOGLEVEL:severity}\] %{POSINT:pid}#%{NUMBER}: %{GREEDYDATA:message}(?:, client: (?<client>%{IP}|%{HOSTNAME}))(?:, server: %{IPORHOST:server})(?:, request: %{QS:request})(?:, upstream: %{QS:upstream})?(?:, host: %{QS:host})?"]
            match => [ "message" , "%{WORD:method} %{URIPATHPARAM:request} %{WORD:protocol}/%{NUMBER:httpversion}"]
            overwrite => [ "message" ]
          }
        }

        if [syslogtag] == "php-fpm-access" {
          grok {
            match => [ "message" , "%{HTTPDATE:time_local} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:status}"]
          }
        }

        if [syslogtag] == "php-fpm-error" {
          grok {
            match => [ "message" , "\[%{MONTHDAY:day}-%{MONTH:month}-%{YEAR:year}\s+%{TIME:time}\] %{WORD:type}: %{GREEDYDATA:message}"]
            add_field => { "timestamp" => "%{day}-%{month}-%{year} %{time}" }
            overwrite => [ "message" ]
          }
        }

        if [type] == "container-syslogs" {
          grok {
            match => { "message" => "%{SYSLOG5424PRI}%{NONNEGINT:ver} +(?:%{TIMESTAMP_ISO8601:ts}|-) +(?:%{HOSTNAME:containerid}|-) +(?:%{NOTSPACE:containername}|-) +(?:%{NOTSPACE:proc}|-) +(?:%{WORD:msgid}|-) +(?:%{SYSLOG5424SD:sd}|-|) +%{GREEDYDATA:msg}" }
          }
          syslog_pri { }
          date {
            match => [ "syslog_timestamp", "MMM  d HH:mm:ss", "MMM dd HH:mm:ss" ]
          }
          if !("_grokparsefailure" in [tags]) {
            mutate {
              replace => [ "@source_host", "%{syslog_hostname}" ]
              replace => [ "@message", "%{syslog_message}" ]
            }
          }
          mutate {
            remove_field => [ "syslog_hostname", "syslog_message", "syslog_timestamp" ]
          }
        }

      outputs: |
        redis {
          host => "redis"
          port => "6379"
          data_type => "list"
          key => "logstash"
        }
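
While debugging, the indexer output can also be teed to stdout with the stock rubydebug codec, so the exact events headed for Elasticsearch show up in the indexer container's logs (a minimal sketch of the indexer outputs block):

      outputs: |
        elasticsearch {
          host => "elasticsearch"
          protocol => "http"
        }
        stdout {
          codec => rubydebug
        }

Any event where type shows up as an array rather than a string is a candidate for the bulk failure above.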

If anyone has solved this problem, I would be super grateful for any ideas on which direction to take.

Cheers

I have the same problem with Logstash and Logspout from the catalog. If I check my logstash-indexer log, I see this error non-stop.

OK, I found the error. It happens with Elasticsearch 2 and Logstash.

19/4/2016 15:47:55 MapperParsingException[Field name [docker.hostname] cannot contain '.']
19/4/2016 15:47:55	at org.elasticsearch.index.mapper.object.ObjectMapper$TypeParser.parseProperties(ObjectMapper.java:276)
19/4/2016 15:47:55	at org.elasticsearch.index.mapper.object.ObjectMapper$TypeParser.parseObjectOrDocumentTypeProperties(ObjectMapper.java:221)
19/4/2016 15:47:55	at org.elasticsearch.index.mapper.object.RootObjectMapper$TypeParser.parse(RootObjectMapper.java:138)
19/4/2016 15:47:55	at org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:119)
19/4/2016 15:47:55	at org.elasticsearch.index.mapper.DocumentMapperParser.parse(DocumentMapperParser.java:100)
19/4/2016 15:47:55	at org.elasticsearch.index.mapper.MapperService.parse(MapperService.java:435)
19/4/2016 15:47:55	at org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.applyRequest(MetaDataMappingService.java:257)
19/4/2016 15:47:55	at org.elasticsearch.cluster.metadata.MetaDataMappingService$PutMappingExecutor.execute(MetaDataMappingService.java:230)
19/4/2016 15:47:55	at org.elasticsearch.cluster.service.InternalClusterService.runTasksForExecutor(InternalClusterService.java:458)
19/4/2016 15:47:55	at org.elasticsearch.cluster.service.InternalClusterService$UpdateTask.run(InternalClusterService.java:762)
19/4/2016 15:47:55	at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(PrioritizedEsThreadPoolExecutor.java:231)
19/4/2016 15:47:55	at org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run(PrioritizedEsThreadPoolExecutor.java:194)
19/4/2016 15:47:55	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
19/4/2016 15:47:55	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
19/4/2016 15:47:55	at java.lang.Thread.run(Thread.java:745)

It seems Logstash or Logspout is not compatible with Elasticsearch 2: Logspout emits dotted field names such as docker.hostname, and Elasticsearch 2.x no longer allows dots in field names.
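
If that is the cause, a possible workaround (a sketch, untested against this exact stack) is to rewrite the dotted fields into nested ones in the indexer's filters, either with the logstash-filter-de_dot plugin or with a plain mutate rename on the known fields, here docker.name from the config above and docker.hostname from the trace:

        mutate {
          rename => {
            "docker.name"     => "[docker][name]"
            "docker.hostname" => "[docker][hostname]"
          }
        }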

Hi @throrin19,

Sorry for the super late reply on this! For me it was a user error: I hadn't formulated my Logstash filter correctly.
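
For anyone hitting the same [_type] ... [START_ARRAY] error: one way a filter can produce it is the bare kv {} in the indexer config above, which writes every key=value pair it finds straight onto the event root, so a stray type=... token inside a message turns the event's type field into an array. Scoping kv to a subfield avoids clobbering reserved fields (a sketch; the target name kv_parsed is arbitrary):

        kv {
          target => "kv_parsed"
        }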