/etc/shinken/shinken-specific.cfg is in shinken-core 0.6.5-2build1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.
#This config file defines Shinken specific objects like
#satellites or Realms
#
#This file can be used for defining a simple environment :
#*one scheduler that schedules the checks (but doesn't launch them)
#*one poller (that launches the checks)
#*one reactionner (that sends the notifications)
#*one broker (that gives jobs to modules. Modules export data like logs, status.dat, mysql export, etc etc)
#*some of the brokers modules (that do the jobs)
#*one arbiter (that reads the configuration and dispatches it to all others)
#So there is no high availability here, just a simple "Nagios equivalent" (but with
#more perf and less code! )
#The scheduler is a "Host manager". It gets the hosts and their
#services and it schedules the checks for the pollers.
define scheduler{
scheduler_name scheduler-1 ; just the name
address localhost ; ip or dns address of the daemon
port 7768 ; tcp port of the daemon
#optional
spare 0 ; (0 = not a spare, 1 = is spare)
weight 1 ; (some schedulers can manage more hosts than others)
timeout 3 ; 'ping' timeout
data_timeout 120 ; 'data send' timeout
max_check_attempts 3 ; if at least max_check_attempts pings failed, the node is DEAD
check_interval 60 ; ping it every minute
# modules PickleRetention
# Interesting modules :
# PickleRetention : save data before exiting, in a flat file
# MemcacheRetention : same, but in a memcache server
# RedisRetention : same, but in a Redis server :)
# NagiosRetention : only READ retention info from a nagios retention file
#advanced feature
realm All ; optional (realms are a multi-site feature)
}
#Pollers launch checks
define poller{
poller_name poller-1 ; just the name
address localhost ; ip or dns address of the daemon
port 7771 ; tcp port of the daemon
#optional
manage_sub_realms 0 ; optional and advanced: does it take jobs from schedulers of sub realms?
min_workers 4 ; optional : start with N worker processes. 0 means : "number of cpus"
max_workers 4 ; optional : no more than N worker processes. 0 means : "number of cpus"
processes_by_worker 256 ; optional : each worker manages up to 256 checks
polling_interval 1 ; optional : take jobs from schedulers every 1 second
timeout 3 ; 'ping' timeout
data_timeout 120 ; 'data send' timeout
check_interval 60 ; ping it every minute
max_check_attempts 3 ; if at least max_check_attempts pings failed, the node is DEAD
# advanced features
#modules NrpeBooster
#poller_tags None
realm All ; optional (realms are a multi-site feature)
}
# Poller can have a nrpe module : commands tagged with
# nrpe_poller as module_type will be managed by this module.
# it will bypass the launch of check_nrpe, and will do the
# connection itself.
#define module{
# module_name NrpeBooster
# module_type nrpe_poller
#}
#Reactionner launch notifications
define reactionner{
reactionner_name reactionner-1 ; just the name
address localhost ; ip or dns address of the daemon
port 7769 ; tcp port of the daemon
spare 0 ; (0 = not a spare, 1 = is spare)
#optional
manage_sub_realms 0 ;optional : like for poller
min_workers 1 ;optional : like for poller
max_workers 15 ;optional : like for poller
polling_interval 1 ;optional : like for poller
timeout 3 ; 'ping' timeout
data_timeout 120 ; 'data send' timeout
check_interval 60 ; ping it every minute
max_check_attempts 3 ; if at least max_check_attempts pings failed, the node is DEAD
#optional
realm All ; optional (realms are a multi-site feature)
}
#The broker manages data export (in flat file or in database)
#with its modules
#Here just log files and status.dat file modules
define broker{
broker_name broker-1 ; just the name
address localhost ; ip or dns address of the daemon
port 7772 ; tcp port of the daemon
spare 0 ; (0 = not a spare, 1 = is spare)
# Which modules to load? Livestatus and logs by default.
modules Livestatus, Simple-log
# Other interesting modules to add :
# PickleRetentionBroker : save data when quitting
# ToNdodb_Mysql : NDO database support
# NPCD : use the PNP addon
# Optional
manage_sub_realms 1 ; optional, like for poller
manage_arbiters 1 ; optional : take data from Arbiter. There should be
;only one broker for the arbiter
check_interval 60 ; ping it every minute
timeout 3 ; 'ping' timeout
data_timeout 120 ; 'data send' timeout
max_check_attempts 3 ; if at least max_check_attempts pings failed, the node is DEAD
# Advanced
realm All ; optional (realms are a multi-site feature)
}
##Now the modules for the broker. The first 2 that are used, and all the others :)
#The log management for ALL daemons (all in one log, cool isn't it? ).
define module{
module_name Simple-log
module_type simple_log
path /var/log/shinken/nagios.log ; single log file shared by all daemons
archive_path /var/log/shinken/archives/ ; rotated log files go here
}
#Status.dat and objects.cache export. For the old Nagios
#interface
define module{
module_name Status-Dat
module_type status_dat
status_file /var/log/shinken/status.dat ; legacy Nagios status file
object_cache_file /var/log/shinken/objects.cache ; legacy Nagios object cache
status_update_interval 15 ; update status.dat every 15s
}
##All other modules that can be called if you have installed
#the databases, or if you want to test something else :)
#Here the NDO/MySQL module
#So you can use with NagVis or Centreon
#define module{
# module_name ToNdodb_Mysql
# module_type ndodb_mysql
# database ndo ; database name
# user root ; user of the database
# password root ; must be changed
# host localhost ; host to connect to
# character_set utf8 ;optionnal, UTF8 is the default
# If you want to mix Shinken AND Nagios/icinga in the same db
# you need to offset shinken instance id so they will not
# override/delete other ones. Here for 5 nagios box.
# nagios_mix_offset 5
#}
#Here a NDO/Oracle module. For Icinga web connection
#Or for DBA that do not like MySQL
#define module{
# module_name ToNdodb_Oracle
# module_type ndodb_oracle
# database XE ;database name (listener in fact)
# user system ;user to connect
# password password ;Yes I know I have to change my default password...
# oracle_home /usr/lib/oracle/xe/app/oracle/product/10.2.0/server ;optional, but can be useful
#}
#Here for Merlin/MySQL. For the cool Ninja connection
#define module{
# module_name ToMerlindb_Mysql
# module_type merlindb
# backend mysql ;backend to use, here mysql databse
# database merlin ;database name
# user root ; ? .. yes, the user of the database...
# password root ; wtf? you ask?
# host localhost ; host of the database
# character_set utf8 ;optionnal, UTF8 is the default
#}
#Here the Merlin/Sqlite. No one uses it for now :)
#You look at something : it's also the merlindb module, like the previous,
#it's the same code, it's just the backend parameter that change (and path).
#define module{
# module_name ToMerlindb_Sqlite
# module_type merlindb
# backend sqlite ;like the mysql, but sqlite :)
# database_path /tmp/merlindb.sqlite ;path of the sqlite file
#}
#Here the couchdb export. Maybe use one day...
#I should do a mangodb too one day...
#and casandra...
#and voldemort...
#and all other NoSQL database in fact :)
#define module{
# module_name ToCouchdb
# module_type couchdb
# user root
# password root
# host localhost
#}
#Export services perfdata to flat file. For centreon or
#perfparse
define module{
module_name Service-Perfdata
module_type service_perfdata
path /var/log/shinken/service-perfdata ; flat file receiving service perfdata lines
# mode a ; optional. a = append, w = overwrite, p = pipe
# template $LASTSERVICECHECK$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$\t$SERVICESTATE$\n
}
#For hosts this time
#like the previous, but for hosts...
define module{
module_name Host-Perfdata
module_type host_perfdata
path /var/log/shinken/host-perfdata ; flat file receiving host perfdata lines
# mode a ; optional. a = append, w = overwrite, p = pipe
# template $LASTHOSTCHECK$\t$HOSTNAME$\t$HOSTOUTPUT$\t$HOSTSTATE$\t$HOSTPERFDATA$\n
}
#You know livestatus? Yes, there a Livestatus module for shinken too :)
define module{
module_name Livestatus
module_type livestatus
host * ; * = listen on all configured ip addresses
port 50000 ; tcp port to listen on
database_file /var/log/shinken/livestatus.db
# Only set debug if you have a problem with this module
# debug /tmp/ls.debug
# Set to 1 if you want to dump queries/responses too
# warning : it's very verbose
# debug_queries 0
}
#Send all logs to syslog
define module{
module_name Syslog
module_type syslog ; forwards log entries to syslog; no extra options needed
}
#Use with the PNP interface
#define module{
# module_name NPCD
# module_type npcdmod
# config_file /usr/local/pnp4nagios/etc/npcd.cfg
#}
############################# For the schedulers
#Now the good flat file for retention module
define module{
module_name PickleRetention
module_type pickle_retention_file_generic
path /var/log/shinken/retention.dat ; scheduler retention state, pickle flat file
}
#Now the good flat file for retention module
define module{
module_name PickleRetentionBroker
module_type pickle_retention_file_generic
path /var/log/shinken/retention_broker.dat ; broker retention state, pickle flat file
}
#Now the good flat file for retention module
define module{
module_name PickleRetentionArbiter
module_type pickle_retention_file_generic
path /var/log/shinken/retention_arbiter.dat ; arbiter retention state, pickle flat file
}
############################# For the schedulers
#Now the good flat file for retention module
define module{
module_name NagiosRetention
module_type nagios_retention_file
path /var/log/shinken/retention-nagios.dat ; READ-only import from a Nagios retention file
}
#Now the memcache one
#Now the good flat file for retention module
define module{
module_name MemcacheRetention
module_type memcache_retention
server 127.0.0.1 ; memcache server address
port 11211 ; memcache server port
}
#And the redis one
#Now the good flat file for retention module
define module{
module_name RedisRetention
module_type redis_retention
server 127.0.0.1 ; redis server address (no port option here; presumably the default is used)
}
# The old named pipe way from Nagios
#define module{
# module_name CommandFile
# module_type named_pipe
# command_file /usr/local/shinken/var/rw/nagios.cmd
#}
#You know GLPI? You can load your hosts from this app (
#with the webservices plugins for GLPI, in xmlrpc mode)
#All hosts read from this will be added to the others of the
#standard flat file
#Warning : just load host_name, experimental.
#define module{
# module_name GLPIImport
# module_type glpi_import
# uri http://localhost/glpi/plugins/webservices/xmlrpc.php
# login_name glpi
# login_password glpi
# use_property otherserial ;optionnal. Will take use value from the otherserial
# field in GLPI (Inventory number: in the web interface)
#}
#You know GLPI? You can load all configuration from this app (
#with the webservices plugins for GLPI, in xmlrpc mode
# and with plugin monitoring for GLPI)
#All configuration read from this will be added to the others of the
#standard flat file
#define module{
# module_name GLPI
# module_type glpi
# uri http://localhost/glpi/plugins/webservices/xmlrpc.php
# login_name glpi
# login_password glpi
#}
#You know NSCA? You can send check results to shinken
#using send_nsca command
#define module{
# module_name NSCA
# module_type nsca_server
# host *
# port 5667
# encryption_method 0
# password helloworld
#}
#This module implement TSCA, a thrift interface to submit checks result
#define module{
# module_name TSCA
# module_type tsca_server
# host *
# port 9090
#}
# You know VMWare? It's cool to VMotion VM, but after it's hard to
# follow host dependencies when it move. With this module, you can
# just lookup at the vcenter from times to times and update dependencies
#define module{
# module_name VMWare_auto_linking
# module_type hot_dependencies
# mapping_file /tmp/vmware_mapping_file.json
# mapping_command /usr/local/shinken/libexec/link_vmware_host_vm.py -x '/usr/local/shinken/libexec/check_esx3.pl' -V 'vcenter.mydomain.com' -u 'admin' -p 'secret' -r 'lower|nofqdn' -o /tmp/vmware_mapping_file.json
# mapping_command_interval 60 ; optionnal
# mapping_command_timeout 300 ; optionnal
# Only useful if you want debug output. Can
# be verbose for large installations
# debug 1
#}
# Arbiter module to change on the fly a poller tag of a
# command by another.
# Useful when you use a fixed configuration tool that do not allow you
# to configure poller_tag.
#define module{
# module_name HackCommandsPollerTag
# module_type hack_commands_poller_tag
# cmd_line_match (.*)check_esx3(.*)
# poller_tag esx3
#}
#Hosts and Services configuration can be pulled from a MySQL database
#All hosts and services read from the database will be added to the others of the
#standard flat file
#You can easily use an existing database, you just have to define the queries.
#It can be a useful module to use for HA too :)
#Warning : only for hosts and services
#define module{
# module_name MySQLImport
# module_type mysql_import
# host localhost
# login root
# password azerty
# database supervision
# reqhosts SELECT host_name, alias, realm, address ,template AS 'use' FROM hosts
# reqservices SELECT host_name, service_description, normal_check_interval, check_command ,template AS 'use' FROM services
# reqcontacts SELECT contact_name, email, template AS 'use' FROM contacts
#}
#The arbiter definition is optionnal
#WARNING : You must change host_name with the
#hostname of your machine !!!!
define arbiter{
arbiter_name Arbiter-Master
# host_name node1 ;result of the hostname command under Unix
address localhost ;IP or DNS address
port 7770 ; tcp port of the daemon
spare 0 ; (0 = not a spare, 1 = is spare)
# uncomment the line below if you want to use the GLPI module and the NSCA one
# modules CommandFile,GLPIImport, NSCA, VMWare_auto_linking, TSCA
# List of interesting modules :
# CommandFile : open the named pipe nagios.cmd
# PickleRetentionArbiter : save data before exiting
# NSCA : NSCA server
# VMWare_auto_linking : look up the vSphere server for dependencies
# GLPIImport : import hosts from GLPI
# TSCA : TSCA server
}
# The receiver manages passive information. It's just a "buffer" that
# will be read from the arbiter to dispatch data
#define receiver{
# receiver_name receiver-1
# address localhost
# port 7773
# spare 0
#
# # The main goal of the receiver is to load
# # Passive modules, like the NSCA one
# #modules NSCA,CommandFile
#
# timeout 3 ; 'ping' timeout
# data_timeout 120 ; 'data send' timeout
# max_check_attempts 3 ; if at least max_check_attempts ping failed, the node is DEAD
# check_interval 60 ; ping it every minute
#
# #advanced
# realm All
# }
#Very advanced feature for multisite managment.
#Consider to look at the doc before touching it :)
define realm{
realm_name All
default 1 ; this is the default realm (used when objects declare none)
}
###Now specific objects that allow easy changes in the service/host behavior
#A result modulation is used to modulate a check result, like CRITICAL->WARNING here
#define resultmodulation{
# resultmodulation_name critical_is_warning ;required
# exit_codes_match 2 ;optionnal, list of code to change
# exit_code_modulation 1 ;code that will be put if the code match
# modulation_period 24x7 ;period when to apply the modulation
#}
|