/usr/lib/python2.7/dist-packages/aodh/evaluator/threshold.py is in python-aodh 2.0.0-0ubuntu1.
This file is owned by root:root, with mode 0o644.
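The mode and ownership can be verified from Python as well; this is a quick illustrative sketch (the path is simply the installed location given above), not something shipped with the package:

    import os
    import stat

    st = os.stat('/usr/lib/python2.7/dist-packages/aodh/evaluator/threshold.py')
    print(oct(stat.S_IMODE(st.st_mode)))  # 0644 on Python 2, 0o644 on Python 3
    print(st.st_uid, st.st_gid)           # 0 0, i.e. root:root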
The actual contents of the file can be viewed below.
#
# Copyright 2013-2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import datetime
import operator

import six

from ceilometerclient import client as ceiloclient
from oslo_log import log
from oslo_utils import timeutils

from aodh import evaluator
from aodh.evaluator import utils
from aodh.i18n import _, _LW
from aodh import keystone_client

LOG = log.getLogger(__name__)
COMPARATORS = {
    'gt': operator.gt,
    'lt': operator.lt,
    'ge': operator.ge,
    'le': operator.le,
    'eq': operator.eq,
    'ne': operator.ne,
}
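
# Illustrative note (not part of the original file): the mapping above resolves
# a rule's 'comparison_operator' string into a callable, for example
#     COMPARATORS['gt'](11.2, 10.0)  # -> True
#     COMPARATORS['le'](11.2, 10.0)  # -> False
# A True result is what evaluate_rule() below counts as a sample "outside"
# the threshold.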


class ThresholdEvaluator(evaluator.Evaluator):

    # the sliding evaluation window is extended to allow
    # for reporting/ingestion lag
    look_back = 1

    def __init__(self, conf):
        super(ThresholdEvaluator, self).__init__(conf)
        self._cm_client = None

    @property
    def cm_client(self):
        if self._cm_client is None:
            auth_config = self.conf.service_credentials
            self._cm_client = ceiloclient.get_client(
                version=2,
                session=keystone_client.get_session(self.conf),
                # ceiloclient adapter options
                region_name=auth_config.region_name,
                interface=auth_config.interface,
            )
        return self._cm_client

    @classmethod
    def _bound_duration(cls, rule):
        """Bound the duration of the statistics query."""
        now = timeutils.utcnow()
        # when exclusion of weak datapoints is enabled, we extend
        # the look-back period so as to allow a clearer sample count
        # trend to be established
        look_back = (cls.look_back if not rule.get('exclude_outliers')
                     else rule['evaluation_periods'])
        window = ((rule.get('period', None) or rule['granularity'])
                  * (rule['evaluation_periods'] + look_back))
        start = now - datetime.timedelta(seconds=window)
        LOG.debug('query stats from %(start)s to '
                  '%(now)s', {'start': start, 'now': now})
        return start.isoformat(), now.isoformat()
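
    # Illustrative note (not part of the original file): with period=60,
    # evaluation_periods=3 and exclude_outliers unset, look_back stays 1, so
    # window = 60 * (3 + 1) = 240 and the query covers the 240 seconds
    # leading up to "now".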

    @staticmethod
    def _sanitize(rule, statistics):
        """Sanitize statistics."""
        LOG.debug('sanitize stats %s', statistics)
        if rule.get('exclude_outliers'):
            key = operator.attrgetter('count')
            mean = utils.mean(statistics, key)
            stddev = utils.stddev(statistics, key, mean)
            lower = mean - 2 * stddev
            upper = mean + 2 * stddev
            inliers, outliers = utils.anomalies(statistics, key, lower, upper)
            if outliers:
                LOG.debug('excluded weak datapoints with sample counts %s',
                          [s.count for s in outliers])
                statistics = inliers
            else:
                LOG.debug('no excluded weak datapoints')

        # in practice statistics are always sorted by period start, not
        # strictly required by the API though
        statistics = statistics[-rule['evaluation_periods']:]
        result_statistics = [getattr(stat, rule['statistic'])
                             for stat in statistics]
        LOG.debug('pruned statistics to %d', len(statistics))
        return result_statistics
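
    # Illustrative note (not part of the original file): a period whose sample
    # count lies more than two standard deviations below the mean count (for
    # example a count of 1 among periods that mostly report 10) is treated as
    # a weak datapoint and dropped before the threshold comparison.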

    def _statistics(self, rule, start, end):
        """Retrieve statistics over the current window."""
        after = dict(field='timestamp', op='ge', value=start)
        before = dict(field='timestamp', op='le', value=end)
        query = copy.copy(rule['query'])
        query.extend([before, after])
        LOG.debug('stats query %s', query)
        try:
            return self.cm_client.statistics.list(
                meter_name=rule['meter_name'], q=query,
                period=rule['period'])
        except Exception:
            LOG.exception(_('alarm stats retrieval failed'))
            return []

    @staticmethod
    def _reason_data(disposition, count, most_recent):
        """Create a reason data dictionary for this evaluator type."""
        return {'type': 'threshold', 'disposition': disposition,
                'count': count, 'most_recent': most_recent}

    @classmethod
    def _reason(cls, alarm, statistics, state, count):
        """Fabricate reason string."""
        if state == evaluator.OK:
            disposition = 'inside'
            count = len(statistics) - count
        else:
            disposition = 'outside'
        last = statistics[-1] if statistics else None
        transition = alarm.state != state
        reason_data = cls._reason_data(disposition, count, last)
        if transition:
            return (_('Transition to %(state)s due to %(count)d samples'
                      ' %(disposition)s threshold, most recent:'
                      ' %(most_recent)s')
                    % dict(reason_data, state=state)), reason_data
        return (_('Remaining as %(state)s due to %(count)d samples'
                  ' %(disposition)s threshold, most recent: %(most_recent)s')
                % dict(reason_data, state=state)), reason_data
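
    # Illustrative note (not part of the original file): a typical transition
    # reason reads "Transition to alarm due to 3 samples outside threshold,
    # most recent: 95.0".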

    def evaluate_rule(self, alarm_rule):
        """Evaluate alarm rule.

        :returns: state, trending state and statistics.
        """
        start, end = self._bound_duration(alarm_rule)
        statistics = self._statistics(alarm_rule, start, end)
        statistics = self._sanitize(alarm_rule, statistics)
        sufficient = len(statistics) >= alarm_rule['evaluation_periods']
        if not sufficient:
            return evaluator.UNKNOWN, None, statistics, len(statistics)

        def _compare(value):
            op = COMPARATORS[alarm_rule['comparison_operator']]
            limit = alarm_rule['threshold']
            LOG.debug('comparing value %(value)s against threshold'
                      ' %(limit)s', {'value': value, 'limit': limit})
            return op(value, limit)

        compared = list(six.moves.map(_compare, statistics))
        distilled = all(compared)
        unequivocal = distilled or not any(compared)
        number_outside = len([c for c in compared if c])

        if unequivocal:
            state = evaluator.ALARM if distilled else evaluator.OK
            return state, None, statistics, number_outside
        else:
            trending_state = evaluator.ALARM if compared[-1] else evaluator.OK
            return None, trending_state, statistics, number_outside
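
    # Illustrative note (not part of the original file): with three evaluation
    # periods, compared == [True, True, True] is unequivocal and yields ALARM,
    # [False, False, False] yields OK, while a mixed result such as
    # [False, True, True] only yields a trending state taken from the most
    # recent comparison (ALARM here, because compared[-1] is True).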

    def _transition_alarm(self, alarm, state, trending_state, statistics,
                          outside_count):
        unknown = alarm.state == evaluator.UNKNOWN
        continuous = alarm.repeat_actions

        if trending_state:
            if unknown or continuous:
                state = trending_state if unknown else alarm.state
                reason, reason_data = self._reason(alarm, statistics, state,
                                                   outside_count)
                self._refresh(alarm, state, reason, reason_data)
            return

        if state == evaluator.UNKNOWN and not unknown:
            LOG.warning(_LW('Expecting %(expected)d datapoints but only get '
                            '%(actual)d') % {
                'expected': alarm.rule['evaluation_periods'],
                'actual': len(statistics)})
            # The reason string deliberately keeps the old wording rather than
            # matching the log message, since third-party software may depend
            # on the old format.
            reason = _('%d datapoints are unknown') % alarm.rule[
                'evaluation_periods']
            last = None if not statistics else statistics[-1]
            reason_data = self._reason_data('unknown',
                                            alarm.rule['evaluation_periods'],
                                            last)
            self._refresh(alarm, state, reason, reason_data)
        elif state and (alarm.state != state or continuous):
            reason, reason_data = self._reason(alarm, statistics, state,
                                               outside_count)
            self._refresh(alarm, state, reason, reason_data)

    def evaluate(self, alarm):
        if not self.within_time_constraint(alarm):
            LOG.debug('Attempted to evaluate alarm %s, but it is not '
                      'within its time constraint.', alarm.alarm_id)
            return

        state, trending_state, statistics, outside_count = self.evaluate_rule(
            alarm.rule)
        self._transition_alarm(alarm, state, trending_state, statistics,
                               outside_count)
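For reference, the rule dictionary that evaluate_rule() consumes uses the keys referenced in the code above (meter_name, statistic, period, evaluation_periods, comparison_operator, threshold, exclude_outliers, query). A minimal illustrative example, with made-up values rather than anything taken from the package:

    rule = {
        'meter_name': 'cpu_util',
        'statistic': 'avg',
        'period': 60,
        'evaluation_periods': 3,
        'comparison_operator': 'gt',
        'threshold': 80.0,
        'exclude_outliers': False,
        'query': [{'field': 'resource_id', 'op': 'eq', 'value': 'INSTANCE_UUID'}],
    }
    # ThresholdEvaluator(conf).evaluate_rule(rule) would then query Ceilometer
    # statistics for the bounded window and compare each 'avg' value against 80.0.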