lib/ratelimit: Reduce false-positive and silent misses
Changes ------- * Reduce open-coded use of ratelimit_state structure fields. * Convert the ->missed field to atomic_t. * Count misses that are due to lock contention. * Eliminate jiffies=0 special case. * Reduce ___ratelimit() false-positive rate limiting (Petr Mladek). * Allow zero ->burst to hard-disable rate limiting. * Optimize away atomic operations when a miss is guaranteed. * Warn if ->interval or ->burst are negative (Petr Mladek). * Simplify the resulting code. A smoke test and stress test have been created, but they are not yet ready for mainline. With luck, we will offer them for the v6.17 merge window. -----BEGIN PGP SIGNATURE----- iQJHBAABCgAxFiEEbK7UrM+RBIrCoViJnr8S83LZ+4wFAmgzWdgTHHBhdWxtY2tA a2VybmVsLm9yZwAKCRCevxLzctn7jBnZD/wKD+5f8qwEuaib901yLb/s4ZkS3Aly mcPLAcTSFc7jp3c188V2qPAAgobQW+NnOLZ/TuB34tvS/Ngm/Yo1EPiHD2AXPzfY FPlgmjvOQQQo9dfA1PNOegh/aKYhMJrho85cilcM1TojuSSVbo1lG1FbuvqMJ9Ub jPHRB6KaFDnhwJkWHJ4Fjl1z1TQcyxjrBoswEcMCKapNqrm6IiXLvw03Nme3wa7F tr30xue5GV/FyHMa14g/8GSpZ88Lr5VGsOoC0wz2KhfMsZcFRkmslgm8mxHawwXj MbQaW7Th+fD7H0pC/lbHIiKaXvizYbQCPXr4qf5gfqNf4R/BHE1QdSTmP45kjEXO CmZEwwVx8cVdyoY9N9udDhNZly/U83G3F1n38jdM2SCPjn3F8BAanZRwakLEvmCC XUQ0bvzQvJl0LM5ktyaaMZdWf3p6uah7otryCPdsA5V7BFgyx5ZHniXY6v1JgfhX 2nmYRO3vEoQ39Z+vPtu7DvS64oe/aYgkSIoG68rKhSb4S+nIdIZ8zwB1af4Nkk8e YZwlwjIRw+RCu/QET4GXLE1tHQ031kWR/xG8nDnNGE3XCjnfRArAcdMZNDlc3U5k GT1g8zOJR2jmlEvWUZwpmclc1yeHXTK1P9nOHmzhxw26eiXmY353PZcND9Ktnt/a RH550D0vUqFM7A== =ivk+ -----END PGP SIGNATURE----- Merge tag 'ratelimit.2025.05.25a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu Pull rate-limit updates from Paul McKenney: "lib/ratelimit: Reduce false-positive and silent misses: - Reduce open-coded use of ratelimit_state structure fields. - Convert the ->missed field to atomic_t. - Count misses that are due to lock contention. - Eliminate jiffies=0 special case. - Reduce ___ratelimit() false-positive rate limiting (Petr Mladek). - Allow zero ->burst to hard-disable rate limiting. 
- Optimize away atomic operations when a miss is guaranteed. - Warn if ->interval or ->burst are negative (Petr Mladek). - Simplify the resulting code. A smoke test and stress test have been created, but they are not yet ready for mainline. With luck, we will offer them for the v6.17 merge window" * tag 'ratelimit.2025.05.25a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu: ratelimit: Drop redundant accesses to burst ratelimit: Use nolock_ret restructuring to collapse common case code ratelimit: Use nolock_ret label to collapse lock-failure code ratelimit: Use nolock_ret label to save a couple of lines of code ratelimit: Simplify common-case exit path ratelimit: Warn if ->interval or ->burst are negative ratelimit: Avoid atomic decrement under lock if already rate-limited ratelimit: Avoid atomic decrement if already rate-limited ratelimit: Don't flush misses counter if RATELIMIT_MSG_ON_RELEASE ratelimit: Force re-initialization when rate-limiting re-enabled ratelimit: Allow zero ->burst to disable ratelimiting ratelimit: Reduce ___ratelimit() false-positive rate limiting ratelimit: Avoid jiffies=0 special case ratelimit: Count misses due to lock contention ratelimit: Convert the ->missed field to atomic_t drm/amd/pm: Avoid open-coded use of ratelimit_state structure's internals drm/i915: Avoid open-coded use of ratelimit_state structure's ->missed field random: Avoid open-coded use of ratelimit_state structure's ->missed field ratelimit: Create functions to handle ratelimit_state internals
This commit is contained in:
commit
97851c6016
|
@@ -727,6 +727,7 @@ static void __cold _credit_init_bits(size_t bits)
|
|||
static DECLARE_WORK(set_ready, crng_set_ready);
|
||||
unsigned int new, orig, add;
|
||||
unsigned long flags;
|
||||
int m;
|
||||
|
||||
if (!bits)
|
||||
return;
|
||||
|
@@ -749,9 +750,9 @@ static void __cold _credit_init_bits(size_t bits)
|
|||
wake_up_interruptible(&crng_init_wait);
|
||||
kill_fasync(&fasync, SIGIO, POLL_IN);
|
||||
pr_notice("crng init done\n");
|
||||
if (urandom_warning.missed)
|
||||
pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
|
||||
urandom_warning.missed);
|
||||
m = ratelimit_state_get_miss(&urandom_warning);
|
||||
if (m)
|
||||
pr_notice("%d urandom warning(s) missed due to ratelimiting\n", m);
|
||||
} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
|
||||
spin_lock_irqsave(&base_crng.lock, flags);
|
||||
/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
|
||||
|
@@ -1467,7 +1468,7 @@ static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
|
|||
|
||||
if (!crng_ready()) {
|
||||
if (!ratelimit_disable && maxwarn <= 0)
|
||||
++urandom_warning.missed;
|
||||
ratelimit_state_inc_miss(&urandom_warning);
|
||||
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
|
||||
--maxwarn;
|
||||
pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
|
||||
|
|
|
@@ -1606,7 +1606,6 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
|
|||
struct drm_device *ddev = dev_get_drvdata(dev);
|
||||
struct amdgpu_device *adev = drm_to_adev(ddev);
|
||||
long throttling_logging_interval;
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
ret = kstrtol(buf, 0, &throttling_logging_interval);
|
||||
|
@@ -1617,18 +1616,12 @@ static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
|
|||
return -EINVAL;
|
||||
|
||||
if (throttling_logging_interval > 0) {
|
||||
raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
|
||||
/*
|
||||
* Reset the ratelimit timer internals.
|
||||
* This can effectively restart the timer.
|
||||
*/
|
||||
adev->throttling_logging_rs.interval =
|
||||
(throttling_logging_interval - 1) * HZ;
|
||||
adev->throttling_logging_rs.begin = 0;
|
||||
adev->throttling_logging_rs.printed = 0;
|
||||
adev->throttling_logging_rs.missed = 0;
|
||||
raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
|
||||
|
||||
ratelimit_state_reset_interval(&adev->throttling_logging_rs,
|
||||
(throttling_logging_interval - 1) * HZ);
|
||||
atomic_set(&adev->throttling_logging_enabled, 1);
|
||||
} else {
|
||||
atomic_set(&adev->throttling_logging_enabled, 0);
|
||||
|
|
|
@@ -1666,6 +1666,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
|
|||
struct i915_perf *perf = stream->perf;
|
||||
struct intel_gt *gt = stream->engine->gt;
|
||||
struct i915_perf_group *g = stream->engine->oa_group;
|
||||
int m;
|
||||
|
||||
if (WARN_ON(stream != g->exclusive_stream))
|
||||
return;
|
||||
|
@@ -1690,10 +1691,9 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
|
|||
free_oa_configs(stream);
|
||||
free_noa_wait(stream);
|
||||
|
||||
if (perf->spurious_report_rs.missed) {
|
||||
gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n",
|
||||
perf->spurious_report_rs.missed);
|
||||
}
|
||||
m = ratelimit_state_get_miss(&perf->spurious_report_rs);
|
||||
if (m)
|
||||
gt_notice(gt, "%d spurious OA report notices suppressed due to ratelimiting\n", m);
|
||||
}
|
||||
|
||||
static void gen7_init_oa_buffer(struct i915_perf_stream *stream)
|
||||
|
|
|
@@ -22,16 +22,43 @@ static inline void ratelimit_default_init(struct ratelimit_state *rs)
|
|||
DEFAULT_RATELIMIT_BURST);
|
||||
}
|
||||
|
||||
static inline void ratelimit_state_inc_miss(struct ratelimit_state *rs)
|
||||
{
|
||||
atomic_inc(&rs->missed);
|
||||
}
|
||||
|
||||
static inline int ratelimit_state_get_miss(struct ratelimit_state *rs)
|
||||
{
|
||||
return atomic_read(&rs->missed);
|
||||
}
|
||||
|
||||
static inline int ratelimit_state_reset_miss(struct ratelimit_state *rs)
|
||||
{
|
||||
return atomic_xchg_relaxed(&rs->missed, 0);
|
||||
}
|
||||
|
||||
static inline void ratelimit_state_reset_interval(struct ratelimit_state *rs, int interval_init)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
raw_spin_lock_irqsave(&rs->lock, flags);
|
||||
rs->interval = interval_init;
|
||||
rs->flags &= ~RATELIMIT_INITIALIZED;
|
||||
atomic_set(&rs->rs_n_left, rs->burst);
|
||||
ratelimit_state_reset_miss(rs);
|
||||
raw_spin_unlock_irqrestore(&rs->lock, flags);
|
||||
}
|
||||
|
||||
static inline void ratelimit_state_exit(struct ratelimit_state *rs)
|
||||
{
|
||||
int m;
|
||||
|
||||
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE))
|
||||
return;
|
||||
|
||||
if (rs->missed) {
|
||||
pr_warn("%s: %d output lines suppressed due to ratelimiting\n",
|
||||
current->comm, rs->missed);
|
||||
rs->missed = 0;
|
||||
}
|
||||
m = ratelimit_state_reset_miss(rs);
|
||||
if (m)
|
||||
pr_warn("%s: %d output lines suppressed due to ratelimiting\n", current->comm, m);
|
||||
}
|
||||
|
||||
static inline void
|
||||
|
|
|
@@ -11,14 +11,15 @@
|
|||
|
||||
/* issue num suppressed message on exit */
|
||||
#define RATELIMIT_MSG_ON_RELEASE BIT(0)
|
||||
#define RATELIMIT_INITIALIZED BIT(1)
|
||||
|
||||
struct ratelimit_state {
|
||||
raw_spinlock_t lock; /* protect the state */
|
||||
|
||||
int interval;
|
||||
int burst;
|
||||
int printed;
|
||||
int missed;
|
||||
atomic_t rs_n_left;
|
||||
atomic_t missed;
|
||||
unsigned int flags;
|
||||
unsigned long begin;
|
||||
};
|
||||
|
|
|
@@ -33,44 +33,73 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
|
|||
int interval = READ_ONCE(rs->interval);
|
||||
int burst = READ_ONCE(rs->burst);
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
if (!interval)
|
||||
return 1;
|
||||
int ret = 0;
|
||||
|
||||
/*
|
||||
* If we contend on this state's lock then almost
|
||||
* by definition we are too busy to print a message,
|
||||
* in addition to the one that will be printed by
|
||||
* the entity that is holding the lock already:
|
||||
* Zero interval says never limit, otherwise, non-positive burst
|
||||
* says always limit.
|
||||
*/
|
||||
if (!raw_spin_trylock_irqsave(&rs->lock, flags))
|
||||
return 0;
|
||||
if (interval <= 0 || burst <= 0) {
|
||||
WARN_ONCE(interval < 0 || burst < 0, "Negative interval (%d) or burst (%d): Uninitialized ratelimit_state structure?\n", interval, burst);
|
||||
ret = interval == 0 || burst > 0;
|
||||
if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) || (!interval && !burst) ||
|
||||
!raw_spin_trylock_irqsave(&rs->lock, flags))
|
||||
goto nolock_ret;
|
||||
|
||||
if (!rs->begin)
|
||||
/* Force re-initialization once re-enabled. */
|
||||
rs->flags &= ~RATELIMIT_INITIALIZED;
|
||||
goto unlock_ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* If we contend on this state's lock then just check if
|
||||
* the current burst is used or not. It might cause
|
||||
* false positive when we are past the interval and
|
||||
* the current lock owner is just about to reset it.
|
||||
*/
|
||||
if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
|
||||
if (READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED &&
|
||||
atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
|
||||
ret = 1;
|
||||
goto nolock_ret;
|
||||
}
|
||||
|
||||
if (!(rs->flags & RATELIMIT_INITIALIZED)) {
|
||||
rs->begin = jiffies;
|
||||
rs->flags |= RATELIMIT_INITIALIZED;
|
||||
atomic_set(&rs->rs_n_left, rs->burst);
|
||||
}
|
||||
|
||||
if (time_is_before_jiffies(rs->begin + interval)) {
|
||||
if (rs->missed) {
|
||||
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
|
||||
int m;
|
||||
|
||||
/*
|
||||
* Reset rs_n_left ASAP to reduce false positives
|
||||
* in parallel calls, see above.
|
||||
*/
|
||||
atomic_set(&rs->rs_n_left, rs->burst);
|
||||
rs->begin = jiffies;
|
||||
|
||||
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
|
||||
m = ratelimit_state_reset_miss(rs);
|
||||
if (m) {
|
||||
printk_deferred(KERN_WARNING
|
||||
"%s: %d callbacks suppressed\n",
|
||||
func, rs->missed);
|
||||
rs->missed = 0;
|
||||
"%s: %d callbacks suppressed\n", func, m);
|
||||
}
|
||||
}
|
||||
rs->begin = jiffies;
|
||||
rs->printed = 0;
|
||||
}
|
||||
if (burst && burst > rs->printed) {
|
||||
rs->printed++;
|
||||
|
||||
/* Note that the burst might be taken by a parallel call. */
|
||||
if (atomic_read(&rs->rs_n_left) > 0 && atomic_dec_return(&rs->rs_n_left) >= 0)
|
||||
ret = 1;
|
||||
} else {
|
||||
rs->missed++;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
unlock_ret:
|
||||
raw_spin_unlock_irqrestore(&rs->lock, flags);
|
||||
|
||||
nolock_ret:
|
||||
if (!ret)
|
||||
ratelimit_state_inc_miss(rs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(___ratelimit);
|
||||
|
|
Loading…
Reference in New Issue