/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/* Return number of callbacks in the specified callback list. */
static inline long rcu_cblist_n_cbs(struct rcu_cblist *rclp)
{
        return READ_ONCE(rclp->len);
}

/*
 * Account for the fact that a previously dequeued callback turned out
 * to be marked as lazy.
 */
static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
{
        rclp->len_lazy--;
}

void rcu_cblist_init(struct rcu_cblist *rclp);
void rcu_cblist_enqueue(struct rcu_cblist *rclp, struct rcu_head *rhp);
void rcu_cblist_flush_enqueue(struct rcu_cblist *drclp,
                              struct rcu_cblist *srclp,
                              struct rcu_head *rhp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);
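
/*
 * Illustrative sketch, not taken from any actual caller: drain an
 * rcu_cblist by repeatedly dequeuing and invoking callbacks, crediting
 * back any that turn out to be lazy.  The names "mycblist" and
 * "example_cb_is_lazy()" are hypothetical placeholders; real callers
 * detect laziness differently.
 *
 *      struct rcu_head *rhp;
 *
 *      while ((rhp = rcu_cblist_dequeue(&mycblist)) != NULL) {
 *              if (example_cb_is_lazy(rhp))    // Hypothetical predicate.
 *                      rcu_cblist_dequeued_lazy(&mycblist);
 *              rhp->func(rhp);                 // Invoke the callback.
 *      }
 */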

/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful! The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure. When callbacks are being invoked, they are
 * removed as a group. If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list. Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
        return !READ_ONCE(rsclp->head);
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
#ifdef CONFIG_RCU_NOCB_CPU
        return atomic_long_read(&rsclp->len);
#else
        return READ_ONCE(rsclp->len);
#endif
}
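
/*
 * Illustrative sketch of the distinction documented above; not code
 * from any actual caller, and "myrsclp" is a hypothetical pointer to a
 * struct rcu_segcblist.  While callbacks are being invoked, ->head can
 * transiently be NULL even though callbacks are still outstanding, so
 * consult the count rather than the list pointer when an accurate
 * answer matters.
 *
 *      if (rcu_segcblist_n_cbs(myrsclp))
 *              schedule_more_work();   // Hypothetical: callbacks remain
 *                                      // even if ->head is currently NULL.
 */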

/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
{
        return READ_ONCE(rsclp->len_lazy);
}

/* Return number of non-lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
{
        return rcu_segcblist_n_cbs(rsclp) - READ_ONCE(rsclp->len_lazy);
}

/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
        return rsclp->enabled;
}

/* Is the specified rcu_segcblist offloaded? */
static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
{
        return rsclp->offloaded;
}

/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks? (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
        return !READ_ONCE(*READ_ONCE(rsclp->tails[seg]));
}
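
/*
 * Illustrative sketch, not code from any actual caller ("myrsclp" is a
 * hypothetical pointer): the segment indices come from
 * <linux/rcu_segcblist.h>, so rcu_segcblist_restempty() can be used to
 * ask, for example, whether anything at all is still waiting for a
 * grace period.
 *
 *      // True if every callback present, if any, is already in the
 *      // done-to-invoke segment, that is, nothing is waiting on a
 *      // current or future grace period.
 *      if (rcu_segcblist_restempty(myrsclp, RCU_DONE_TAIL))
 *              return;
 */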

void rcu_segcblist_inc_len(struct rcu_segcblist *rsclp);
void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
bool rcu_segcblist_nextgp(struct rcu_segcblist *rsclp, unsigned long *lp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
                           struct rcu_head *rhp, bool lazy);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
                           struct rcu_head *rhp, bool lazy);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
                                 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
                                    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
                                    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
                                struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
                                   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
                                   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
                         struct rcu_segcblist *src_rsclp);
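
/*
 * Illustrative sketch of how the extract/insert functions above are
 * meant to pair up when invoking ready callbacks.  This loosely mirrors
 * the pattern used by callers such as rcu_do_batch(), but the locking,
 * batch limits, and error handling that real callers need are omitted,
 * and "myrsclp" is a hypothetical pointer:
 *
 *      struct rcu_cblist rcl;
 *      struct rcu_head *rhp;
 *
 *      rcu_cblist_init(&rcl);
 *      // With the appropriate lock held, pull out the callbacks whose
 *      // grace period has elapsed, along with their counts.
 *      rcu_segcblist_extract_count(myrsclp, &rcl);
 *      rcu_segcblist_extract_done_cbs(myrsclp, &rcl);
 *      // Invoke them, typically without holding that lock.
 *      while ((rhp = rcu_cblist_dequeue(&rcl)) != NULL)
 *              rhp->func(rhp);
 *      // Re-acquire the lock, then return any leftover callbacks and
 *      // the count adjustment to the segmented list.
 *      rcu_segcblist_insert_done_cbs(myrsclp, &rcl);
 *      rcu_segcblist_insert_count(myrsclp, &rcl);
 */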