Add two helper functions of irq_context: task_irq_context(): return the encoded irq_context of the task, the return value is encoded in the same way as ->irq_context of held_lock. Always return 0 if !(CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING)
is_same_irq_context(): compare whether two irq_context values are the same. Always return true if !(CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING) These two functions are added for the future use of irq_context by LOCKED_ACCESS, because LOCKED_ACCESS needs to get the irq_context value of a task when a data access happens rather than when a lock is acquired, so it needs a way other than getting the value from held_lock::irq_context Signed-off-by: Boqun Feng <boqun.feng@gmail.com> --- kernel/locking/lockdep.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 60ace56..f36df37 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -2839,6 +2839,16 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) return 1; } +static unsigned int task_irq_context(struct task_struct *task) +{ + return 2 * (task->hardirq_context ? 1 : 0) + task->softirq_context; +} + +static bool is_same_irq_context(unsigned int ctx1, unsigned int ctx2) +{ + return ctx1 == ctx2; +} + static int separate_irq_context(struct task_struct *curr, struct held_lock *hlock) { @@ -2847,8 +2857,7 @@ static int separate_irq_context(struct task_struct *curr, /* * Keep track of points where we cross into an interrupt context: */ - hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + - curr->softirq_context; + hlock->irq_context = task_irq_context(curr); if (depth) { struct held_lock *prev_hlock; @@ -2880,6 +2889,9 @@ static inline int mark_irqflags(struct task_struct *curr, return 1; } +#define task_irq_context(task) 0 +#define is_same_irq_context(ctx1, ctx2) true + static inline int separate_irq_context(struct task_struct *curr, struct held_lock *hlock) { -- 2.7.1