kernel/linux-rt-4.4.41/include/linux/lglock.h
/*
 * Specialised local-global spinlock. Can only be declared as global variables
 * to avoid overhead and keep things simple (and we don't want to start using
 * these inside dynamically allocated structures).
 *
 * "local/global locks" (lglocks) can be used to:
 *
 * - Provide fast exclusive access to per-CPU data, with exclusive access to
 *   another CPU's data allowed but possibly subject to contention, and to
 *   provide very slow exclusive access to all per-CPU data.
 * - Or to provide very fast and scalable read serialisation, and to provide
 *   very slow exclusive serialisation of data (not necessarily per-CPU data).
 *
 * Brlocks are also implemented as a short-hand notation for the latter use
 * case.
 *
 * Copyright 2009, 2010, Nick Piggin, Novell Inc.
 */
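
/*
 * Usage sketch (hypothetical example, not taken from any in-kernel user):
 * a per-CPU counter illustrating the first use case above. lg_local_lock()
 * gives fast exclusive access to this CPU's slot, while lg_global_lock()
 * takes every per-CPU lock for the slow, fully exclusive summation pass.
 * counter_lglock, counter, counter_add() and counter_sum() are made-up
 * names; lg_lock_init(&counter_lglock, "counter_lglock") would typically
 * be called once at init time to register the lock with lockdep.
 *
 *	DEFINE_STATIC_LGLOCK(counter_lglock);
 *	static DEFINE_PER_CPU(long, counter);
 *
 *	static void counter_add(long n)
 *	{
 *		lg_local_lock(&counter_lglock);
 *		this_cpu_add(counter, n);
 *		lg_local_unlock(&counter_lglock);
 *	}
 *
 *	static long counter_sum(void)
 *	{
 *		long sum = 0;
 *		int cpu;
 *
 *		lg_global_lock(&counter_lglock);
 *		for_each_possible_cpu(cpu)
 *			sum += per_cpu(counter, cpu);
 *		lg_global_unlock(&counter_lglock);
 *		return sum;
 *	}
 */
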
#ifndef __LINUX_LGLOCK_H
#define __LINUX_LGLOCK_H

#include <linux/spinlock.h>
#include <linux/lockdep.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
  
#ifdef CONFIG_SMP

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP lockdep_init_map
#else
#define LOCKDEP_INIT_MAP(a, b, c, d)
#endif
  
struct lglock {
#ifdef CONFIG_PREEMPT_RT_FULL
	struct rt_mutex __percpu *lock;
#else
	arch_spinlock_t __percpu *lock;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lock_class_key lock_key;
	struct lockdep_map    lock_dep_map;
#endif
};
  
#ifdef CONFIG_PREEMPT_RT_FULL
# define DEFINE_LGLOCK(name)						\
	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock)		\
	= __RT_MUTEX_INITIALIZER(name ## _lock);			\
	struct lglock name = { .lock = &name ## _lock }

# define DEFINE_STATIC_LGLOCK(name)					\
	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock)		\
	= __RT_MUTEX_INITIALIZER(name ## _lock);			\
	static struct lglock name = { .lock = &name ## _lock }

#else

#define DEFINE_LGLOCK(name)						\
	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
	= __ARCH_SPIN_LOCK_UNLOCKED;					\
	struct lglock name = { .lock = &name ## _lock }

#define DEFINE_STATIC_LGLOCK(name)					\
	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
	= __ARCH_SPIN_LOCK_UNLOCKED;					\
	static struct lglock name = { .lock = &name ## _lock }
#endif
  
void lg_lock_init(struct lglock *lg, char *name);

void lg_local_lock(struct lglock *lg);
void lg_local_unlock(struct lglock *lg);
void lg_local_lock_cpu(struct lglock *lg, int cpu);
void lg_local_unlock_cpu(struct lglock *lg, int cpu);

void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
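
/*
 * Sketch (hypothetical, continuing the counter example above): moving a
 * value between two CPUs' slots needs both per-CPU locks at once.
 * lg_double_lock() is expected to take the two locks in a consistent
 * internal order, so the caller does not have to sort cpu1/cpu2 itself;
 * counter_move() is a made-up name.
 *
 *	static void counter_move(int from, int to, long n)
 *	{
 *		lg_double_lock(&counter_lglock, from, to);
 *		per_cpu(counter, from) -= n;
 *		per_cpu(counter, to) += n;
 *		lg_double_unlock(&counter_lglock, from, to);
 *	}
 */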
  
void lg_global_lock(struct lglock *lg);
void lg_global_unlock(struct lglock *lg);

#ifndef CONFIG_PREEMPT_RT_FULL
#define lg_global_trylock_relax(name)	lg_global_lock(name)
#else
void lg_global_trylock_relax(struct lglock *lg);
#endif
  
#else
/* When !CONFIG_SMP, map lglock to spinlock */
#define lglock spinlock
#define DEFINE_LGLOCK(name) DEFINE_SPINLOCK(name)
#define DEFINE_STATIC_LGLOCK(name) static DEFINE_SPINLOCK(name)
#define lg_lock_init(lg, name) spin_lock_init(lg)
#define lg_local_lock spin_lock
#define lg_local_unlock spin_unlock
#define lg_local_lock_cpu(lg, cpu) spin_lock(lg)
#define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg)
#define lg_global_lock spin_lock
#define lg_global_unlock spin_unlock
#endif
  
#endif