Blame view

kernel/linux-imx6_3.14.28/include/linux/lockref.h 1.38 KB
6b13f685e   김민수   Initial BSP addition
  #ifndef __LINUX_LOCKREF_H
  #define __LINUX_LOCKREF_H
  
  /*
   * Locked reference counts.
   *
   * These are different from just plain atomic refcounts in that they
   * are atomic with respect to the spinlock that goes with them.  In
   * particular, there can be implementations that don't actually get
   * the spinlock for the common decrement/increment operations, but they
   * still have to check that the operation is done semantically as if
   * the spinlock had been taken (using a cmpxchg operation that covers
   * both the lock and the count word, or using memory transactions, for
   * example).
   */
  
  #include <linux/spinlock.h>
  #include <generated/bounds.h>
  
  #define USE_CMPXCHG_LOCKREF \
  	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
  	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
  
  struct lockref {
  	union {
  #if USE_CMPXCHG_LOCKREF
  		aligned_u64 lock_count;
  #endif
  		struct {
  			spinlock_t lock;
  			unsigned int count;
  		};
  	};
  };
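
  /*
   * Illustrative sketch, not part of the original header: how a
   * USE_CMPXCHG_LOCKREF implementation can bump the count without ever
   * touching the spinlock, as the comment at the top describes.  The
   * real loop lives in lib/lockref.c (the CMPXCHG_LOOP() macro); the
   * helper name below is hypothetical and the block is compiled out.
   */
  #if 0	/* example only */
  static inline int lockref_get_fast(struct lockref *lockref)
  {
  	struct lockref old, new;
  
  	old.lock_count = ACCESS_ONCE(lockref->lock_count);
  	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
  		new.lock_count = old.lock_count;
  		new.count++;			/* update the count half of the word */
  		if (cmpxchg64(&lockref->lock_count, old.lock_count,
  			      new.lock_count) == old.lock_count)
  			return 1;		/* got a reference, lock untouched */
  		old.lock_count = ACCESS_ONCE(lockref->lock_count);
  	}
  	return 0;	/* lock is held; caller falls back to the locked path */
  }
  #endif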
  
  extern void lockref_get(struct lockref *);
  extern int lockref_get_not_zero(struct lockref *);
  extern int lockref_get_or_lock(struct lockref *);
  extern int lockref_put_or_lock(struct lockref *);
  
  extern void lockref_mark_dead(struct lockref *);
  extern int lockref_get_not_dead(struct lockref *);
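
  /*
   * Usage sketch, not part of the original header: the typical put-side
   * pattern.  lockref_put_or_lock() decrements locklessly while the count
   * stays above 1; otherwise it returns 0 with the spinlock held, so the
   * final teardown runs under the lock.  The object type and free routine
   * are hypothetical, and the block is compiled out.
   */
  #if 0	/* example only */
  void my_obj_put(struct my_obj *obj)
  {
  	if (lockref_put_or_lock(&obj->ref))
  		return;			/* fast path: count dropped, lock never taken */
  	/* slow path: count is 1 and obj->ref.lock is now held */
  	lockref_mark_dead(&obj->ref);	/* block further lockless gets */
  	spin_unlock(&obj->ref.lock);
  	my_obj_free(obj);
  }
  #endif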
  
  /* Must be called under spinlock for reliable results */
  static inline int __lockref_is_dead(const struct lockref *l)
  {
  	return ((int)l->count < 0);
  }
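
  /*
   * Lookup-side sketch, not part of the original header: pairing
   * lockref_mark_dead() with lockref_get_not_dead().  A lockless
   * (e.g. RCU-protected) lookup can race with teardown; since a dead
   * lockref has a negative count, lockref_get_not_dead() refuses to
   * resurrect it.  All names below are hypothetical; the block is
   * compiled out.
   */
  #if 0	/* example only */
  struct my_obj *my_obj_lookup(struct my_table *tbl, unsigned long key)
  {
  	struct my_obj *obj;
  
  	rcu_read_lock();
  	obj = my_table_find(tbl, key);		/* lockless walk */
  	if (obj && !lockref_get_not_dead(&obj->ref))
  		obj = NULL;			/* lost the race with teardown */
  	rcu_read_unlock();
  	return obj;
  }
  #endif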
  
  #endif /* __LINUX_LOCKREF_H */