#ifndef __ASM_METAG_ATOMIC_LOCK1_H
#define __ASM_METAG_ATOMIC_LOCK1_H

#define ATOMIC_INIT(i)	{ (i) }

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/global_lock.h>

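/*
 * A plain load is sufficient here: a naturally aligned word-sized read is
 * a single access, so no lock is needed just to observe the counter.
 */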
static inline int atomic_read(const atomic_t *v)
{
	return (v)->counter;
}

/*
 * atomic_set needs to take the lock to protect atomic_add_unless from a
 * possible race, as it reads the counter twice:
 *
 *  CPU0                               CPU1
 *  atomic_add_unless(1, 0)
 *    ret = v->counter (non-zero)
 *    if (ret != u)                    v->counter = 0
 *      v->counter += 1 (counter set to 1)
 *
 * Making atomic_set take the lock ensures that ordering and logical
 * consistency are preserved.
 */
static inline int atomic_set(atomic_t *v, int i)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter = i;
	__global_unlock1(flags);
	return i;
}
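
/*
 * With both sides under the lock, the interleaving above resolves safely:
 * CPU1's store of 0 either completes before CPU0 takes the lock (so CPU0
 * reads 0 == u and skips the add), or is held off until CPU0's whole
 * read-check-add sequence has finished, so the counter is never left
 * half-updated.
 */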

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	fence();							\
	v->counter c_op i;						\
	__global_unlock1(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long result;						\
	unsigned long flags;						\
									\
	__global_lock1(flags);						\
	result = v->counter;						\
	result c_op i;							\
	fence();							\
	v->counter = result;						\
	__global_unlock1(flags);					\
									\
	return result;							\
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
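
/*
 * The two invocations above generate atomic_add(), atomic_add_return(),
 * atomic_sub() and atomic_sub_return(), each performing its
 * read-modify-write under the global lock.
 */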

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter &= ~mask;
	__global_unlock1(flags);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	__global_lock1(flags);
	fence();
	v->counter |= mask;
	__global_unlock1(flags);
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret == old) {
		fence();
		v->counter = new;
	}
	__global_unlock1(flags);

	return ret;
}
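
/*
 * As with any cmpxchg, the value observed before any store is returned;
 * callers compare it against 'old' to tell whether the swap happened.
 */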

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
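/*
 * atomic_xchg() defers to the architecture's xchg() from <asm/cmpxchg.h>,
 * which in this lock1 configuration is expected to take the same global
 * lock around its load/store pair.
 */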

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter;
	if (ret != u) {
		fence();
		v->counter += a;
	}
	__global_unlock1(flags);

	return ret;
}

static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;

	__global_lock1(flags);
	ret = v->counter - i;
	if (ret >= 0) {
		fence();
		v->counter = ret;
	}
	__global_unlock1(flags);

	return ret;
}
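
/*
 * The generic atomic code is expected to build atomic_dec_if_positive()
 * on top of this as atomic_sub_if_positive(1, v), e.g. to drain a counter
 * without ever letting it drop below zero.
 */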

#endif /* __ASM_METAG_ATOMIC_LOCK1_H */