/*
   Unix SMB/CIFS implementation.

   trivial database library

   Copyright (C) Anton Blanchard                   2001

     ** NOTE! The following LGPL license applies to the tdb
     ** library. This does NOT imply that all of Samba is released
     ** under the LGPL

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2 of the License, or (at your option) any later version.

   This library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/stat.h>
#include <time.h>
#include <signal.h>
#ifdef USE_SCHED_YIELD
#include <sched.h>      /* sched_yield() */
#endif
#include "tdb.h"
#include "spinlock.h"

#define DEBUG

#ifdef USE_SPINLOCKS

/*
 * ARCH SPECIFIC
 */

#if defined(SPARC_SPINLOCKS)

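/*
 * SPARC: ldstub ("load-store unsigned byte") atomically reads the lock
 * byte and stores 0xff into it.  A zero result means the byte was
 * clear, i.e. the lock was free and is now ours; anything else means
 * another CPU already holds it.  Convention: 0 == unlocked,
 * non-zero == locked.
 */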
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        asm volatile("ldstub    [%1], %0"
                : "=r" (result)
                : "r" (lock)
                : "memory");

        return (result == 0) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(POWERPC_SPINLOCKS)

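/*
 * PowerPC: lwarx/stwcx. form a load-reserve/store-conditional pair.
 * The loop reloads the lock word until it is seen free, then tries to
 * store 1; the store fails (and we retry) if any other CPU touched the
 * word since the lwarx.  The isync after a successful store keeps the
 * critical section's loads from being executed ahead of the lock
 * acquisition, and the eieio in __spin_unlock orders the critical
 * section's stores before the store that releases the lock.
 */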
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int result;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        li              %0,0\n\
        bne-            2f\n\
        li              %0,1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(result)
        : "r"(lock)
        : "cr0", "memory");

        return (result == 1) ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("eieio":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#elif defined(INTEL_SPINLOCKS)

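/*
 * x86: xchgl with a memory operand is implicitly locked and atomically
 * swaps the lock word with a register.  Note the inverted convention
 * in this variant: the lock is initialised to 1 and xchgl swaps in 0,
 * so 1 == unlocked and 0 == locked.  Fetching the old value 1 means we
 * took the lock.
 */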
static inline int __spin_trylock(spinlock_t *lock)
{
        int oldval;

        asm volatile("xchgl %0,%1"
                : "=r" (oldval), "=m" (*lock)
                : "0" (0)
                : "memory");

        return oldval > 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 1;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 1;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 1);
}

#elif defined(MIPS_SPINLOCKS) && defined(sgi) && (_COMPILER_VERSION >= 730)

/* Implement spinlocks on IRIX using the MIPSPro atomic fetch operations. See
 * sync(3) for the details of the intrinsic operations.
 *
 * "sgi" and "_COMPILER_VERSION" are always defined by MIPSPro.
 */

#ifdef STANDALONE

/* MIPSPro 7.3 has "__inline" as an extension, but not "inline". */
#define inline __inline

#endif /* STANDALONE */

/* Returns 0 if the lock is acquired, EBUSY otherwise. */
static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int val;
        val = __lock_test_and_set(lock, 1);
        return val == 0 ? 0 : EBUSY;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        __lock_release(lock);
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        __lock_release(lock);
}

/* Returns 1 if the lock is held, 0 otherwise. */
static inline int __spin_is_locked(spinlock_t *lock)
{
        unsigned int val;
        val = __add_and_fetch(lock, 0);
        return val;
}

#elif defined(MIPS_SPINLOCKS)

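/*
 * MIPS: ll ("load linked") and sc ("store conditional") form the same
 * kind of reservation pair as lwarx/stwcx. on PowerPC.  sc writes its
 * register back to memory only if nothing intervened since the ll,
 * and leaves 1 in the register on success, 0 on failure.
 */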
static inline unsigned int load_linked(unsigned long addr)
{
        unsigned int res;

        __asm__ __volatile__("ll\t%0,(%1)"
                : "=r" (res)
                : "r" (addr));

        return res;
}

static inline unsigned int store_conditional(unsigned long addr, unsigned int value)
{
        unsigned int res;

        __asm__ __volatile__("sc\t%0,(%2)"
                : "=r" (res)
                : "0" (value), "r" (addr));
        return res;
}

static inline int __spin_trylock(spinlock_t *lock)
{
        unsigned int mw;

        do {
                /* explicit casts: the helpers take an unsigned long address */
                mw = load_linked((unsigned long)lock);
                if (mw)
                        return EBUSY;
        } while (!store_conditional((unsigned long)lock, 1));

        asm volatile("":::"memory");

        return 0;
}

static inline void __spin_unlock(spinlock_t *lock)
{
        asm volatile("":::"memory");
        *lock = 0;
}

static inline void __spin_lock_init(spinlock_t *lock)
{
        *lock = 0;
}

static inline int __spin_is_locked(spinlock_t *lock)
{
        return (*lock != 0);
}

#else
#error Need to implement spinlock code in spinlock.c
#endif


/*
 * OS SPECIFIC
 */

static void yield_cpu(void)
{
#ifdef USE_SCHED_YIELD
        sched_yield();
#else
        struct timespec tm;

        /* Linux will busy loop for delays < 2ms on real time tasks */
        tm.tv_sec = 0;
        tm.tv_nsec = 2000000L + 1;
        nanosleep(&tm, NULL);
#endif
}

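/*
 * Guess whether we are on a multiprocessor.  _SC_NPROC_ONLN is not
 * available on every system (it is the IRIX spelling; many systems use
 * _SC_NPROCESSORS_ONLN instead), which is presumably why configure
 * gates it behind SYSCONF_SC_NPROC_ONLN.  When we cannot tell, assume
 * a uniprocessor and always yield instead of spinning.
 */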
static int this_is_smp(void)
{
#if defined(HAVE_SYSCONF) && defined(SYSCONF_SC_NPROC_ONLN)
        return (sysconf(_SC_NPROC_ONLN) > 1) ? 1 : 0;
#else
        return 0;
#endif
}

/*
 * GENERIC
 */

static int smp_machine = 0;

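/*
 * Acquire with bounded busy-waiting: on an SMP machine, spin on the
 * read-only __spin_is_locked() test for up to MAX_BUSY_LOOPS
 * iterations before yielding the CPU; on a uniprocessor spinning can
 * never help, so yield immediately.
 */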
static inline void __spin_lock(spinlock_t *lock)
{
        int ntries = 0;

        while(__spin_trylock(lock)) {
                while(__spin_is_locked(lock)) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

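/*
 * Reader/writer locks built from one spinlock plus a count word:
 * "count" holds the number of active readers, and a writer marks the
 * lock by setting the RWLOCK_BIAS bit (defined in spinlock.h).
 * Readers may enter whenever the bias bit is clear; a writer must wait
 * until the count drops to zero, so a writer can starve behind a
 * steady stream of readers.
 */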
static void __read_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (!(rwlock->count & RWLOCK_BIAS)) {
                        rwlock->count++;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count & RWLOCK_BIAS) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_lock(tdb_rwlock_t *rwlock)
{
        int ntries = 0;

        while(1) {
                __spin_lock(&rwlock->lock);

                if (rwlock->count == 0) {
                        rwlock->count |= RWLOCK_BIAS;
                        __spin_unlock(&rwlock->lock);
                        return;
                }

                __spin_unlock(&rwlock->lock);

                while(rwlock->count != 0) {
                        if (smp_machine && ntries++ < MAX_BUSY_LOOPS)
                                continue;
                        yield_cpu();
                }
        }
}

static void __write_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!(rwlock->count & RWLOCK_BIAS))
                fprintf(stderr, "bug: write_unlock\n");
#endif

        rwlock->count &= ~RWLOCK_BIAS;
        __spin_unlock(&rwlock->lock);
}

static void __read_unlock(tdb_rwlock_t *rwlock)
{
        __spin_lock(&rwlock->lock);

#ifdef DEBUG
        if (!rwlock->count)
                fprintf(stderr, "bug: read_unlock with no readers\n");

        if (rwlock->count & RWLOCK_BIAS)
                fprintf(stderr, "bug: read_unlock while write locked\n");
#endif

        rwlock->count--;
        __spin_unlock(&rwlock->lock);
}

/* TDB SPECIFIC */

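/*
 * The rwlock array lives inside the mmapped database file, at offset
 * tdb->header.rwlocks from the start of the map: one tdb_rwlock_t per
 * hash chain, preceded by one for the allocation list.  Callers pass
 * list = -1 for the allocation list, which is why every lookup below
 * indexes rwlocks[list+1].
 */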
/* lock a list in the database. list -1 is the alloc list */
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_lock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_lock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }
        return 0;
}

/* unlock the database. */
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type)
{
        tdb_rwlock_t *rwlocks;

        if (!tdb->map_ptr) return -1;
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);

        switch(rw_type) {
        case F_RDLCK:
                __read_unlock(&rwlocks[list+1]);
                break;

        case F_WRLCK:
                __write_unlock(&rwlocks[list+1]);
                break;

        default:
                return TDB_ERRCODE(TDB_ERR_LOCK, -1);
        }

        return 0;
}

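/*
 * Build and append the initial (all unlocked) rwlock array when a
 * database is created.  The caller is presumed to have positioned the
 * file offset just past the header, so the array lands at the offset
 * later recorded in header.rwlocks.
 */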
int tdb_create_rwlocks(int fd, unsigned int hash_size)
{
        unsigned size, i;
        tdb_rwlock_t *rwlocks;

        size = TDB_SPINLOCK_SIZE(hash_size);
        rwlocks = malloc(size);
        if (!rwlocks)
                return -1;

        for(i = 0; i < hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }

        /* Write it out (appending to end) */
        if (write(fd, rwlocks, size) != size) {
                free(rwlocks);
                return -1;
        }
        smp_machine = this_is_smp();
        free(rwlocks);
        return 0;
}

int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_rwlock_t *rwlocks;
        unsigned i;

        if (tdb->header.rwlocks == 0) return 0;
        if (!tdb->map_ptr) return -1;

        /* We're mmapped here */
        rwlocks = (tdb_rwlock_t *)((char *)tdb->map_ptr + tdb->header.rwlocks);
        for(i = 0; i < tdb->header.hash_size+1; i++) {
                __spin_lock_init(&rwlocks[i].lock);
                rwlocks[i].count = 0;
        }
        return 0;
}
#else
int tdb_create_rwlocks(int fd, unsigned int hash_size) { return 0; }
int tdb_spinlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }
int tdb_spinunlock(TDB_CONTEXT *tdb, int list, int rw_type) { return -1; }

/* Non-spinlock version: remove spinlock pointer */
int tdb_clear_spinlocks(TDB_CONTEXT *tdb)
{
        tdb_off off = (tdb_off)((char *)&tdb->header.rwlocks
                                - (char *)&tdb->header);

        tdb->header.rwlocks = 0;
        if (lseek(tdb->fd, off, SEEK_SET) != off
            || write(tdb->fd, (void *)&tdb->header.rwlocks,
                     sizeof(tdb->header.rwlocks))
            != sizeof(tdb->header.rwlocks))
                return -1;
        return 0;
}
#endif