#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. Readers never block,
 * but they may have to retry if a writer is in progress. Writers do not
 * wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 *	do {
 *		seq = seq_read_begin(&foo);
 *		...
 *	} while (seq_read_end(&foo, seq));
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/preempt.h>

/* Combination of a spinlock for writers and a sequence counter for readers */
typedef struct {
	unsigned sequence;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define SEQ_LOCK_UNLOCKED { 0, SPIN_LOCK_UNLOCKED }
#define seqlock_init(x)	do { *(x) = (seqlock_t) SEQ_LOCK_UNLOCKED; } while (0)

/* Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void seq_write_lock(seqlock_t *rw)
{
	spin_lock(&rw->lock);
	++rw->sequence;
	wmb();
}

static inline void seq_write_unlock(seqlock_t *rw)
{
	wmb();
	rw->sequence++;
	spin_unlock(&rw->lock);
}

static inline int seq_write_trylock(seqlock_t *rw)
{
	int ret = spin_trylock(&rw->lock);

	if (ret) {
		++rw->sequence;
		wmb();
	}
	return ret;
}

/* Start of read calculation -- fetch last complete writer token */
static inline unsigned seq_read_begin(const seqlock_t *s)
{
	unsigned ret = s->sequence;
	rmb();
	return ret;
}

/* End of read calculation -- the reader must retry if the sequence
 * changed since seq_read_begin() or was odd (a write was in progress).
 */
static inline int seq_read_end(const seqlock_t *s, unsigned iv)
{
	rmb();
	return unlikely((s->sequence != iv) || (iv & 1));
}

/*
 * Possible sw/hw IRQ protected versions of the interfaces.
 */
#define seq_write_lock_irqsave(lock, flags) \
	do { local_irq_save(flags); seq_write_lock(lock); } while (0)
#define seq_write_lock_irq(lock) \
	do { local_irq_disable(); seq_write_lock(lock); } while (0)
#define seq_write_lock_bh(lock) \
	do { local_bh_disable(); seq_write_lock(lock); } while (0)

#define seq_write_unlock_irqrestore(lock, flags) \
	do { seq_write_unlock(lock); local_irq_restore(flags); } while (0)
#define seq_write_unlock_irq(lock) \
	do { seq_write_unlock(lock); local_irq_enable(); } while (0)
#define seq_write_unlock_bh(lock) \
	do { seq_write_unlock(lock); local_bh_enable(); } while (0)

#define seq_read_lock_irqsave(lock, flags) \
	({ local_irq_save(flags); seq_read_begin(lock); })

#define seq_read_lock_irqrestore(lock, iv, flags) \
	({ \
		int ret = seq_read_end(lock, iv); \
		local_irq_restore(flags); \
		ret; \
	})

#endif /* __LINUX_SEQLOCK_H */
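
/*
 * Usage sketch (illustrative only; "xtime_seq", "xtime_val" and the
 * functions below are hypothetical examples, not part of this header).
 * The writer serializes against other writers through the embedded
 * spinlock and bumps the sequence to an odd value on entry and back to
 * even on exit; the reader loops until it observes an even, unchanged
 * sequence around its reads:
 *
 *	static seqlock_t xtime_seq = SEQ_LOCK_UNLOCKED;
 *	static struct timeval xtime_val;
 *
 *	void example_writer(const struct timeval *tv)
 *	{
 *		seq_write_lock(&xtime_seq);
 *		xtime_val = *tv;
 *		seq_write_unlock(&xtime_seq);
 *	}
 *
 *	struct timeval example_reader(void)
 *	{
 *		struct timeval tv;
 *		unsigned seq;
 *
 *		do {
 *			seq = seq_read_begin(&xtime_seq);
 *			tv = xtime_val;
 *		} while (seq_read_end(&xtime_seq, seq));
 *		return tv;
 *	}
 */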