March 10, 2012

[ANNOUNCE] 3.0.23-rt39


Dear RT Folks,

I'm pleased to announce the 3.0.23-rt39 stable release.


You can get this release via the git tree at:

git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git

Head SHA1: abced2ac3332f6c70005c462c6bab6157f17f04b
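
For reference, a minimal way to fetch that tree and check out the head listed
above (just a sketch; it assumes git is installed and clones into the default
directory name):

  # clone the stable-rt tree and check out the rt39 head
  git clone git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-stable-rt.git
  cd linux-stable-rt
  git checkout abced2ac3332f6c70005c462c6bab6157f17f04b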


Or to build 3.0.23-rt39 directly, the following patches should be applied:

http://www.kernel.org/pub/linux/kernel/v3.0/linux-3.0.tar.xz

http://www.kernel.org/pub/linux/kernel/v3.0/patch-3.0.23.xz

http://www.kernel.org/pub/linux/kernel/projects/rt/3.0/patch-3.0.23-rt39.patch.xz
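
As a rough example (not part of the official instructions), those three files
can be fetched and applied as follows; this assumes wget, xz-utils (for tar -J
and xzcat) and patch are installed:

  # fetch the base kernel, the stable update and the -rt patch
  wget http://www.kernel.org/pub/linux/kernel/v3.0/linux-3.0.tar.xz
  wget http://www.kernel.org/pub/linux/kernel/v3.0/patch-3.0.23.xz
  wget http://www.kernel.org/pub/linux/kernel/projects/rt/3.0/patch-3.0.23-rt39.patch.xz

  # unpack and apply both patches in order
  tar xJf linux-3.0.tar.xz
  cd linux-3.0
  xzcat ../patch-3.0.23.xz | patch -p1
  xzcat ../patch-3.0.23-rt39.patch.xz | patch -p1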


You can also build from 3.0.23-rt38 by applying the incremental patch:

http://www.kernel.org/pub/linux/kernel/projects/rt/3.0/incr/patch-3.0.23-rt38-rt39.patch.xz
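
(Likewise only a sketch: run from the top of an already-patched 3.0.23-rt38
tree, with /path/to standing in for wherever the incremental patch was
downloaded; assumes xzcat and patch are available.)

  xzcat /path/to/patch-3.0.23-rt38-rt39.patch.xz | patch -p1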

Enjoy,

-- Steve


Changes from 3.0.23-rt38:

---

Andy Lutomirski (2):
x86-64-remove-vsyscall-number-3
x86-64-emulate-legacy-vsyscalls

Steven Rostedt (11):
revert-convert-xtime_lock-to-raw_seqlock
revert-seqlock-create-raw_seqlock
revert-seqlock-use-seqcount
revert-seqlock-remove-unused-functions
timer: Fix hotplug for -rt
futex/rt: Fix possible lockup when taking pi_lock in proxy handler
ring-buffer/rt: Check for irqs disabled before grabbing reader lock
sched/rt: Fix wait_task_interactive() to test rt_spin_lock state
lglock/rt: Use non-rt for_each_cpu() in -rt code
cpu: Make hotplug.lock a "sleeping" spinlock on RT
Linux 3.0.23-rt39

Thomas Gleixner (12):
x86: vdso: Remove bogus locking in update_vsyscall_tz()
x86: vdso: Use seqcount instead of seqlock
ia64: vsyscall: Use seqcount instead of seqlock
seqlock: Remove unused functions
seqlock: Use seqcount
seqlock: Provide seq_spin_* functions
fs: fs_struct use seqlock
fs: dentry use seqlock
timekeeping: Split xtime_lock
seqlock: Prevent rt starvation
fs: Protect open coded isize seqcount
net: u64_stat: Protect seqcount

----
arch/ia64/kernel/asm-offsets.c | 4 +-
arch/ia64/kernel/fsys.S | 2 +-
arch/ia64/kernel/fsyscall_gtod_data.h | 2 +-
arch/ia64/kernel/time.c | 10 +-
arch/powerpc/platforms/cell/spufs/inode.c | 6 +-
arch/x86/include/asm/irq_vectors.h | 6 +-
arch/x86/include/asm/traps.h | 4 +
arch/x86/include/asm/vgtod.h | 2 +-
arch/x86/include/asm/vsyscall.h | 12 ++
arch/x86/kernel/Makefile | 1 +
arch/x86/kernel/entry_64.S | 2 +
arch/x86/kernel/traps.c | 6 +
arch/x86/kernel/vmlinux.lds.S | 4 -
arch/x86/kernel/vsyscall_64.c | 289 ++++++++++++-----------------
arch/x86/kernel/vsyscall_emu_64.S | 27 +++
arch/x86/vdso/vclock_gettime.c | 16 +-
drivers/infiniband/hw/ipath/ipath_fs.c | 6 +-
drivers/infiniband/hw/qib/qib_fs.c | 6 +-
drivers/usb/core/inode.c | 12 +-
fs/9p/vfs_dir.c | 4 +-
fs/afs/dir.c | 4 +-
fs/autofs4/autofs_i.h | 24 ++--
fs/autofs4/expire.c | 44 +++---
fs/autofs4/root.c | 38 ++--
fs/btrfs/export.c | 4 +-
fs/ceph/caps.c | 8 +-
fs/ceph/debugfs.c | 8 +-
fs/ceph/dir.c | 26 ++--
fs/ceph/inode.c | 20 +-
fs/ceph/mds_client.c | 16 +-
fs/cifs/dir.c | 6 +-
fs/coda/cache.c | 4 +-
fs/configfs/configfs_internal.h | 4 +-
fs/configfs/inode.c | 6 +-
fs/dcache.c | 269 +++++++++++++--------------
fs/dcookies.c | 8 +-
fs/exec.c | 4 +-
fs/exportfs/expfs.c | 12 +-
fs/fat/inode.c | 4 +-
fs/fat/namei_vfat.c | 4 +-
fs/fhandle.c | 4 +-
fs/fs-writeback.c | 4 +-
fs/fs_struct.c | 46 ++---
fs/fuse/inode.c | 4 +-
fs/gfs2/export.c | 4 +-
fs/isofs/export.c | 4 +-
fs/libfs.c | 36 ++--
fs/namei.c | 56 +++---
fs/namespace.c | 8 +-
fs/ncpfs/dir.c | 6 +-
fs/ncpfs/ncplib_kernel.h | 8 +-
fs/nfs/dir.c | 6 +-
fs/nfs/getroot.c | 12 +-
fs/nfs/namespace.c | 16 +-
fs/nfs/unlink.c | 20 +-
fs/nilfs2/namei.c | 4 +-
fs/notify/fsnotify.c | 8 +-
fs/notify/vfsmount_mark.c | 24 ++--
fs/ocfs2/dcache.c | 6 +-
fs/ocfs2/export.c | 4 +-
fs/reiserfs/inode.c | 4 +-
fs/udf/namei.c | 4 +-
fs/xfs/linux-2.6/xfs_export.c | 8 +-
include/linux/dcache.h | 15 +-
include/linux/fs.h | 6 +-
include/linux/fs_struct.h | 16 +-
include/linux/fsnotify_backend.h | 6 +-
include/linux/lglock.h | 35 ++++-
include/linux/seccomp.h | 10 +
include/linux/seqlock.h | 142 +++++++++------
include/linux/u64_stats_sync.h | 2 +
include/net/neighbour.h | 2 +-
kernel/cgroup.c | 22 +-
kernel/cpu.c | 35 +++-
kernel/fork.c | 10 +-
kernel/rtmutex.c | 6 +-
kernel/sched.c | 6 +-
kernel/time/jiffies.c | 4 +-
kernel/time/ntp.c | 24 ++-
kernel/time/tick-common.c | 10 +-
kernel/time/tick-internal.h | 3 +-
kernel/time/tick-sched.c | 16 +-
kernel/time/timekeeping.c | 89 +++++----
kernel/timer.c | 16 +-
kernel/trace/ring_buffer.c | 2 +-
localversion-rt | 2 +-
net/sunrpc/rpc_pipe.c | 6 +-
security/selinux/selinuxfs.c | 14 +-
88 files changed, 919 insertions(+), 810 deletions(-)
---------------------------
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index af56501..106aeb6 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -269,8 +269,8 @@ void foo(void)
BLANK();

/* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */
- DEFINE(IA64_GTOD_LOCK_OFFSET,
- offsetof (struct fsyscall_gtod_data_t, lock));
+ DEFINE(IA64_GTOD_SEQ_OFFSET,
+ offsetof (struct fsyscall_gtod_data_t, seq));
DEFINE(IA64_GTOD_WALL_TIME_OFFSET,
offsetof (struct fsyscall_gtod_data_t, wall_time));
DEFINE(IA64_GTOD_MONO_TIME_OFFSET,
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 331d42b..fa77de7 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -174,7 +174,7 @@ ENTRY(fsys_set_tid_address)
FSYS_RETURN
END(fsys_set_tid_address)

-#if IA64_GTOD_LOCK_OFFSET !=0
+#if IA64_GTOD_SEQ_OFFSET !=0
#error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t
#endif
#if IA64_ITC_JITTER_OFFSET !=0
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 57d2ee6..146b15b 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -6,7 +6,7 @@
*/

struct fsyscall_gtod_data_t {
- seqlock_t lock;
+ seqcount_t seq;
struct timespec wall_time;
struct timespec monotonic_time;
cycle_t clk_mask;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 604a636..15823ed 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -35,9 +35,7 @@

static cycle_t itc_get_cycles(struct clocksource *cs);

-struct fsyscall_gtod_data_t fsyscall_gtod_data = {
- .lock = __RAW_SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
-};
+struct fsyscall_gtod_data_t fsyscall_gtod_data;

struct itc_jitter_data_t itc_jitter_data;

@@ -460,9 +458,7 @@ void update_vsyscall_tz(void)
void update_vsyscall(struct timespec *wall, struct timespec *wtm,
struct clocksource *c, u32 mult)
{
- unsigned long flags;
-
- raw_write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+ write_seqcount_begin(&fsyscall_gtod_data.seq);

/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
@@ -485,6 +481,6 @@ void update_vsyscall(struct timespec *wall, struct timespec *wtm,
fsyscall_gtod_data.monotonic_time.tv_sec++;
}

- raw_write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
+ write_seqcount_end(&fsyscall_gtod_data.seq);
}

diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 856e9c3..1baf322 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -165,18 +165,18 @@ static void spufs_prune_dir(struct dentry *dir)

mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (!(d_unhashed(dentry)) && dentry->d_inode) {
dget_dlock(dentry);
__d_drop(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
simple_unlink(dir->d_inode, dentry);
/* XXX: what was dcache_lock protecting here? Other
* filesystems (IB, configfs) release dcache_lock
* before unlink */
dput(dentry);
} else {
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}
}
shrink_dcache_parent(dir);
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6e976ee..a563c50 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -17,7 +17,8 @@
* Vectors 0 ... 31 : system traps and exceptions - hardcoded events
* Vectors 32 ... 127 : device interrupts
* Vector 128 : legacy int80 syscall interface
- * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 : device interrupts
+ * Vector 204 : legacy x86_64 vsyscall emulation
+ * Vectors 129 ... INVALIDATE_TLB_VECTOR_START-1 except 204 : device interrupts
* Vectors INVALIDATE_TLB_VECTOR_START ... 255 : special interrupts
*
* 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
@@ -50,6 +51,9 @@
#ifdef CONFIG_X86_32
# define SYSCALL_VECTOR 0x80
#endif
+#ifdef CONFIG_X86_64
+# define VSYSCALL_EMU_VECTOR 0xcc
+#endif

/*
* Vectors 0x30-0x3f are used for ISA interrupts.
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 0310da6..2bae0a5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_TRAPS_H
#define _ASM_X86_TRAPS_H

+#include <linux/kprobes.h>
+
#include <asm/debugreg.h>
#include <asm/siginfo.h> /* TRAP_TRACE, ... */

@@ -38,6 +40,7 @@ asmlinkage void alignment_check(void);
asmlinkage void machine_check(void);
#endif /* CONFIG_X86_MCE */
asmlinkage void simd_coprocessor_error(void);
+asmlinkage void emulate_vsyscall(void);

dotraplinkage void do_divide_error(struct pt_regs *, long);
dotraplinkage void do_debug(struct pt_regs *, long);
@@ -64,6 +67,7 @@ dotraplinkage void do_alignment_check(struct pt_regs *, long);
dotraplinkage void do_machine_check(struct pt_regs *, long);
#endif
dotraplinkage void do_simd_coprocessor_error(struct pt_regs *, long);
+dotraplinkage void do_emulate_vsyscall(struct pt_regs *, long);
#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *, long);
#endif
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 4f72846..10e09fc 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -5,7 +5,7 @@
#include <linux/clocksource.h>

struct vsyscall_gtod_data {
- raw_seqlock_t lock;
+ seqcount_t seq;

/* open coded 'struct timespec' */
time_t wall_time_sec;
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index d555973..bb710cb 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -31,6 +31,18 @@ extern struct timezone sys_tz;

extern void map_vsyscall(void);

+/* Emulation */
+
+static inline bool is_vsyscall_entry(unsigned long addr)
+{
+ return (addr & ~0xC00UL) == VSYSCALL_START;
+}
+
+static inline int vsyscall_entry_nr(unsigned long addr)
+{
+ return (addr & 0xC00UL) >> 10;
+}
+
#endif /* __KERNEL__ */

#endif /* _ASM_X86_VSYSCALL_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 90b06d4..cc0469a 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -44,6 +44,7 @@ obj-y += probe_roms.o
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o vread_tsc_64.o
+obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 1689be7..eb7fa5c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1121,6 +1121,8 @@ zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
zeroentry coprocessor_error do_coprocessor_error
errorentry alignment_check do_alignment_check
zeroentry simd_coprocessor_error do_simd_coprocessor_error
+zeroentry emulate_vsyscall do_emulate_vsyscall
+

/* Reload gs selector with exception handling */
/* edi: new selector */
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d343009..25c0dd4 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -895,6 +895,12 @@ void __init trap_init(void)
set_bit(SYSCALL_VECTOR, used_vectors);
#endif

+#ifdef CONFIG_X86_64
+ BUG_ON(test_bit(VSYSCALL_EMU_VECTOR, used_vectors));
+ set_system_intr_gate(VSYSCALL_EMU_VECTOR, &emulate_vsyscall);
+ set_bit(VSYSCALL_EMU_VECTOR, used_vectors);
+#endif
+
/*
* Should be a barrier for any external CPU state:
*/
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 89aed99..85d4a06 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -188,10 +188,6 @@ SECTIONS
*(.vsyscall_2)
}

- .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
- *(.vsyscall_3)
- }
-
#define __VVAR_KERNEL_LDS
#include <asm/vvar.h>
#undef __VVAR_KERNEL_LDS
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index bbcbaaa..c1fe684 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -2,6 +2,8 @@
* Copyright (C) 2001 Andrea Arcangeli <andrea@xxxxx> SuSE
* Copyright 2003 Andi Kleen, SuSE Labs.
*
+ * [ NOTE: this mechanism is now deprecated in favor of the vDSO. ]
+ *
* Thanks to hpa@xxxxx for some useful hint.
* Special thanks to Ingo Molnar for his early experience with
* a different vsyscall implementation for Linux/IA32 and for the name.
@@ -11,10 +13,9 @@
* vsyscalls. One vsyscall can reserve more than 1 slot to avoid
* jumping out of line if necessary. We cannot add more with this
* mechanism because older kernels won't return -ENOSYS.
- * If we want more than four we need a vDSO.
*
- * Note: the concept clashes with user mode linux. If you use UML and
- * want per guest time just set the kernel.vsyscall64 sysctl to 0.
+ * Note: the concept clashes with user mode linux. UML users should
+ * use the vDSO.
*/

/* Disable profiling for userspace code: */
@@ -32,6 +33,8 @@
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/notifier.h>
+#include <linux/syscalls.h>
+#include <linux/ratelimit.h>

#include <asm/vsyscall.h>
#include <asm/pgtable.h>
@@ -44,187 +47,138 @@
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
-
-#define __vsyscall(nr) \
- __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
-#define __syscall_clobber "r11","cx","memory"
+#include <asm/traps.h>

DEFINE_VVAR(int, vgetcpu_mode);
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
-{
- .lock = __RAW_SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
- .sysctl_enabled = 1,
-};
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);

void update_vsyscall_tz(void)
{
- unsigned long flags;
-
- raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
- /* sys_tz has changed */
vsyscall_gtod_data.sys_tz = sys_tz;
- raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
struct clocksource *clock, u32 mult)
{
- unsigned long flags;
+ write_seqcount_begin(&vsyscall_gtod_data.seq);
+

- raw_write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
/* copy vsyscall data */
- vsyscall_gtod_data.clock.vread = clock->vread;
- vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
- vsyscall_gtod_data.clock.mask = clock->mask;
- vsyscall_gtod_data.clock.mult = mult;
- vsyscall_gtod_data.clock.shift = clock->shift;
- vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
- vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
- vsyscall_gtod_data.wall_to_monotonic = *wtm;
- vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
- raw_write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
+ vsyscall_gtod_data.clock.vread = clock->vread;
+ vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
+ vsyscall_gtod_data.clock.mask = clock->mask;
+ vsyscall_gtod_data.clock.mult = mult;
+ vsyscall_gtod_data.clock.shift = clock->shift;
+ vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
+ vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
+ vsyscall_gtod_data.wall_to_monotonic = *wtm;
+ vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+
+ write_seqcount_end(&vsyscall_gtod_data.seq);
}

-/* RED-PEN may want to readd seq locking, but then the variable should be
- * write-once.
- */
-static __always_inline void do_get_tz(struct timezone * tz)
+static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+ const char *message)
{
- *tz = VVAR(vsyscall_gtod_data).sys_tz;
-}
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+ struct task_struct *tsk;

-static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
-{
- int ret;
- asm volatile("syscall"
- : "=a" (ret)
- : "0" (__NR_gettimeofday),"D" (tv),"S" (tz)
- : __syscall_clobber );
- return ret;
-}
+ if (!show_unhandled_signals || !__ratelimit(&rs))
+ return;

-static __always_inline long time_syscall(long *t)
-{
- long secs;
- asm volatile("syscall"
- : "=a" (secs)
- : "0" (__NR_time),"D" (t) : __syscall_clobber);
- return secs;
-}
+ tsk = current;

-static __always_inline void do_vgettimeofday(struct timeval * tv)
-{
- cycle_t now, base, mask, cycle_delta;
- unsigned seq;
- unsigned long mult, shift, nsec;
- cycle_t (*vread)(void);
- do {
- seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
-
- vread = VVAR(vsyscall_gtod_data).clock.vread;
- if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
- !vread)) {
- gettimeofday(tv,NULL);
- return;
- }
-
- now = vread();
- base = VVAR(vsyscall_gtod_data).clock.cycle_last;
- mask = VVAR(vsyscall_gtod_data).clock.mask;
- mult = VVAR(vsyscall_gtod_data).clock.mult;
- shift = VVAR(vsyscall_gtod_data).clock.shift;
-
- tv->tv_sec = VVAR(vsyscall_gtod_data).wall_time_sec;
- nsec = VVAR(vsyscall_gtod_data).wall_time_nsec;
- } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
-
- /* calculate interval: */
- cycle_delta = (now - base) & mask;
- /* convert to nsecs: */
- nsec += (cycle_delta * mult) >> shift;
-
- while (nsec >= NSEC_PER_SEC) {
- tv->tv_sec += 1;
- nsec -= NSEC_PER_SEC;
- }
- tv->tv_usec = nsec / NSEC_PER_USEC;
+ printk("%s%s[%d] %s ip:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+ level, tsk->comm, task_pid_nr(tsk),
+ message, regs->ip - 2, regs->sp, regs->ax, regs->si, regs->di);
}

-int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
+void dotraplinkage do_emulate_vsyscall(struct pt_regs *regs, long error_code)
{
- if (tv)
- do_vgettimeofday(tv);
- if (tz)
- do_get_tz(tz);
- return 0;
-}
-
-/* This will break when the xtime seconds get inaccurate, but that is
- * unlikely */
-time_t __vsyscall(1) vtime(time_t *t)
-{
- unsigned seq;
- time_t result;
- if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
- return time_syscall(t);
+ const char *vsyscall_name;
+ struct task_struct *tsk;
+ unsigned long caller;
+ int vsyscall_nr;
+ long ret;
+
+ /* Kernel code must never get here. */
+ BUG_ON(!user_mode(regs));
+
+ local_irq_enable();
+
+ /*
+ * x86-ism here: regs->ip points to the instruction after the int 0xcc,
+ * and int 0xcc is two bytes long.
+ */
+ if (!is_vsyscall_entry(regs->ip - 2)) {
+ warn_bad_vsyscall(KERN_WARNING, regs, "illegal int 0xcc (exploit attempt?)");
+ goto sigsegv;
+ }
+ vsyscall_nr = vsyscall_entry_nr(regs->ip - 2);

- do {
- seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
+ if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
+ warn_bad_vsyscall(KERN_WARNING, regs, "int 0xcc with bad stack (exploit attempt?)");
+ goto sigsegv;
+ }

- result = VVAR(vsyscall_gtod_data).wall_time_sec;
+ tsk = current;
+ if (seccomp_mode(&tsk->seccomp))
+ do_exit(SIGKILL);
+
+ switch (vsyscall_nr) {
+ case 0:
+ vsyscall_name = "gettimeofday";
+ ret = sys_gettimeofday(
+ (struct timeval __user *)regs->di,
+ (struct timezone __user *)regs->si);
+ break;
+
+ case 1:
+ vsyscall_name = "time";
+ ret = sys_time((time_t __user *)regs->di);
+ break;
+
+ case 2:
+ vsyscall_name = "getcpu";
+ ret = sys_getcpu((unsigned __user *)regs->di,
+ (unsigned __user *)regs->si,
+ 0);
+ break;
+
+ default:
+ /*
+ * If we get here, then vsyscall_nr indicates that int 0xcc
+ * happened at an address in the vsyscall page that doesn't
+ * contain int 0xcc. That can't happen.
+ */
+ BUG();
+ }

- } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
+ if (ret == -EFAULT) {
+ /*
+ * Bad news -- userspace fed a bad pointer to a vsyscall.
+ *
+ * With a real vsyscall, that would have caused SIGSEGV.
+ * To make writing reliable exploits using the emulated
+ * vsyscalls harder, generate SIGSEGV here as well.
+ */
+ warn_bad_vsyscall(KERN_INFO, regs,
+ "vsyscall fault (exploit attempt?)");
+ goto sigsegv;
+ }

- if (t)
- *t = result;
- return result;
-}
+ regs->ax = ret;

-/* Fast way to get current CPU and node.
- This helps to do per node and per CPU caches in user space.
- The result is not guaranteed without CPU affinity, but usually
- works out because the scheduler tries to keep a thread on the same
- CPU.
+ /* Emulate a ret instruction. */
+ regs->ip = caller;
+ regs->sp += 8;

- tcache must point to a two element sized long array.
- All arguments can be NULL. */
-long __vsyscall(2)
-vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
-{
- unsigned int p;
- unsigned long j = 0;
-
- /* Fast cache - only recompute value once per jiffies and avoid
- relatively costly rdtscp/cpuid otherwise.
- This works because the scheduler usually keeps the process
- on the same CPU and this syscall doesn't guarantee its
- results anyways.
- We do this here because otherwise user space would do it on
- its own in a likely inferior way (no access to jiffies).
- If you don't like it pass NULL. */
- if (tcache && tcache->blob[0] == (j = VVAR(jiffies))) {
- p = tcache->blob[1];
- } else if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
- /* Load per CPU data from RDTSCP */
- native_read_tscp(&p);
- } else {
- /* Load per CPU data from GDT */
- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
- }
- if (tcache) {
- tcache->blob[0] = j;
- tcache->blob[1] = p;
- }
- if (cpu)
- *cpu = p & 0xfff;
- if (node)
- *node = p >> 12;
- return 0;
-}
+ local_irq_disable();
+ return;

-static long __vsyscall(3) venosys_1(void)
-{
- return -ENOSYS;
+sigsegv:
+ regs->ip -= 2; /* The faulting instruction should be the int 0xcc. */
+ force_sig(SIGSEGV, current);
}

#ifdef CONFIG_SYSCTL
@@ -243,8 +197,10 @@ static ctl_table kernel_root_table2[] = {
};
#endif

-/* Assume __initcall executes before all user space. Hopefully kmod
- doesn't violate that. We'll find out if it does. */
+/*
+ * Assume __initcall executes before all user space. Hopefully kmod
+ * doesn't violate that. We'll find out if it does.
+ */
static void __cpuinit vsyscall_set_cpu(int cpu)
{
unsigned long d;
@@ -255,13 +211,15 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
write_rdtscp_aux((node << 12) | cpu);

- /* Store cpu number in limit so that it can be loaded quickly
- in user space in vgetcpu.
- 12 bits for the CPU and 8 bits for the node. */
+ /*
+ * Store cpu number in limit so that it can be loaded quickly
+ * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
+ */
d = 0x0f40000000000ULL;
d |= cpu;
d |= (node & 0xf) << 12;
d |= (node >> 4) << 48;
+
write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

@@ -275,8 +233,10 @@ static int __cpuinit
cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
long cpu = (long)arg;
+
if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1);
+
return NOTIFY_DONE;
}

@@ -291,18 +251,15 @@ void __init map_vsyscall(void)

static int __init vsyscall_init(void)
{
- BUG_ON(((unsigned long) &vgettimeofday !=
- VSYSCALL_ADDR(__NR_vgettimeofday)));
- BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
- BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
- BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
+ BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
+
#ifdef CONFIG_SYSCTL
register_sysctl_table(kernel_root_table2);
#endif
on_each_cpu(cpu_vsyscall_init, NULL, 1);
/* notifier priority > KVM */
hotcpu_notifier(cpu_vsyscall_notifier, 30);
+
return 0;
}
-
__initcall(vsyscall_init);
diff --git a/arch/x86/kernel/vsyscall_emu_64.S b/arch/x86/kernel/vsyscall_emu_64.S
new file mode 100644
index 0000000..ffa845e
--- /dev/null
+++ b/arch/x86/kernel/vsyscall_emu_64.S
@@ -0,0 +1,27 @@
+/*
+ * vsyscall_emu_64.S: Vsyscall emulation page
+ *
+ * Copyright (c) 2011 Andy Lutomirski
+ *
+ * Subject to the GNU General Public License, version 2
+ */
+
+#include <linux/linkage.h>
+#include <asm/irq_vectors.h>
+
+/* The unused parts of the page are filled with 0xcc by the linker script. */
+
+.section .vsyscall_0, "a"
+ENTRY(vsyscall_0)
+ int $VSYSCALL_EMU_VECTOR
+END(vsyscall_0)
+
+.section .vsyscall_1, "a"
+ENTRY(vsyscall_1)
+ int $VSYSCALL_EMU_VECTOR
+END(vsyscall_1)
+
+.section .vsyscall_2, "a"
+ENTRY(vsyscall_2)
+ int $VSYSCALL_EMU_VECTOR
+END(vsyscall_2)
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index a724905..117e6cf 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -46,11 +46,11 @@ notrace static noinline int do_realtime(struct timespec *ts)
{
unsigned long seq, ns;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_sec;
ts->tv_nsec = gtod->wall_time_nsec;
ns = vgetns();
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
timespec_add_ns(ts, ns);
return 0;
}
@@ -59,12 +59,12 @@ notrace static noinline int do_monotonic(struct timespec *ts)
{
unsigned long seq, ns, secs;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
secs = gtod->wall_time_sec;
ns = gtod->wall_time_nsec + vgetns();
secs += gtod->wall_to_monotonic.tv_sec;
ns += gtod->wall_to_monotonic.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
* are all guaranteed to be nonnegative.
@@ -83,10 +83,10 @@ notrace static noinline int do_realtime_coarse(struct timespec *ts)
{
unsigned long seq;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_coarse.tv_sec;
ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
return 0;
}

@@ -94,12 +94,12 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
unsigned long seq, ns, secs;
do {
- seq = read_seqbegin(&gtod->lock);
+ seq = read_seqcount_begin(&gtod->seq);
secs = gtod->wall_time_coarse.tv_sec;
ns = gtod->wall_time_coarse.tv_nsec;
secs += gtod->wall_to_monotonic.tv_sec;
ns += gtod->wall_to_monotonic.tv_nsec;
- } while (unlikely(read_seqretry(&gtod->lock, seq)));
+ } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));

/* wall_time_nsec and wall_to_monotonic.tv_nsec are
* guaranteed to be between 0 and NSEC_PER_SEC.
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 31ae1b1..21319f7 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -277,14 +277,14 @@ static int remove_file(struct dentry *parent, char *name)
goto bail;
}

- spin_lock(&tmp->d_lock);
+ seq_spin_lock(&tmp->d_lock);
if (!(d_unhashed(tmp) && tmp->d_inode)) {
dget_dlock(tmp);
__d_drop(tmp);
- spin_unlock(&tmp->d_lock);
+ seq_spin_unlock(&tmp->d_lock);
simple_unlink(parent->d_inode, tmp);
} else
- spin_unlock(&tmp->d_lock);
+ seq_spin_unlock(&tmp->d_lock);

ret = 0;
bail:
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index df7fa25..b20c7f8 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -453,14 +453,14 @@ static int remove_file(struct dentry *parent, char *name)
goto bail;
}

- spin_lock(&tmp->d_lock);
+ seq_spin_lock(&tmp->d_lock);
if (!(d_unhashed(tmp) && tmp->d_inode)) {
dget_dlock(tmp);
__d_drop(tmp);
- spin_unlock(&tmp->d_lock);
+ seq_spin_unlock(&tmp->d_lock);
simple_unlink(parent->d_inode, tmp);
} else {
- spin_unlock(&tmp->d_lock);
+ seq_spin_unlock(&tmp->d_lock);
}

ret = 0;
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 2278dad..731f03f 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -343,19 +343,19 @@ static int usbfs_empty (struct dentry *dentry)
{
struct list_head *list;

- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
list_for_each(list, &dentry->d_subdirs) {
struct dentry *de = list_entry(list, struct dentry, d_u.d_child);

- spin_lock_nested(&de->d_lock, DENTRY_D_LOCK_NESTED);
+ seq_spin_lock_nested(&de->d_lock, DENTRY_D_LOCK_NESTED);
if (usbfs_positive(de)) {
- spin_unlock(&de->d_lock);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&de->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return 0;
}
- spin_unlock(&de->d_lock);
+ seq_spin_unlock(&de->d_lock);
}
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return 1;
}

diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 9c2bdda..0ca9433 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -107,7 +107,7 @@ static int v9fs_alloc_rdir_buf(struct file *filp, int buflen)
err = -ENOMEM;
goto exit;
}
- spin_lock(&filp->f_dentry->d_lock);
+ seq_spin_lock(&filp->f_dentry->d_lock);
if (!fid->rdir) {
rdir->buf = (uint8_t *)rdir + sizeof(struct p9_rdir);
mutex_init(&rdir->mutex);
@@ -115,7 +115,7 @@ static int v9fs_alloc_rdir_buf(struct file *filp, int buflen)
fid->rdir = (void *) rdir;
rdir = NULL;
}
- spin_unlock(&filp->f_dentry->d_lock);
+ seq_spin_unlock(&filp->f_dentry->d_lock);
kfree(rdir);
}
exit:
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 1b0b195..84f6bf6 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -705,9 +705,9 @@ out_skip:

/* the dirent, if it exists, now points to a different vnode */
not_found:
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_NFSFS_RENAMED;
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);

out_bad:
if (dentry->d_inode) {
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 475f9c5..b620114 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -207,9 +207,9 @@ static inline void __managed_dentry_set_automount(struct dentry *dentry)

static inline void managed_dentry_set_automount(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
__managed_dentry_set_automount(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}

static inline void __managed_dentry_clear_automount(struct dentry *dentry)
@@ -219,9 +219,9 @@ static inline void __managed_dentry_clear_automount(struct dentry *dentry)

static inline void managed_dentry_clear_automount(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
__managed_dentry_clear_automount(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}

static inline void __managed_dentry_set_transit(struct dentry *dentry)
@@ -231,9 +231,9 @@ static inline void __managed_dentry_set_transit(struct dentry *dentry)

static inline void managed_dentry_set_transit(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
__managed_dentry_set_transit(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}

static inline void __managed_dentry_clear_transit(struct dentry *dentry)
@@ -243,9 +243,9 @@ static inline void __managed_dentry_clear_transit(struct dentry *dentry)

static inline void managed_dentry_clear_transit(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
__managed_dentry_clear_transit(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}

static inline void __managed_dentry_set_managed(struct dentry *dentry)
@@ -255,9 +255,9 @@ static inline void __managed_dentry_set_managed(struct dentry *dentry)

static inline void managed_dentry_set_managed(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
__managed_dentry_set_managed(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}

static inline void __managed_dentry_clear_managed(struct dentry *dentry)
@@ -267,9 +267,9 @@ static inline void __managed_dentry_clear_managed(struct dentry *dentry)

static inline void managed_dentry_clear_managed(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
__managed_dentry_clear_managed(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}

/* Initializing function */
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 450f529..d8b6184 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -99,7 +99,7 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
spin_lock(&sbi->lookup_lock);

if (prev == NULL) {
- spin_lock(&root->d_lock);
+ seq_spin_lock(&root->d_lock);
prev = dget_dlock(root);
next = prev->d_subdirs.next;
p = prev;
@@ -107,12 +107,12 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev,
}

p = prev;
- spin_lock(&p->d_lock);
+ seq_spin_lock(&p->d_lock);
again:
next = p->d_u.d_child.next;
start:
if (next == &root->d_subdirs) {
- spin_unlock(&p->d_lock);
+ seq_spin_unlock(&p->d_lock);
spin_unlock(&sbi->lookup_lock);
dput(prev);
return NULL;
@@ -120,16 +120,16 @@ start:

q = list_entry(next, struct dentry, d_u.d_child);

- spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
+ seq_spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED);
/* Negative dentry - try next */
if (!simple_positive(q)) {
- spin_unlock(&p->d_lock);
+ seq_spin_unlock(&p->d_lock);
p = q;
goto again;
}
dget_dlock(q);
- spin_unlock(&q->d_lock);
- spin_unlock(&p->d_lock);
+ seq_spin_unlock(&q->d_lock);
+ seq_spin_unlock(&p->d_lock);
spin_unlock(&sbi->lookup_lock);

dput(prev);
@@ -153,7 +153,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev,
spin_lock(&sbi->lookup_lock);
relock:
p = prev;
- spin_lock(&p->d_lock);
+ seq_spin_lock(&p->d_lock);
again:
next = p->d_subdirs.next;
if (next == &p->d_subdirs) {
@@ -161,19 +161,19 @@ again:
struct dentry *parent;

if (p == root) {
- spin_unlock(&p->d_lock);
+ seq_spin_unlock(&p->d_lock);
spin_unlock(&sbi->lookup_lock);
dput(prev);
return NULL;
}

parent = p->d_parent;
- if (!spin_trylock(&parent->d_lock)) {
- spin_unlock(&p->d_lock);
+ if (!seq_spin_trylock(&parent->d_lock)) {
+ seq_spin_unlock(&p->d_lock);
cpu_relax();
goto relock;
}
- spin_unlock(&p->d_lock);
+ seq_spin_unlock(&p->d_lock);
next = p->d_u.d_child.next;
p = parent;
if (next != &parent->d_subdirs)
@@ -182,16 +182,16 @@ again:
}
ret = list_entry(next, struct dentry, d_u.d_child);

- spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
+ seq_spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED);
/* Negative dentry - try next */
if (!simple_positive(ret)) {
- spin_unlock(&p->d_lock);
+ seq_spin_unlock(&p->d_lock);
p = ret;
goto again;
}
dget_dlock(ret);
- spin_unlock(&ret->d_lock);
- spin_unlock(&p->d_lock);
+ seq_spin_unlock(&ret->d_lock);
+ seq_spin_unlock(&p->d_lock);
spin_unlock(&sbi->lookup_lock);

dput(prev);
@@ -462,11 +462,11 @@ found:
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
spin_lock(&sbi->lookup_lock);
- spin_lock(&expired->d_parent->d_lock);
- spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
+ seq_spin_lock(&expired->d_parent->d_lock);
+ seq_spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
- spin_unlock(&expired->d_lock);
- spin_unlock(&expired->d_parent->d_lock);
+ seq_spin_unlock(&expired->d_lock);
+ seq_spin_unlock(&expired->d_parent->d_lock);
spin_unlock(&sbi->lookup_lock);
return expired;
}
@@ -556,7 +556,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,

spin_lock(&sbi->fs_lock);
ino->flags &= ~AUTOFS_INF_EXPIRING;
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (!ret) {
if ((IS_ROOT(dentry) ||
(autofs_type_indirect(sbi->type) &&
@@ -564,7 +564,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
!(dentry->d_flags & DCACHE_NEED_AUTOMOUNT))
__managed_dentry_set_automount(dentry);
}
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
dput(dentry);
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index f55ae23..4a52674 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -124,13 +124,13 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
* it.
*/
spin_lock(&sbi->lookup_lock);
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
spin_unlock(&sbi->lookup_lock);
return -ENOENT;
}
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
spin_unlock(&sbi->lookup_lock);

out:
@@ -179,7 +179,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
ino = list_entry(p, struct autofs_info, active);
active = ino->dentry;

- spin_lock(&active->d_lock);
+ seq_spin_lock(&active->d_lock);

/* Already gone? */
if (active->d_count == 0)
@@ -199,12 +199,12 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)

if (d_unhashed(active)) {
dget_dlock(active);
- spin_unlock(&active->d_lock);
+ seq_spin_unlock(&active->d_lock);
spin_unlock(&sbi->lookup_lock);
return active;
}
next:
- spin_unlock(&active->d_lock);
+ seq_spin_unlock(&active->d_lock);
}
spin_unlock(&sbi->lookup_lock);

@@ -231,7 +231,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
ino = list_entry(p, struct autofs_info, expiring);
expiring = ino->dentry;

- spin_lock(&expiring->d_lock);
+ seq_spin_lock(&expiring->d_lock);

/* Bad luck, we've already been dentry_iput */
if (!expiring->d_inode)
@@ -251,12 +251,12 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)

if (d_unhashed(expiring)) {
dget_dlock(expiring);
- spin_unlock(&expiring->d_lock);
+ seq_spin_unlock(&expiring->d_lock);
spin_unlock(&sbi->lookup_lock);
return expiring;
}
next:
- spin_unlock(&expiring->d_lock);
+ seq_spin_unlock(&expiring->d_lock);
}
spin_unlock(&sbi->lookup_lock);

@@ -382,12 +382,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
if (have_submounts(dentry))
goto done;
} else {
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (!list_empty(&dentry->d_subdirs)) {
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
goto done;
}
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}
ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&sbi->fs_lock);
@@ -410,12 +410,12 @@ done:
* an actual mount so ->d_automount() won't be called during
* the follow.
*/
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if ((!d_mountpoint(dentry) &&
!list_empty(&dentry->d_subdirs)) ||
(dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
__managed_dentry_clear_automount(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}
spin_unlock(&sbi->fs_lock);

@@ -597,9 +597,9 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)

spin_lock(&sbi->lookup_lock);
__autofs4_add_expiring(dentry);
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
__d_drop(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
spin_unlock(&sbi->lookup_lock);

return 0;
@@ -670,15 +670,15 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
return -EACCES;

spin_lock(&sbi->lookup_lock);
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (!list_empty(&dentry->d_subdirs)) {
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
spin_unlock(&sbi->lookup_lock);
return -ENOTEMPTY;
}
__autofs4_add_expiring(dentry);
__d_drop(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
spin_unlock(&sbi->lookup_lock);

if (sbi->version < 5)
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 1b8dc33..c473324 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -40,14 +40,14 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
struct inode *parent;
u64 parent_root_id;

- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);

parent = dentry->d_parent->d_inode;
fid->parent_objectid = BTRFS_I(parent)->location.objectid;
fid->parent_gen = parent->i_generation;
parent_root_id = BTRFS_I(parent)->root->objectid;

- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);

if (parent_root_id != fid->root_objectid) {
fid->parent_root_objectid = parent_root_id;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f605753..2c2ac3a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3065,14 +3065,14 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
* doesn't have to be perfect; the mds will revoke anything we don't
* release.
*/
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (di->lease_session && di->lease_session->s_mds == mds)
force = 1;
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);

ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);

- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (ret && di->lease_session && di->lease_session->s_mds == mds) {
dout("encode_dentry_release %p mds%d seq %d\n",
dentry, mds, (int)di->lease_seq);
@@ -3082,6 +3082,6 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
rel->dname_seq = cpu_to_le32(di->lease_seq);
__ceph_mdsc_drop_dentry_lease(dentry);
}
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return ret;
}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 0dba691..0ecffe2 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -82,13 +82,13 @@ static int mdsc_show(struct seq_file *s, void *p)
&pathbase, 0);
if (IS_ERR(path))
path = NULL;
- spin_lock(&req->r_dentry->d_lock);
+ seq_spin_lock(&req->r_dentry->d_lock);
seq_printf(s, " #%llx/%.*s (%s)",
ceph_ino(req->r_dentry->d_parent->d_inode),
req->r_dentry->d_name.len,
req->r_dentry->d_name.name,
path ? path : "");
- spin_unlock(&req->r_dentry->d_lock);
+ seq_spin_unlock(&req->r_dentry->d_lock);
kfree(path);
} else if (req->r_path1) {
seq_printf(s, " #%llx/%s", req->r_ino1.ino,
@@ -100,13 +100,13 @@ static int mdsc_show(struct seq_file *s, void *p)
&pathbase, 0);
if (IS_ERR(path))
path = NULL;
- spin_lock(&req->r_old_dentry->d_lock);
+ seq_spin_lock(&req->r_old_dentry->d_lock);
seq_printf(s, " #%llx/%.*s (%s)",
ceph_ino(req->r_old_dentry->d_parent->d_inode),
req->r_old_dentry->d_name.len,
req->r_old_dentry->d_name.name,
path ? path : "");
- spin_unlock(&req->r_old_dentry->d_lock);
+ seq_spin_unlock(&req->r_old_dentry->d_lock);
kfree(path);
} else if (req->r_path2) {
if (req->r_ino2.ino)
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index ef8f08c..f0f6efd 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -52,7 +52,7 @@ int ceph_init_dentry(struct dentry *dentry)
if (!di)
return -ENOMEM; /* oh well */

- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (dentry->d_fsdata) {
/* lost a race */
kmem_cache_free(ceph_dentry_cachep, di);
@@ -64,7 +64,7 @@ int ceph_init_dentry(struct dentry *dentry)
dentry->d_time = jiffies;
ceph_dentry_lru_add(dentry);
out_unlock:
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return 0;
}

@@ -112,7 +112,7 @@ static int __dcache_readdir(struct file *filp,
dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
last);

- spin_lock(&parent->d_lock);
+ seq_spin_lock(&parent->d_lock);

/* start at beginning? */
if (filp->f_pos == 2 || last == NULL ||
@@ -136,7 +136,7 @@ more:
fi->at_end = 1;
goto out_unlock;
}
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+ seq_spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
if (!d_unhashed(dentry) && dentry->d_inode &&
ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
@@ -146,15 +146,15 @@ more:
dentry->d_name.len, dentry->d_name.name, di->offset,
filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
!dentry->d_inode ? " null" : "");
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
p = p->prev;
dentry = list_entry(p, struct dentry, d_u.d_child);
di = ceph_dentry(dentry);
}

dget_dlock(dentry);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&parent->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&parent->d_lock);

dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
@@ -187,12 +187,12 @@ more:
goto out;
}

- spin_lock(&parent->d_lock);
+ seq_spin_lock(&parent->d_lock);
p = p->prev; /* advance to next dentry */
goto more;

out_unlock:
- spin_unlock(&parent->d_lock);
+ seq_spin_unlock(&parent->d_lock);
out:
if (last)
dput(last);
@@ -917,10 +917,10 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
dentry->d_time = jiffies;
ceph_dentry(dentry)->lease_shared_gen = 0;
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}

/*
@@ -938,7 +938,7 @@ static int dentry_lease_is_valid(struct dentry *dentry)
struct inode *dir = NULL;
u32 seq = 0;

- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
di = ceph_dentry(dentry);
if (di && di->lease_session) {
s = di->lease_session;
@@ -962,7 +962,7 @@ static int dentry_lease_is_valid(struct dentry *dentry)
}
}
}
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);

if (session) {
ceph_mdsc_lease_send_msg(session, dir, dentry,
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index d8858e9..11f11ed 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -804,7 +804,7 @@ static void update_dentry_lease(struct dentry *dentry,
if (dentry->d_op != &ceph_dentry_ops)
return;

- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
dentry, le16_to_cpu(lease->mask), duration, ttl);

@@ -832,7 +832,7 @@ static void update_dentry_lease(struct dentry *dentry,
di->lease_renew_from = 0;
dentry->d_time = ttl;
out_unlock:
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return;
}

@@ -858,13 +858,13 @@ static void ceph_set_dentry_offset(struct dentry *dn)
di->offset = ceph_inode(inode)->i_max_offset++;
spin_unlock(&inode->i_lock);

- spin_lock(&dir->d_lock);
- spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
+ seq_spin_lock(&dir->d_lock);
+ seq_spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
list_move(&dn->d_u.d_child, &dir->d_subdirs);
dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
dn->d_u.d_child.prev, dn->d_u.d_child.next);
- spin_unlock(&dn->d_lock);
- spin_unlock(&dir->d_lock);
+ seq_spin_unlock(&dn->d_lock);
+ seq_spin_unlock(&dir->d_lock);
}

/*
@@ -1248,11 +1248,11 @@ retry_lookup:
goto retry_lookup;
} else {
/* reorder parent's d_subdirs */
- spin_lock(&parent->d_lock);
- spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
+ seq_spin_lock(&parent->d_lock);
+ seq_spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
list_move(&dn->d_u.d_child, &parent->d_subdirs);
- spin_unlock(&dn->d_lock);
- spin_unlock(&parent->d_lock);
+ seq_spin_unlock(&dn->d_lock);
+ seq_spin_unlock(&parent->d_lock);
}

di = dn->d_fsdata;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 0c1d917..da64709 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1476,7 +1476,7 @@ retry:
for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
struct inode *inode;

- spin_lock(&temp->d_lock);
+ seq_spin_lock(&temp->d_lock);
inode = temp->d_inode;
if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
dout("build_path path+%d: %p SNAPDIR\n",
@@ -1487,13 +1487,13 @@ retry:
} else {
pos -= temp->d_name.len;
if (pos < 0) {
- spin_unlock(&temp->d_lock);
+ seq_spin_unlock(&temp->d_lock);
break;
}
strncpy(path + pos, temp->d_name.name,
temp->d_name.len);
}
- spin_unlock(&temp->d_lock);
+ seq_spin_unlock(&temp->d_lock);
if (pos)
path[--pos] = '/';
temp = temp->d_parent;
@@ -2758,7 +2758,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
if (!dentry)
goto release;

- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
di = ceph_dentry(dentry);
switch (h->action) {
case CEPH_MDS_LEASE_REVOKE:
@@ -2786,7 +2786,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
}
break;
}
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
dput(dentry);

if (!release)
@@ -2861,7 +2861,7 @@ void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
BUG_ON(mask == 0);

/* is dentry lease valid? */
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
di = ceph_dentry(dentry);
if (!di || !di->lease_session ||
di->lease_session->s_mds < 0 ||
@@ -2870,7 +2870,7 @@ void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
dout("lease_release inode %p dentry %p -- "
"no lease on %d\n",
inode, dentry, mask);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return;
}

@@ -2878,7 +2878,7 @@ void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
session = ceph_get_mds_session(di->lease_session);
seq = di->lease_seq;
__ceph_mdsc_drop_dentry_lease(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);

dout("lease_release inode %p dentry %p mask %d to mds%d\n",
inode, dentry, mask, session->s_mds);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 16cdd6d..337abab 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -88,10 +88,10 @@ cifs_bp_rename_retry:
full_path[namelen] = 0; /* trailing null */
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
- spin_lock(&temp->d_lock);
+ seq_spin_lock(&temp->d_lock);
namelen -= 1 + temp->d_name.len;
if (namelen < 0) {
- spin_unlock(&temp->d_lock);
+ seq_spin_unlock(&temp->d_lock);
break;
} else {
full_path[namelen] = dirsep;
@@ -99,7 +99,7 @@ cifs_bp_rename_retry:
temp->d_name.len);
cFYI(0, "name: %s", full_path + namelen);
}
- spin_unlock(&temp->d_lock);
+ seq_spin_unlock(&temp->d_lock);
temp = temp->d_parent;
if (temp == NULL) {
cERROR(1, "corrupt dentry");
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 6901578..93b5810 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -92,7 +92,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
struct list_head *child;
struct dentry *de;

- spin_lock(&parent->d_lock);
+ seq_spin_lock(&parent->d_lock);
list_for_each(child, &parent->d_subdirs)
{
de = list_entry(child, struct dentry, d_u.d_child);
@@ -101,7 +101,7 @@ static void coda_flag_children(struct dentry *parent, int flag)
continue;
coda_flag_inode(de->d_inode, flag);
}
- spin_unlock(&parent->d_lock);
+ seq_spin_unlock(&parent->d_lock);
return;
}

diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 82bda8f..2ebef5e 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -121,7 +121,7 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry
{
struct config_item * item = NULL;

- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (!d_unhashed(dentry)) {
struct configfs_dirent * sd = dentry->d_fsdata;
if (sd->s_type & CONFIGFS_ITEM_LINK) {
@@ -130,7 +130,7 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry
} else
item = config_item_get(sd->s_element);
}
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);

return item;
}
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index c83f476..84d7e95 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -250,14 +250,14 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
struct dentry * dentry = sd->s_dentry;

if (dentry) {
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (!(d_unhashed(dentry) && dentry->d_inode)) {
dget_dlock(dentry);
__d_drop(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
simple_unlink(parent->d_inode, dentry);
} else
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}
}

diff --git a/fs/dcache.c b/fs/dcache.c
index f598b98..10580be 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -171,9 +171,9 @@ static void d_free(struct dentry *dentry)
*/
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
- assert_spin_locked(&dentry->d_lock);
+ assert_seq_spin_locked(&dentry->d_lock);
/* Go through a barrier */
- write_seqcount_barrier(&dentry->d_seq);
+ write_seqlock_barrier(&dentry->d_lock);
}

/*
@@ -189,7 +189,7 @@ static void dentry_iput(struct dentry * dentry)
if (inode) {
dentry->d_inode = NULL;
list_del_init(&dentry->d_alias);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
fsnotify_inoderemove(inode);
@@ -198,7 +198,7 @@ static void dentry_iput(struct dentry * dentry)
else
iput(inode);
} else {
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}
}

@@ -214,7 +214,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
dentry->d_inode = NULL;
list_del_init(&dentry->d_alias);
dentry_rcuwalk_barrier(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
fsnotify_inoderemove(inode);
@@ -292,7 +292,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
*/
dentry->d_flags |= DCACHE_DISCONNECTED;
if (parent)
- spin_unlock(&parent->d_lock);
+ seq_spin_unlock(&parent->d_lock);
dentry_iput(dentry);
/*
* dentry_iput drops the locks, at which point nobody (except
@@ -338,9 +338,9 @@ EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
__d_drop(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

@@ -359,7 +359,7 @@ static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
inode = dentry->d_inode;
if (inode && !spin_trylock(&inode->i_lock)) {
relock:
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
cpu_relax();
return dentry; /* try again with same dentry */
}
@@ -367,7 +367,7 @@ relock:
parent = NULL;
else
parent = dentry->d_parent;
- if (parent && !spin_trylock(&parent->d_lock)) {
+ if (parent && !seq_spin_trylock(&parent->d_lock)) {
if (inode)
spin_unlock(&inode->i_lock);
goto relock;
@@ -416,11 +416,11 @@ void dput(struct dentry *dentry)
repeat:
if (dentry->d_count == 1)
might_sleep();
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
BUG_ON(!dentry->d_count);
if (dentry->d_count > 1) {
dentry->d_count--;
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return;
}

@@ -438,7 +438,7 @@ repeat:
dentry_lru_add(dentry);

dentry->d_count--;
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return;

kill_it:
@@ -465,9 +465,9 @@ int d_invalidate(struct dentry * dentry)
/*
* If it's already been dropped, return OK.
*/
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (d_unhashed(dentry)) {
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return 0;
}
/*
@@ -475,9 +475,9 @@ int d_invalidate(struct dentry * dentry)
* to get rid of unused child entries.
*/
if (!list_empty(&dentry->d_subdirs)) {
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
shrink_dcache_parent(dentry);
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
}

/*
@@ -492,13 +492,13 @@ int d_invalidate(struct dentry * dentry)
*/
if (dentry->d_count > 1) {
if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return -EBUSY;
}
}

__d_drop(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
return 0;
}
EXPORT_SYMBOL(d_invalidate);
@@ -511,9 +511,9 @@ static inline void __dget_dlock(struct dentry *dentry)

static inline void __dget(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
__dget_dlock(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}

struct dentry *dget_parent(struct dentry *dentry)
@@ -531,16 +531,16 @@ repeat:
rcu_read_unlock();
goto out;
}
- spin_lock(&ret->d_lock);
+ seq_spin_lock(&ret->d_lock);
if (unlikely(ret != dentry->d_parent)) {
- spin_unlock(&ret->d_lock);
+ seq_spin_unlock(&ret->d_lock);
rcu_read_unlock();
goto repeat;
}
rcu_read_unlock();
BUG_ON(!ret->d_count);
ret->d_count++;
- spin_unlock(&ret->d_lock);
+ seq_spin_unlock(&ret->d_lock);
out:
return ret;
}
@@ -569,31 +569,31 @@ static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
again:
discon_alias = NULL;
list_for_each_entry(alias, &inode->i_dentry, d_alias) {
- spin_lock(&alias->d_lock);
+ seq_spin_lock(&alias->d_lock);
if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
if (IS_ROOT(alias) &&
(alias->d_flags & DCACHE_DISCONNECTED)) {
discon_alias = alias;
} else if (!want_discon) {
__dget_dlock(alias);
- spin_unlock(&alias->d_lock);
+ seq_spin_unlock(&alias->d_lock);
return alias;
}
}
- spin_unlock(&alias->d_lock);
+ seq_spin_unlock(&alias->d_lock);
}
if (discon_alias) {
alias = discon_alias;
- spin_lock(&alias->d_lock);
+ seq_spin_lock(&alias->d_lock);
if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
if (IS_ROOT(alias) &&
(alias->d_flags & DCACHE_DISCONNECTED)) {
__dget_dlock(alias);
- spin_unlock(&alias->d_lock);
+ seq_spin_unlock(&alias->d_lock);
return alias;
}
}
- spin_unlock(&alias->d_lock);
+ seq_spin_unlock(&alias->d_lock);
goto again;
}
return NULL;
@@ -622,16 +622,16 @@ void d_prune_aliases(struct inode *inode)
restart:
spin_lock(&inode->i_lock);
list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (!dentry->d_count) {
__dget_dlock(dentry);
__d_drop(dentry);
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
dput(dentry);
goto restart;
}
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
}
spin_unlock(&inode->i_lock);
}
@@ -668,10 +668,10 @@ static void try_prune_one_dentry(struct dentry *dentry)
/* Prune ancestors. */
dentry = parent;
while (dentry) {
- spin_lock(&dentry->d_lock);
+ seq_spin_lock(&dentry->d_lock);
if (dentry->d_count > 1) {
dentry->d_count--;
- spin_unlock(&dentry->d_lock);
+ seq_spin_unlock(&dentry->d_lock);
