diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 81bc1a9..f7ade3b 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -275,8 +275,8 @@ versions.
 If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
 kernel is the current stable kernel.
 
-2.6.x.y are maintained by the "stable" team <stable@kernel.org>, and are
-released as needs dictate. The normal release period is approximately
+2.6.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+are released as needs dictate. The normal release period is approximately
 two weeks, but it can be longer if there are no pressing problems. A
 security-related problem, instead, can cause a release to happen almost
 instantly.
diff --git a/Documentation/development-process/5.Posting b/Documentation/development-process/5.Posting
index 903a254..8a48c9b 100644
--- a/Documentation/development-process/5.Posting
+++ b/Documentation/development-process/5.Posting
@@ -271,10 +271,10 @@ copies should go to:
    the linux-kernel list.
 
  - If you are fixing a bug, think about whether the fix should go into the
-   next stable update. If so, stable@kernel.org should get a copy of the
-   patch. Also add a "Cc: stable@kernel.org" to the tags within the patch
-   itself; that will cause the stable team to get a notification when your
-   fix goes into the mainline.
+   next stable update. If so, stable@vger.kernel.org should get a copy of
+   the patch. Also add a "Cc: stable@vger.kernel.org" to the tags within
+   the patch itself; that will cause the stable team to get a notification
+   when your fix goes into the mainline.
 
 When selecting recipients for a patch, it is good to have an idea of who
 you think will eventually accept the patch and get it merged. While it
diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
index a4efa04..5335fa8 100644
--- a/Documentation/usb/usbmon.txt
+++ b/Documentation/usb/usbmon.txt
@@ -47,10 +47,11 @@ This allows to filter away annoying devices that talk continuously.
 
 2. Find which bus connects to the desired device
 
-Run "cat /proc/bus/usb/devices", and find the T-line which corresponds to
-the device. Usually you do it by looking for the vendor string. If you have
-many similar devices, unplug one and compare two /proc/bus/usb/devices outputs.
-The T-line will have a bus number. Example:
+Run "cat /sys/kernel/debug/usb/devices", and find the T-line which corresponds
+to the device. Usually you do it by looking for the vendor string. If you have
+many similar devices, unplug one and compare the two
+/sys/kernel/debug/usb/devices outputs. The T-line will have a bus number.
+Example:
 
 T: Bus=03 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=12 MxCh= 0
 D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
@@ -58,7 +59,10 @@ P: Vendor=0557 ProdID=2004 Rev= 1.00
 S: Manufacturer=ATEN
 S: Product=UC100KM V2.00
 
-Bus=03 means it's bus 3.
+"Bus=03" means it's bus 3. Alternatively, you can look at the output from
+"lsusb" and get the bus number from the appropriate line. Example:
+
+Bus 003 Device 002: ID 0557:2004 ATEN UC100KM V2.00
 
 3. Start 'cat'
 
diff --git a/MAINTAINERS b/MAINTAINERS
index fed9c84..7d8e486 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6048,7 +6048,7 @@ F: arch/alpha/kernel/srm_env.c
 
 STABLE BRANCH
 M: Greg Kroah-Hartman
-L: stable@kernel.org
+L: stable@vger.kernel.org
 S: Maintained
 
 STAGING SUBSYSTEM
diff --git a/Makefile b/Makefile
index f4f577b..295fbda 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 0
-SUBLEVEL = 14
+SUBLEVEL = 17
 EXTRAVERSION =
 NAME = Sneaky Weasel
 
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 88bd6f7..c565971 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -133,7 +133,7 @@ static struct platform_device rx51_charger_device = {
 static void __init rx51_charger_init(void)
 {
         WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
-                GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+                GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
 
         platform_device_register(&rx51_charger_device);
 }
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index c074e66..4e0a371 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
         return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
         oprofile_perf_exit();
 }
diff --git a/arch/arm/plat-mxc/pwm.c b/arch/arm/plat-mxc/pwm.c
index 7a61ef8..f4b68be 100644
--- a/arch/arm/plat-mxc/pwm.c
+++ b/arch/arm/plat-mxc/pwm.c
@@ -32,6 +32,9 @@
 #define MX3_PWMSAR 0x0C /* PWM Sample Register */
 #define MX3_PWMPR 0x10 /* PWM Period Register */
 #define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN (1 << 24)
+#define MX3_PWMCR_WAITEN (1 << 23)
+#define MX3_PWMCR_DBGEN (1 << 22)
 #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
 #define MX3_PWMCR_CLKSRC_IPG (1 << 16)
 #define MX3_PWMCR_EN (1 << 0)
@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
                 do_div(c, period_ns);
                 duty_cycles = c;
 
+                /*
+                 * according to imx pwm RM, the real period value should be
+                 * PERIOD value in PWMPR plus 2.
+                 */
+                if (period_cycles > 2)
+                        period_cycles -= 2;
+                else
+                        period_cycles = 0;
+
                 writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
                 writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 
-                cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+                cr = MX3_PWMCR_PRESCALER(prescale) |
+                        MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+                        MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
 
                 if (cpu_is_mx25())
                         cr |= MX3_PWMCR_CLKSRC_IPG;
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index fe6f7c2..bc3c745 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -219,5 +219,7 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
 extern void secondary_cpu_time_init(void);
 extern void iSeries_time_init_early(void);
 
+extern void decrementer_check_overflow(void);
+
 #endif /* __KERNEL__ */
 #endif /* __POWERPC_TIME_H */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 70e88b2..a96989c 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -170,16 +170,13 @@ notrace void arch_local_irq_restore(unsigned long en)
          */
         local_paca->hard_enabled = en;
 
-#ifndef CONFIG_BOOKE
-        /* On server, re-trigger the decrementer if it went negative since
-         * some processors only trigger on edge transitions of the sign bit.
- * - * BookE has a level sensitive decrementer (latches in TSR) so we - * don't need that + /* + * Trigger the decrementer if we have a pending event. Some processors + * only trigger on edge transitions of the sign bit. We might also + * have disabled interrupts long enough that the decrementer wrapped + * to positive. */ - if ((int)mfspr(SPRN_DEC) < 0) - mtspr(SPRN_DEC, 1); -#endif /* CONFIG_BOOKE */ + decrementer_check_overflow(); /* * Force the delivery of pending soft-disabled interrupts on PS3. diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 03b29a6..2de304a 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -889,6 +889,15 @@ static void __init clocksource_init(void) clock->name, clock->mult, clock->shift); } +void decrementer_check_overflow(void) +{ + u64 now = get_tb_or_rtc(); + struct decrementer_clock *decrementer = &__get_cpu_var(decrementers); + + if (now >= decrementer->next_tb) + set_dec(1); +} + static int decrementer_set_next_event(unsigned long evt, struct clock_event_device *dev) { diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index f106662..c9311cf 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c @@ -109,7 +109,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long if (opcode > MAX_HCALL_OPCODE) return; - h = &get_cpu_var(hcall_stats)[opcode / 4]; + h = &__get_cpu_var(hcall_stats)[opcode / 4]; h->tb_start = mftb(); h->purr_start = mfspr(SPRN_PURR); } @@ -126,8 +126,6 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long h->num_calls++; h->tb_total += mftb() - h->tb_start; h->purr_total += mfspr(SPRN_PURR) - h->purr_start; - - put_cpu_var(hcall_stats); } static int __init hcall_inst_init(void) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index ed96b37..81e30d9 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -745,6 +745,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args) goto out; (*depth)++; + preempt_disable(); trace_hcall_entry(opcode, args); (*depth)--; @@ -767,6 +768,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval, (*depth)++; trace_hcall_exit(opcode, retval, retbuf); + preempt_enable(); (*depth)--; out: diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index 0e358c2..422110a 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c @@ -90,7 +90,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (oprofile_started) diff --git a/arch/sh/oprofile/common.c b/arch/sh/oprofile/common.c index b4c2d2b..e4dd5d5 100644 --- a/arch/sh/oprofile/common.c +++ b/arch/sh/oprofile/common.c @@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) return oprofile_perf_init(ops); } -void __exit oprofile_arch_exit(void) +void oprofile_arch_exit(void) { oprofile_perf_exit(); kfree(sh_pmu_op_name); @@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) ops->backtrace = sh_backtrace; return -ENODEV; } -void __exit oprofile_arch_exit(void) {} +void oprofile_arch_exit(void) {} #endif /* CONFIG_HW_PERF_EVENTS */ diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 
5b31a8e..a790cc6 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -431,10 +431,6 @@ extern unsigned long *sparc_valid_addr_bitmap; #define kern_addr_valid(addr) \ (test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap)) -extern int io_remap_pfn_range(struct vm_area_struct *vma, - unsigned long from, unsigned long pfn, - unsigned long size, pgprot_t prot); - /* * For sparc32&64, the pfn in io_remap_pfn_range() carries in * its high 4 bits. These macros/functions put it there or get it from there. @@ -443,6 +439,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4)) #define GET_PFN(pfn) (pfn & 0x0fffffffUL) +extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long, + unsigned long, pgprot_t); + +static inline int io_remap_pfn_range(struct vm_area_struct *vma, + unsigned long from, unsigned long pfn, + unsigned long size, pgprot_t prot) +{ + unsigned long long offset, space, phys_base; + + offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT; + space = GET_IOSPACE(pfn); + phys_base = offset | (space << 32ULL); + + return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); +} + #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ ({ \ diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 1e03c5a..9822628 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -750,10 +750,6 @@ static inline bool kern_addr_valid(unsigned long addr) extern int page_in_phys_avail(unsigned long paddr); -extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, - unsigned long pfn, - unsigned long size, pgprot_t prot); - /* * For sparc32&64, the pfn in io_remap_pfn_range() carries in * its high 4 bits. These macros/functions put it there or get it from there. 
@@ -762,6 +758,22 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, #define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4)) #define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL) +extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long, + unsigned long, pgprot_t); + +static inline int io_remap_pfn_range(struct vm_area_struct *vma, + unsigned long from, unsigned long pfn, + unsigned long size, pgprot_t prot) +{ + unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT; + int space = GET_IOSPACE(pfn); + unsigned long phys_base; + + phys_base = offset | (((unsigned long) space) << 32UL); + + return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); +} + #include /* We provide our own get_unmapped_area to cope with VA holes and diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index e27f8ea..0c218e4 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -42,6 +42,9 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr, extern void fpload(unsigned long *fpregs, unsigned long *fsr); #else /* CONFIG_SPARC32 */ + +#include + struct popc_3insn_patch_entry { unsigned int addr; unsigned int insns[3]; @@ -57,6 +60,10 @@ extern struct popc_6insn_patch_entry __popc_6insn_patch, __popc_6insn_patch_end; extern void __init per_cpu_patch(void); +extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, + struct sun4v_1insn_patch_entry *); +extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, + struct sun4v_2insn_patch_entry *); extern void __init sun4v_patch(void); extern void __init boot_cpu_id_too_large(int cpu); extern unsigned int dcache_parity_tl1_occurred; diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index 99ba5ba..8172c18 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c @@ -17,6 +17,8 @@ #include #include +#include "entry.h" + #ifdef CONFIG_SPARC64 #include @@ -220,6 +222,29 @@ int apply_relocate_add(Elf_Shdr *sechdrs, } #ifdef CONFIG_SPARC64 +static void do_patch_sections(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs) +{ + const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL; + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name)) + sun4v_1insn = s; + if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name)) + sun4v_2insn = s; + } + + if (sun4v_1insn && tlb_type == hypervisor) { + void *p = (void *) sun4v_1insn->sh_addr; + sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size); + } + if (sun4v_2insn && tlb_type == hypervisor) { + void *p = (void *) sun4v_2insn->sh_addr; + sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size); + } +} + int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) @@ -227,6 +252,8 @@ int module_finalize(const Elf_Ehdr *hdr, /* make jump label nops */ jump_label_apply_nops(me); + do_patch_sections(hdr, sechdrs); + /* Cheetah's I-cache is fully coherent. 
*/ if (tlb_type == spitfire) { unsigned long va; diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index b01a06e..9e73c4a 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -848,10 +848,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm, if (!irq) return -ENOMEM; - if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) - return -EINVAL; if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) return -EINVAL; + if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) + return -EINVAL; return irq; } diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 409cb28..5c9e2f2 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -234,40 +234,50 @@ void __init per_cpu_patch(void) } } -void __init sun4v_patch(void) +void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start, + struct sun4v_1insn_patch_entry *end) { - extern void sun4v_hvapi_init(void); - struct sun4v_1insn_patch_entry *p1; - struct sun4v_2insn_patch_entry *p2; - - if (tlb_type != hypervisor) - return; + while (start < end) { + unsigned long addr = start->addr; - p1 = &__sun4v_1insn_patch; - while (p1 < &__sun4v_1insn_patch_end) { - unsigned long addr = p1->addr; - - *(unsigned int *) (addr + 0) = p1->insn; + *(unsigned int *) (addr + 0) = start->insn; wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 0)); - p1++; + start++; } +} - p2 = &__sun4v_2insn_patch; - while (p2 < &__sun4v_2insn_patch_end) { - unsigned long addr = p2->addr; +void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start, + struct sun4v_2insn_patch_entry *end) +{ + while (start < end) { + unsigned long addr = start->addr; - *(unsigned int *) (addr + 0) = p2->insns[0]; + *(unsigned int *) (addr + 0) = start->insns[0]; wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 0)); - *(unsigned int *) (addr + 4) = p2->insns[1]; + *(unsigned int *) (addr + 4) = start->insns[1]; wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 4)); - p2++; + start++; } +} + +void __init sun4v_patch(void) +{ + extern void sun4v_hvapi_init(void); + + if (tlb_type != hypervisor) + return; + + sun4v_patch_1insn_range(&__sun4v_1insn_patch, + &__sun4v_1insn_patch_end); + + sun4v_patch_2insn_range(&__sun4v_2insn_patch, + &__sun4v_2insn_patch_end); sun4v_hvapi_init(); } diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 5d92488..2e58328 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -829,21 +829,23 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -void do_signal32(sigset_t *oldset, struct pt_regs * regs, - int restart_syscall, unsigned long orig_i0) +void do_signal32(sigset_t *oldset, struct pt_regs * regs) { struct k_sigaction ka; + unsigned long orig_i0; + int restart_syscall; siginfo_t info; int signr; signr = get_signal_to_deliver(&info, &ka, regs, NULL); - /* If the debugger messes with the program counter, it clears - * the "in syscall" bit, directing us to not perform a syscall - * restart. 
- */ - if (restart_syscall && !pt_regs_is_syscall(regs)) - restart_syscall = 0; + restart_syscall = 0; + orig_i0 = 0; + if (pt_regs_is_syscall(regs) && + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { + restart_syscall = 1; + orig_i0 = regs->u_regs[UREG_G6]; + } if (signr > 0) { if (restart_syscall) diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 04ede8f..2302567 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -525,10 +525,26 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) siginfo_t info; int signr; + /* It's a lot of work and synchronization to add a new ptrace + * register for GDB to save and restore in order to get + * orig_i0 correct for syscall restarts when debugging. + * + * Although it should be the case that most of the global + * registers are volatile across a system call, glibc already + * depends upon that fact that we preserve them. So we can't + * just use any global register to save away the orig_i0 value. + * + * In particular %g2, %g3, %g4, and %g5 are all assumed to be + * preserved across a system call trap by various pieces of + * code in glibc. + * + * %g7 is used as the "thread register". %g6 is not used in + * any fixed manner. %g6 is used as a scratch register and + * a compiler temporary, but it's value is never used across + * a system call. Therefore %g6 is usable for orig_i0 storage. + */ if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) - restart_syscall = 1; - else - restart_syscall = 0; + regs->u_regs[UREG_G6] = orig_i0; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = ¤t->saved_sigmask; @@ -541,8 +557,12 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) * the software "in syscall" bit, directing us to not perform * a syscall restart. */ - if (restart_syscall && !pt_regs_is_syscall(regs)) - restart_syscall = 0; + restart_syscall = 0; + if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) { + restart_syscall = 1; + orig_i0 = regs->u_regs[UREG_G6]; + } + if (signr > 0) { if (restart_syscall) diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 47509df..d58260b 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -535,11 +535,27 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) siginfo_t info; int signr; + /* It's a lot of work and synchronization to add a new ptrace + * register for GDB to save and restore in order to get + * orig_i0 correct for syscall restarts when debugging. + * + * Although it should be the case that most of the global + * registers are volatile across a system call, glibc already + * depends upon that fact that we preserve them. So we can't + * just use any global register to save away the orig_i0 value. + * + * In particular %g2, %g3, %g4, and %g5 are all assumed to be + * preserved across a system call trap by various pieces of + * code in glibc. + * + * %g7 is used as the "thread register". %g6 is not used in + * any fixed manner. %g6 is used as a scratch register and + * a compiler temporary, but it's value is never used across + * a system call. Therefore %g6 is usable for orig_i0 storage. 
+ */ if (pt_regs_is_syscall(regs) && - (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { - restart_syscall = 1; - } else - restart_syscall = 0; + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) + regs->u_regs[UREG_G6] = orig_i0; if (current_thread_info()->status & TS_RESTORE_SIGMASK) oldset = ¤t->saved_sigmask; @@ -548,22 +564,20 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) #ifdef CONFIG_COMPAT if (test_thread_flag(TIF_32BIT)) { - extern void do_signal32(sigset_t *, struct pt_regs *, - int restart_syscall, - unsigned long orig_i0); - do_signal32(oldset, regs, restart_syscall, orig_i0); + extern void do_signal32(sigset_t *, struct pt_regs *); + do_signal32(oldset, regs); return; } #endif signr = get_signal_to_deliver(&info, &ka, regs, NULL); - /* If the debugger messes with the program counter, it clears - * the software "in syscall" bit, directing us to not perform - * a syscall restart. - */ - if (restart_syscall && !pt_regs_is_syscall(regs)) - restart_syscall = 0; + restart_syscall = 0; + if (pt_regs_is_syscall(regs) && + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { + restart_syscall = 1; + orig_i0 = regs->u_regs[UREG_G6]; + } if (signr > 0) { if (restart_syscall) diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index 3635771..9384a0c 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -713,17 +713,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) s16 b = (rs2 >> (i * 16)) & 0xffff; if (a > b) - rd_val |= 1 << i; + rd_val |= 8 >> i; } break; case FCMPGT32_OPF: for (i = 0; i < 2; i++) { - s32 a = (rs1 >> (i * 32)) & 0xffff; - s32 b = (rs2 >> (i * 32)) & 0xffff; + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a > b) - rd_val |= 1 << i; + rd_val |= 2 >> i; } break; @@ -733,17 +733,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) s16 b = (rs2 >> (i * 16)) & 0xffff; if (a <= b) - rd_val |= 1 << i; + rd_val |= 8 >> i; } break; case FCMPLE32_OPF: for (i = 0; i < 2; i++) { - s32 a = (rs1 >> (i * 32)) & 0xffff; - s32 b = (rs2 >> (i * 32)) & 0xffff; + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a <= b) - rd_val |= 1 << i; + rd_val |= 2 >> i; } break; @@ -753,17 +753,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) s16 b = (rs2 >> (i * 16)) & 0xffff; if (a != b) - rd_val |= 1 << i; + rd_val |= 8 >> i; } break; case FCMPNE32_OPF: for (i = 0; i < 2; i++) { - s32 a = (rs1 >> (i * 32)) & 0xffff; - s32 b = (rs2 >> (i * 32)) & 0xffff; + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a != b) - rd_val |= 1 << i; + rd_val |= 2 >> i; } break; @@ -773,17 +773,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) s16 b = (rs2 >> (i * 16)) & 0xffff; if (a == b) - rd_val |= 1 << i; + rd_val |= 8 >> i; } break; case FCMPEQ32_OPF: for (i = 0; i < 2; i++) { - s32 a = (rs1 >> (i * 32)) & 0xffff; - s32 b = (rs2 >> (i * 32)) & 0xffff; + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a == b) - rd_val |= 1 << i; + rd_val |= 2 >> i; } break; } diff --git a/arch/sparc/lib/memcpy.S b/arch/sparc/lib/memcpy.S index 34fe657..4d8c497 100644 --- a/arch/sparc/lib/memcpy.S +++ b/arch/sparc/lib/memcpy.S @@ -7,40 +7,12 @@ * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ -#ifdef __KERNEL__ - -#define FUNC(x) \ +#define FUNC(x) \ .globl x; \ .type 
x,@function; \ - .align 4; \ + .align 4; \ x: -#undef FASTER_REVERSE -#undef FASTER_NONALIGNED -#define FASTER_ALIGNED - -/* In kernel these functions don't return a value. - * One should use macros in asm/string.h for that purpose. - * We return 0, so that bugs are more apparent. - */ -#define SETUP_RETL -#define RETL_INSN clr %o0 - -#else - -/* libc */ - -#include "DEFS.h" - -#define FASTER_REVERSE -#define FASTER_NONALIGNED -#define FASTER_ALIGNED - -#define SETUP_RETL mov %o0, %g6 -#define RETL_INSN mov %g6, %o0 - -#endif - /* Both these macros have to start with exactly the same insn */ #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \ ldd [%src + (offset) + 0x00], %t0; \ @@ -164,30 +136,6 @@ x: .text .align 4 -#ifdef FASTER_REVERSE - -70: /* rdword_align */ - - andcc %o1, 1, %g0 - be 4f - andcc %o1, 2, %g0 - - ldub [%o1 - 1], %g2 - sub %o1, 1, %o1 - stb %g2, [%o0 - 1] - sub %o2, 1, %o2 - be 3f - sub %o0, 1, %o0 -4: - lduh [%o1 - 2], %g2 - sub %o1, 2, %o1 - sth %g2, [%o0 - 2] - sub %o2, 2, %o2 - b 3f - sub %o0, 2, %o0 - -#endif /* FASTER_REVERSE */ - 0: retl nop ! Only bcopy returns here and it retuns void... @@ -198,7 +146,7 @@ FUNC(__memmove) #endif FUNC(memmove) cmp %o0, %o1 - SETUP_RETL + mov %o0, %g7 bleu 9f sub %o0, %o1, %o4 @@ -207,8 +155,6 @@ FUNC(memmove) bleu 0f andcc %o4, 3, %o5 -#ifndef FASTER_REVERSE - add %o1, %o2, %o1 add %o0, %o2, %o0 sub %o1, 1, %o1 @@ -224,295 +170,7 @@ FUNC(memmove) sub %o0, 1, %o0 retl - RETL_INSN - -#else /* FASTER_REVERSE */ - - add %o1, %o2, %o1 - add %o0, %o2, %o0 - bne 77f - cmp %o2, 15 - bleu 91f - andcc %o1, 3, %g0 - bne 70b -3: - andcc %o1, 4, %g0 - - be 2f - mov %o2, %g1 - - ld [%o1 - 4], %o4 - sub %g1, 4, %g1 - st %o4, [%o0 - 4] - sub %o1, 4, %o1 - sub %o0, 4, %o0 -2: - andcc %g1, 0xffffff80, %g7 - be 3f - andcc %o0, 4, %g0 - - be 74f + 4 -5: - RMOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) - subcc %g7, 128, %g7 - sub %o1, 128, %o1 - bne 5b - sub %o0, 128, %o0 -3: - andcc %g1, 0x70, %g7 - be 72f - andcc %g1, 8, %g0 - - sethi %hi(72f), %o5 - srl %g7, 1, %o4 - add %g7, %o4, %o4 - sub %o1, %g7, %o1 - sub %o5, %o4, %o5 - jmpl %o5 + %lo(72f), %g0 - sub %o0, %g7, %o0 - -71: /* rmemcpy_table */ - RMOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5) - RMOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5) - -72: /* rmemcpy_table_end */ - - be 73f - andcc %g1, 4, %g0 - - ldd [%o1 - 0x08], %g2 - sub %o0, 8, %o0 - sub %o1, 8, %o1 - st %g2, [%o0] - st %g3, [%o0 + 0x04] - -73: /* rmemcpy_last7 */ - - be 1f - andcc %g1, 2, %g0 - - ld [%o1 - 4], %g2 - sub %o1, 4, %o1 - st %g2, [%o0 - 4] - sub %o0, 4, %o0 -1: - be 1f - andcc %g1, 1, %g0 - - lduh [%o1 - 2], %g2 - sub %o1, 2, %o1 - sth %g2, [%o0 - 2] - sub %o0, 2, %o0 -1: - be 1f - nop - - ldub [%o1 - 1], %g2 - stb %g2, [%o0 - 1] -1: - retl - RETL_INSN - -74: /* rldd_std */ - RMOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) - RMOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) - subcc %g7, 128, %g7 - 
sub %o1, 128, %o1 - bne 74b - sub %o0, 128, %o0 - - andcc %g1, 0x70, %g7 - be 72b - andcc %g1, 8, %g0 - - sethi %hi(72b), %o5 - srl %g7, 1, %o4 - add %g7, %o4, %o4 - sub %o1, %g7, %o1 - sub %o5, %o4, %o5 - jmpl %o5 + %lo(72b), %g0 - sub %o0, %g7, %o0 - -75: /* rshort_end */ - - and %o2, 0xe, %o3 -2: - sethi %hi(76f), %o5 - sll %o3, 3, %o4 - sub %o0, %o3, %o0 - sub %o5, %o4, %o5 - sub %o1, %o3, %o1 - jmpl %o5 + %lo(76f), %g0 - andcc %o2, 1, %g0 - - RMOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3) - RMOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3) - -76: /* rshort_table_end */ - - be 1f - nop - ldub [%o1 - 1], %g2 - stb %g2, [%o0 - 1] -1: - retl - RETL_INSN - -91: /* rshort_aligned_end */ - - bne 75b - andcc %o2, 8, %g0 - - be 1f - andcc %o2, 4, %g0 - - ld [%o1 - 0x08], %g2 - ld [%o1 - 0x04], %g3 - sub %o1, 8, %o1 - st %g2, [%o0 - 0x08] - st %g3, [%o0 - 0x04] - sub %o0, 8, %o0 -1: - b 73b - mov %o2, %g1 - -77: /* rnon_aligned */ - cmp %o2, 15 - bleu 75b - andcc %o0, 3, %g0 - be 64f - andcc %o0, 1, %g0 - be 63f - andcc %o0, 2, %g0 - ldub [%o1 - 1], %g5 - sub %o1, 1, %o1 - stb %g5, [%o0 - 1] - sub %o0, 1, %o0 - be 64f - sub %o2, 1, %o2 -63: - ldub [%o1 - 1], %g5 - sub %o1, 2, %o1 - stb %g5, [%o0 - 1] - sub %o0, 2, %o0 - ldub [%o1], %g5 - sub %o2, 2, %o2 - stb %g5, [%o0] -64: - and %o1, 3, %g2 - and %o1, -4, %o1 - and %o2, 0xc, %g3 - add %o1, 4, %o1 - cmp %g3, 4 - sll %g2, 3, %g4 - mov 32, %g2 - be 4f - sub %g2, %g4, %g7 - - blu 3f - cmp %g3, 8 - - be 2f - srl %o2, 2, %g3 - - ld [%o1 - 4], %o3 - add %o0, -8, %o0 - ld [%o1 - 8], %o4 - add %o1, -16, %o1 - b 7f - add %g3, 1, %g3 -2: - ld [%o1 - 4], %o4 - add %o0, -4, %o0 - ld [%o1 - 8], %g1 - add %o1, -12, %o1 - b 8f - add %g3, 2, %g3 -3: - ld [%o1 - 4], %o5 - add %o0, -12, %o0 - ld [%o1 - 8], %o3 - add %o1, -20, %o1 - b 6f - srl %o2, 2, %g3 -4: - ld [%o1 - 4], %g1 - srl %o2, 2, %g3 - ld [%o1 - 8], %o5 - add %o1, -24, %o1 - add %o0, -16, %o0 - add %g3, -1, %g3 - - ld [%o1 + 12], %o3 -5: - sll %o5, %g4, %g2 - srl %g1, %g7, %g5 - or %g2, %g5, %g2 - st %g2, [%o0 + 12] -6: - ld [%o1 + 8], %o4 - sll %o3, %g4, %g2 - srl %o5, %g7, %g5 - or %g2, %g5, %g2 - st %g2, [%o0 + 8] -7: - ld [%o1 + 4], %g1 - sll %o4, %g4, %g2 - srl %o3, %g7, %g5 - or %g2, %g5, %g2 - st %g2, [%o0 + 4] -8: - ld [%o1], %o5 - sll %g1, %g4, %g2 - srl %o4, %g7, %g5 - addcc %g3, -4, %g3 - or %g2, %g5, %g2 - add %o1, -16, %o1 - st %g2, [%o0] - add %o0, -16, %o0 - bne,a 5b - ld [%o1 + 12], %o3 - sll %o5, %g4, %g2 - srl %g1, %g7, %g5 - srl %g4, 3, %g3 - or %g2, %g5, %g2 - add %o1, %g3, %o1 - andcc %o2, 2, %g0 - st %g2, [%o0 + 12] - be 1f - andcc %o2, 1, %g0 - - ldub [%o1 + 15], %g5 - add %o1, -2, %o1 - stb %g5, [%o0 + 11] - add %o0, -2, %o0 - ldub [%o1 + 16], %g5 - stb %g5, [%o0 + 12] -1: - be 1f - nop - ldub [%o1 + 15], %g5 - stb %g5, [%o0 + 11] -1: - retl - RETL_INSN - -#endif /* FASTER_REVERSE */ + mov %g7, %o0 /* NOTE: This code is executed just for the cases, where %src (=%o1) & 3 is != 0. 
@@ -546,7 +204,7 @@ FUNC(memmove) FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ sub %o0, %o1, %o4 - SETUP_RETL + mov %o0, %g7 9: andcc %o4, 3, %o5 0: @@ -569,7 +227,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ add %o1, 4, %o1 add %o0, 4, %o0 2: - andcc %g1, 0xffffff80, %g7 + andcc %g1, 0xffffff80, %g0 be 3f andcc %o0, 4, %g0 @@ -579,22 +237,23 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) - subcc %g7, 128, %g7 + sub %g1, 128, %g1 add %o1, 128, %o1 - bne 5b + cmp %g1, 128 + bge 5b add %o0, 128, %o0 3: - andcc %g1, 0x70, %g7 + andcc %g1, 0x70, %g4 be 80f andcc %g1, 8, %g0 sethi %hi(80f), %o5 - srl %g7, 1, %o4 - add %g7, %o4, %o4 - add %o1, %g7, %o1 + srl %g4, 1, %o4 + add %g4, %o4, %o4 + add %o1, %g4, %o1 sub %o5, %o4, %o5 jmpl %o5 + %lo(80f), %g0 - add %o0, %g7, %o0 + add %o0, %g4, %o0 79: /* memcpy_table */ @@ -641,43 +300,28 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ stb %g2, [%o0] 1: retl - RETL_INSN + mov %g7, %o0 82: /* ldd_std */ MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5) MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5) - subcc %g7, 128, %g7 + subcc %g1, 128, %g1 add %o1, 128, %o1 - bne 82b + cmp %g1, 128 + bge 82b add %o0, 128, %o0 -#ifndef FASTER_ALIGNED - - andcc %g1, 0x70, %g7 - be 80b - andcc %g1, 8, %g0 - - sethi %hi(80b), %o5 - srl %g7, 1, %o4 - add %g7, %o4, %o4 - add %o1, %g7, %o1 - sub %o5, %o4, %o5 - jmpl %o5 + %lo(80b), %g0 - add %o0, %g7, %o0 - -#else /* FASTER_ALIGNED */ - - andcc %g1, 0x70, %g7 + andcc %g1, 0x70, %g4 be 84f andcc %g1, 8, %g0 sethi %hi(84f), %o5 - add %o1, %g7, %o1 - sub %o5, %g7, %o5 + add %o1, %g4, %o1 + sub %o5, %g4, %o5 jmpl %o5 + %lo(84f), %g0 - add %o0, %g7, %o0 + add %o0, %g4, %o0 83: /* amemcpy_table */ @@ -721,382 +365,132 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ stb %g2, [%o0] 1: retl - RETL_INSN - -#endif /* FASTER_ALIGNED */ + mov %g7, %o0 86: /* non_aligned */ cmp %o2, 6 bleu 88f + nop -#ifdef FASTER_NONALIGNED - - cmp %o2, 256 - bcc 87f - -#endif /* FASTER_NONALIGNED */ - - andcc %o0, 3, %g0 + save %sp, -96, %sp + andcc %i0, 3, %g0 be 61f - andcc %o0, 1, %g0 + andcc %i0, 1, %g0 be 60f - andcc %o0, 2, %g0 + andcc %i0, 2, %g0 - ldub [%o1], %g5 - add %o1, 1, %o1 - stb %g5, [%o0] - sub %o2, 1, %o2 + ldub [%i1], %g5 + add %i1, 1, %i1 + stb %g5, [%i0] + sub %i2, 1, %i2 bne 61f - add %o0, 1, %o0 + add %i0, 1, %i0 60: - ldub [%o1], %g3 - add %o1, 2, %o1 - stb %g3, [%o0] - sub %o2, 2, %o2 - ldub [%o1 - 1], %g3 - add %o0, 2, %o0 - stb %g3, [%o0 - 1] + ldub [%i1], %g3 + add %i1, 2, %i1 + stb %g3, [%i0] + sub %i2, 2, %i2 + ldub [%i1 - 1], %g3 + add %i0, 2, %i0 + stb %g3, [%i0 - 1] 61: - and %o1, 3, %g2 - and %o2, 0xc, %g3 - and %o1, -4, %o1 + and %i1, 3, %g2 + and %i2, 0xc, %g3 + and %i1, -4, %i1 cmp %g3, 4 sll %g2, 3, %g4 mov 32, %g2 be 4f - sub %g2, %g4, %g7 + sub %g2, %g4, %l0 blu 3f cmp %g3, 0x8 be 2f - srl %o2, 2, %g3 + srl %i2, 2, %g3 - ld [%o1], %o3 - add %o0, -8, %o0 - ld [%o1 + 4], %o4 + ld [%i1], %i3 + add %i0, -8, %i0 + ld [%i1 + 4], %i4 b 8f add %g3, 1, %g3 2: - ld [%o1], %o4 - add %o0, -12, %o0 - ld [%o1 + 4], %o5 + ld [%i1], %i4 + add %i0, -12, %i0 + ld [%i1 + 4], %i5 add %g3, 2, %g3 b 9f - add %o1, -4, %o1 + add %i1, -4, %i1 3: - ld [%o1], %g1 - add %o0, -4, %o0 - ld [%o1 + 4], %o3 - srl 
%o2, 2, %g3 + ld [%i1], %g1 + add %i0, -4, %i0 + ld [%i1 + 4], %i3 + srl %i2, 2, %g3 b 7f - add %o1, 4, %o1 + add %i1, 4, %i1 4: - ld [%o1], %o5 - cmp %o2, 7 - ld [%o1 + 4], %g1 - srl %o2, 2, %g3 + ld [%i1], %i5 + cmp %i2, 7 + ld [%i1 + 4], %g1 + srl %i2, 2, %g3 bleu 10f - add %o1, 8, %o1 + add %i1, 8, %i1 - ld [%o1], %o3 + ld [%i1], %i3 add %g3, -1, %g3 5: - sll %o5, %g4, %g2 - srl %g1, %g7, %g5 + sll %i5, %g4, %g2 + srl %g1, %l0, %g5 or %g2, %g5, %g2 - st %g2, [%o0] + st %g2, [%i0] 7: - ld [%o1 + 4], %o4 + ld [%i1 + 4], %i4 sll %g1, %g4, %g2 - srl %o3, %g7, %g5 + srl %i3, %l0, %g5 or %g2, %g5, %g2 - st %g2, [%o0 + 4] + st %g2, [%i0 + 4] 8: - ld [%o1 + 8], %o5 - sll %o3, %g4, %g2 - srl %o4, %g7, %g5 + ld [%i1 + 8], %i5 + sll %i3, %g4, %g2 + srl %i4, %l0, %g5 or %g2, %g5, %g2 - st %g2, [%o0 + 8] + st %g2, [%i0 + 8] 9: - ld [%o1 + 12], %g1 - sll %o4, %g4, %g2 - srl %o5, %g7, %g5 + ld [%i1 + 12], %g1 + sll %i4, %g4, %g2 + srl %i5, %l0, %g5 addcc %g3, -4, %g3 or %g2, %g5, %g2 - add %o1, 16, %o1 - st %g2, [%o0 + 12] - add %o0, 16, %o0 + add %i1, 16, %i1 + st %g2, [%i0 + 12] + add %i0, 16, %i0 bne,a 5b - ld [%o1], %o3 + ld [%i1], %i3 10: - sll %o5, %g4, %g2 - srl %g1, %g7, %g5 - srl %g7, 3, %g3 + sll %i5, %g4, %g2 + srl %g1, %l0, %g5 + srl %l0, 3, %g3 or %g2, %g5, %g2 - sub %o1, %g3, %o1 - andcc %o2, 2, %g0 - st %g2, [%o0] + sub %i1, %g3, %i1 + andcc %i2, 2, %g0 + st %g2, [%i0] be 1f - andcc %o2, 1, %g0 - - ldub [%o1], %g2 - add %o1, 2, %o1 - stb %g2, [%o0 + 4] - add %o0, 2, %o0 - ldub [%o1 - 1], %g2 - stb %g2, [%o0 + 3] + andcc %i2, 1, %g0 + + ldub [%i1], %g2 + add %i1, 2, %i1 + stb %g2, [%i0 + 4] + add %i0, 2, %i0 + ldub [%i1 - 1], %g2 + stb %g2, [%i0 + 3] 1: be 1f nop - ldub [%o1], %g2 - stb %g2, [%o0 + 4] -1: - retl - RETL_INSN - -#ifdef FASTER_NONALIGNED - -87: /* faster_nonaligned */ - - andcc %o1, 3, %g0 - be 3f - andcc %o1, 1, %g0 - - be 4f - andcc %o1, 2, %g0 - - ldub [%o1], %g2 - add %o1, 1, %o1 - stb %g2, [%o0] - sub %o2, 1, %o2 - bne 3f - add %o0, 1, %o0 -4: - lduh [%o1], %g2 - add %o1, 2, %o1 - srl %g2, 8, %g3 - sub %o2, 2, %o2 - stb %g3, [%o0] - add %o0, 2, %o0 - stb %g2, [%o0 - 1] -3: - andcc %o1, 4, %g0 - - bne 2f - cmp %o5, 1 - - ld [%o1], %o4 - srl %o4, 24, %g2 - stb %g2, [%o0] - srl %o4, 16, %g3 - stb %g3, [%o0 + 1] - srl %o4, 8, %g2 - stb %g2, [%o0 + 2] - sub %o2, 4, %o2 - stb %o4, [%o0 + 3] - add %o1, 4, %o1 - add %o0, 4, %o0 -2: - be 33f - cmp %o5, 2 - be 32f - sub %o2, 4, %o2 -31: - ld [%o1], %g2 - add %o1, 4, %o1 - srl %g2, 24, %g3 - and %o0, 7, %g5 - stb %g3, [%o0] - cmp %g5, 7 - sll %g2, 8, %g1 - add %o0, 4, %o0 - be 41f - and %o2, 0xffffffc0, %o3 - ld [%o0 - 7], %o4 -4: - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - subcc %o3, 64, %o3 - add %o1, 64, %o1 - bne 4b - add %o0, 64, %o0 - - andcc %o2, 0x30, %o3 - be,a 1f - srl %g1, 16, %g2 -4: - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - subcc %o3, 16, %o3 - add %o1, 16, %o1 - bne 4b - add %o0, 16, %o0 - - srl %g1, 16, %g2 -1: - st %o4, [%o0 - 7] - sth %g2, [%o0 - 3] - srl %g1, 8, %g4 - b 88f - stb %g4, [%o0 - 1] -32: - ld [%o1], %g2 - add %o1, 4, %o1 - srl %g2, 16, %g3 - and %o0, 7, %g5 - sth %g3, [%o0] - cmp %g5, 6 - sll %g2, 16, %g1 - add %o0, 4, %o0 - be 42f - and %o2, 0xffffffc0, %o3 - ld [%o0 - 6], %o4 -4: - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, 
o5, g7, g1, 16, 16, -2) - SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2) - SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2) - SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2) - subcc %o3, 64, %o3 - add %o1, 64, %o1 - bne 4b - add %o0, 64, %o0 - - andcc %o2, 0x30, %o3 - be,a 1f - srl %g1, 16, %g2 -4: - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2) - subcc %o3, 16, %o3 - add %o1, 16, %o1 - bne 4b - add %o0, 16, %o0 - - srl %g1, 16, %g2 -1: - st %o4, [%o0 - 6] - b 88f - sth %g2, [%o0 - 2] -33: - ld [%o1], %g2 - sub %o2, 4, %o2 - srl %g2, 24, %g3 - and %o0, 7, %g5 - stb %g3, [%o0] - cmp %g5, 5 - srl %g2, 8, %g4 - sll %g2, 24, %g1 - sth %g4, [%o0 + 1] - add %o1, 4, %o1 - be 43f - and %o2, 0xffffffc0, %o3 - - ld [%o0 - 1], %o4 - add %o0, 4, %o0 -4: - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1) - SMOVE_CHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1) - SMOVE_CHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1) - SMOVE_CHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1) - subcc %o3, 64, %o3 - add %o1, 64, %o1 - bne 4b - add %o0, 64, %o0 - - andcc %o2, 0x30, %o3 - be,a 1f - srl %g1, 24, %g2 -4: - SMOVE_CHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, -1) - subcc %o3, 16, %o3 - add %o1, 16, %o1 - bne 4b - add %o0, 16, %o0 - - srl %g1, 24, %g2 -1: - st %o4, [%o0 - 5] - b 88f - stb %g2, [%o0 - 1] -41: - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - subcc %o3, 64, %o3 - add %o1, 64, %o1 - bne 41b - add %o0, 64, %o0 - - andcc %o2, 0x30, %o3 - be,a 1f - srl %g1, 16, %g2 -4: - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 8, 24, -3) - subcc %o3, 16, %o3 - add %o1, 16, %o1 - bne 4b - add %o0, 16, %o0 - - srl %g1, 16, %g2 + ldub [%i1], %g2 + stb %g2, [%i0 + 4] 1: - sth %g2, [%o0 - 3] - srl %g1, 8, %g4 - b 88f - stb %g4, [%o0 - 1] -43: - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3) - SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3) - SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3) - SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3) - subcc %o3, 64, %o3 - add %o1, 64, %o1 - bne 43b - add %o0, 64, %o0 - - andcc %o2, 0x30, %o3 - be,a 1f - srl %g1, 24, %g2 -4: - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 24, 8, 3) - subcc %o3, 16, %o3 - add %o1, 16, %o1 - bne 4b - add %o0, 16, %o0 - - srl %g1, 24, %g2 -1: - stb %g2, [%o0 + 3] - b 88f - add %o0, 4, %o0 -42: - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2) - SMOVE_ALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2) - SMOVE_ALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2) - SMOVE_ALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2) - subcc %o3, 64, %o3 - add %o1, 64, %o1 - bne 42b - add %o0, 64, %o0 - - andcc %o2, 0x30, %o3 - be,a 1f - srl %g1, 16, %g2 -4: - SMOVE_ALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5, o4, o5, g7, g1, 16, 16, -2) - subcc %o3, 16, %o3 - add %o1, 16, %o1 - bne 4b - add %o0, 16, %o0 - - srl %g1, 16, %g2 -1: - sth %g2, [%o0 - 2] - - /* Fall through */ - -#endif /* FASTER_NONALIGNED */ + ret + restore %g7, %g0, %o0 88: /* 
short_end */ @@ -1127,7 +521,7 @@ FUNC(memcpy) /* %o0=dst %o1=src %o2=len */ stb %g2, [%o0] 1: retl - RETL_INSN + mov %g7, %o0 90: /* short_aligned_end */ bne 88b diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile index 79836a7..3b6e248 100644 --- a/arch/sparc/mm/Makefile +++ b/arch/sparc/mm/Makefile @@ -8,7 +8,6 @@ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o obj-y += fault_$(BITS).o obj-y += init_$(BITS).o obj-$(CONFIG_SPARC32) += loadmmu.o -obj-y += generic_$(BITS).o obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o obj-$(CONFIG_SPARC_LEON)+= leon_mm.o diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c index 5175ac2..8a7f817 100644 --- a/arch/sparc/mm/btfixup.c +++ b/arch/sparc/mm/btfixup.c @@ -302,8 +302,7 @@ void __init btfixup(void) case 'i': /* INT */ if ((insn & 0xc1c00000) == 0x01000000) /* %HI */ set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10)); - else if ((insn & 0x80002000) == 0x80002000 && - (insn & 0x01800000) != 0x01800000) /* %LO */ + else if ((insn & 0x80002000) == 0x80002000) /* %LO */ set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff)); else { prom_printf(insn_i, p, addr, insn); diff --git a/arch/sparc/mm/generic_32.c b/arch/sparc/mm/generic_32.c deleted file mode 100644 index e6067b7..0000000 --- a/arch/sparc/mm/generic_32.c +++ /dev/null @@ -1,98 +0,0 @@ -/* - * generic.c: Generic Sparc mm routines that are not dependent upon - * MMU type but are Sparc specific. - * - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) - */ - -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/* Remap IO memory, the same way as remap_pfn_range(), but use - * the obio memory space. - * - * They use a pgprot that sets PAGE_IO and does not check the - * mem_map table as this is independent of normal memory. 
- */ -static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size, - unsigned long offset, pgprot_t prot, int space) -{ - unsigned long end; - - address &= ~PMD_MASK; - end = address + size; - if (end > PMD_SIZE) - end = PMD_SIZE; - do { - set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space)); - address += PAGE_SIZE; - offset += PAGE_SIZE; - pte++; - } while (address < end); -} - -static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size, - unsigned long offset, pgprot_t prot, int space) -{ - unsigned long end; - - address &= ~PGDIR_MASK; - end = address + size; - if (end > PGDIR_SIZE) - end = PGDIR_SIZE; - offset -= address; - do { - pte_t *pte = pte_alloc_map(mm, NULL, pmd, address); - if (!pte) - return -ENOMEM; - io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space); - address = (address + PMD_SIZE) & PMD_MASK; - pmd++; - } while (address < end); - return 0; -} - -int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, - unsigned long pfn, unsigned long size, pgprot_t prot) -{ - int error = 0; - pgd_t * dir; - unsigned long beg = from; - unsigned long end = from + size; - struct mm_struct *mm = vma->vm_mm; - int space = GET_IOSPACE(pfn); - unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT; - - /* See comment in mm/memory.c remap_pfn_range */ - vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; - vma->vm_pgoff = (offset >> PAGE_SHIFT) | - ((unsigned long)space << 28UL); - - offset -= from; - dir = pgd_offset(mm, from); - flush_cache_range(vma, beg, end); - - while (from < end) { - pmd_t *pmd = pmd_alloc(mm, dir, from); - error = -ENOMEM; - if (!pmd) - break; - error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space); - if (error) - break; - from = (from + PGDIR_SIZE) & PGDIR_MASK; - dir++; - } - - flush_tlb_range(vma, beg, end); - return error; -} -EXPORT_SYMBOL(io_remap_pfn_range); diff --git a/arch/sparc/mm/generic_64.c b/arch/sparc/mm/generic_64.c deleted file mode 100644 index 3cb00df..0000000 --- a/arch/sparc/mm/generic_64.c +++ /dev/null @@ -1,164 +0,0 @@ -/* - * generic.c: Generic Sparc mm routines that are not dependent upon - * MMU type but are Sparc specific. - * - * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) - */ - -#include -#include -#include -#include - -#include -#include -#include -#include - -/* Remap IO memory, the same way as remap_pfn_range(), but use - * the obio memory space. - * - * They use a pgprot that sets PAGE_IO and does not check the - * mem_map table as this is independent of normal memory. 
- */ -static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, - unsigned long address, - unsigned long size, - unsigned long offset, pgprot_t prot, - int space) -{ - unsigned long end; - - /* clear hack bit that was used as a write_combine side-effect flag */ - offset &= ~0x1UL; - address &= ~PMD_MASK; - end = address + size; - if (end > PMD_SIZE) - end = PMD_SIZE; - do { - pte_t entry; - unsigned long curend = address + PAGE_SIZE; - - entry = mk_pte_io(offset, prot, space, PAGE_SIZE); - if (!(address & 0xffff)) { - if (PAGE_SIZE < (4 * 1024 * 1024) && - !(address & 0x3fffff) && - !(offset & 0x3ffffe) && - end >= address + 0x400000) { - entry = mk_pte_io(offset, prot, space, - 4 * 1024 * 1024); - curend = address + 0x400000; - offset += 0x400000; - } else if (PAGE_SIZE < (512 * 1024) && - !(address & 0x7ffff) && - !(offset & 0x7fffe) && - end >= address + 0x80000) { - entry = mk_pte_io(offset, prot, space, - 512 * 1024 * 1024); - curend = address + 0x80000; - offset += 0x80000; - } else if (PAGE_SIZE < (64 * 1024) && - !(offset & 0xfffe) && - end >= address + 0x10000) { - entry = mk_pte_io(offset, prot, space, - 64 * 1024); - curend = address + 0x10000; - offset += 0x10000; - } else - offset += PAGE_SIZE; - } else - offset += PAGE_SIZE; - - if (pte_write(entry)) - entry = pte_mkdirty(entry); - do { - BUG_ON(!pte_none(*pte)); - set_pte_at(mm, address, pte, entry); - address += PAGE_SIZE; - pte_val(entry) += PAGE_SIZE; - pte++; - } while (address < curend); - } while (address < end); -} - -static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size, - unsigned long offset, pgprot_t prot, int space) -{ - unsigned long end; - - address &= ~PGDIR_MASK; - end = address + size; - if (end > PGDIR_SIZE) - end = PGDIR_SIZE; - offset -= address; - do { - pte_t *pte = pte_alloc_map(mm, NULL, pmd, address); - if (!pte) - return -ENOMEM; - io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space); - pte_unmap(pte); - address = (address + PMD_SIZE) & PMD_MASK; - pmd++; - } while (address < end); - return 0; -} - -static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size, - unsigned long offset, pgprot_t prot, int space) -{ - unsigned long end; - - address &= ~PUD_MASK; - end = address + size; - if (end > PUD_SIZE) - end = PUD_SIZE; - offset -= address; - do { - pmd_t *pmd = pmd_alloc(mm, pud, address); - if (!pud) - return -ENOMEM; - io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space); - address = (address + PUD_SIZE) & PUD_MASK; - pud++; - } while (address < end); - return 0; -} - -int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, - unsigned long pfn, unsigned long size, pgprot_t prot) -{ - int error = 0; - pgd_t * dir; - unsigned long beg = from; - unsigned long end = from + size; - struct mm_struct *mm = vma->vm_mm; - int space = GET_IOSPACE(pfn); - unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT; - unsigned long phys_base; - - phys_base = offset | (((unsigned long) space) << 32UL); - - /* See comment in mm/memory.c remap_pfn_range */ - vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; - vma->vm_pgoff = phys_base >> PAGE_SHIFT; - - offset -= from; - dir = pgd_offset(mm, from); - flush_cache_range(vma, beg, end); - - while (from < end) { - pud_t *pud = pud_alloc(mm, dir, from); - error = -ENOMEM; - if (!pud) - break; - error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space); 
- if (error) - break; - from = (from + PGDIR_SIZE) & PGDIR_MASK; - dir++; - } - - flush_tlb_range(vma, beg, end); - return error; -} -EXPORT_SYMBOL(io_remap_pfn_range); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index bfab3fa..7b65f75 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; break; } if (filter[i].jt != 0) { - if (filter[i].jf) - t_offset += is_near(f_offset) ? 2 : 6; + if (filter[i].jf && f_offset) + t_offset += is_near(f_offset) ? 2 : 5; EMIT_COND_JMP(t_op, t_offset); if (filter[i].jf) EMIT_JMP(f_offset); diff --git a/block/blk-core.c b/block/blk-core.c index 69ef3f7..4e662ed 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -422,6 +422,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) q->backing_dev_info.state = 0; q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; q->backing_dev_info.name = "block"; + q->node = node_id; err = bdi_init(&q->backing_dev_info); if (err) { @@ -506,7 +507,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) if (!uninit_q) return NULL; - q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id); + q = blk_init_allocated_queue(uninit_q, rfn, lock); if (!q) blk_cleanup_queue(uninit_q); @@ -518,18 +519,9 @@ struct request_queue * blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, spinlock_t *lock) { - return blk_init_allocated_queue_node(q, rfn, lock, -1); -} -EXPORT_SYMBOL(blk_init_allocated_queue); - -struct request_queue * -blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, - spinlock_t *lock, int node_id) -{ if (!q) return NULL; - q->node = node_id; if (blk_init_free_list(q)) return NULL; @@ -559,7 +551,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, return NULL; } -EXPORT_SYMBOL(blk_init_allocated_queue_node); +EXPORT_SYMBOL(blk_init_allocated_queue); int blk_get_queue(struct request_queue *q) { diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index ae21919..23500ac 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -3169,7 +3169,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, } } - if (ret) + if (ret && ret != -EEXIST) printk(KERN_ERR "cfq: cic link failed!\n"); return ret; @@ -3185,6 +3185,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) { struct io_context *ioc = NULL; struct cfq_io_context *cic; + int ret; might_sleep_if(gfp_mask & __GFP_WAIT); @@ -3192,6 +3193,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) if (!ioc) return NULL; +retry: cic = cfq_cic_lookup(cfqd, ioc); if (cic) goto out; @@ -3200,7 +3202,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) if (cic == NULL) goto err; - if (cfq_cic_link(cfqd, ioc, cic, gfp_mask)) + ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask); + if (ret == -EEXIST) { + /* someone has linked cic to ioc already */ + cfq_cic_free(cic); + goto retry; + } else if (ret) goto err_free; out: @@ -4015,6 +4022,11 @@ static void *cfq_init_queue(struct request_queue *q) if (blkio_alloc_blkg_stats(&cfqg->blkg)) { kfree(cfqg); + + spin_lock(&cic_index_lock); + ida_remove(&cic_index_ida, cfqd->cic_index); + spin_unlock(&cic_index_lock); + kfree(cfqd); return NULL; } diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 06ed6b4..3719c94 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -226,13 +226,13 @@ static 
ssize_t firmware_loading_store(struct device *dev, int loading = simple_strtol(buf, NULL, 10); int i; + mutex_lock(&fw_lock); + + if (!fw_priv->fw) + goto out; + switch (loading) { case 1: - mutex_lock(&fw_lock); - if (!fw_priv->fw) { - mutex_unlock(&fw_lock); - break; - } firmware_free_data(fw_priv->fw); memset(fw_priv->fw, 0, sizeof(struct firmware)); /* If the pages are not owned by 'struct firmware' */ @@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev, fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; set_bit(FW_STATUS_LOADING, &fw_priv->status); - mutex_unlock(&fw_lock); break; case 0: if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { @@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev, fw_load_abort(fw_priv); break; } - +out: + mutex_unlock(&fw_lock); return count; } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 7eef6e1..ef16443 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1451,6 +1451,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) diff1 = now - dev_priv->last_time1; + /* Prevent division-by-zero if we are asking too fast. + * Also, we don't get interesting results if we are polling + * faster than once in 10ms, so just return the saved value + * in such cases. + */ + if (diff1 <= 10) + return dev_priv->chipset_power; + count1 = I915_READ(DMIEC); count2 = I915_READ(DDREC); count3 = I915_READ(CSIEC); @@ -1481,6 +1489,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) dev_priv->last_count1 = total_count; dev_priv->last_time1 = now; + dev_priv->chipset_power = ret; + return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e0d0e27..335564e 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -702,6 +702,7 @@ typedef struct drm_i915_private { u64 last_count1; unsigned long last_time1; + unsigned long chipset_power; u64 last_count2; struct timespec last_time2; unsigned long gfx_power; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 21c5aa0..fe052c6 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3257,6 +3257,18 @@ int evergreen_init(struct radeon_device *rdev) rdev->accel_working = false; } } + + /* Don't start up if the MC ucode is missing on BTC parts. + * The default clocks and voltages before the MC ucode + * is loaded are not suffient for advanced operations. 
+ */ + if (ASIC_IS_DCE5(rdev)) { + if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) { + DRM_ERROR("radeon: MC ucode required for NI+.\n"); + return -EINVAL; + } + } + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 285acc4..a098edc 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -2568,7 +2568,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; - rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; + if (rdev->pm.default_power_state_index >= 0) + rdev->pm.current_vddc = + rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; + else + rdev->pm.current_vddc = 0; } void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index d8ca0a0..65df26c 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2076,9 +2076,11 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd) static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, u32 updegr, u32 egrhd, u32 npkts) { - qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); if (updegr) qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); + mmiowb(); + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + mmiowb(); } static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd) diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index c765a2e..759bb63 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -2704,9 +2704,11 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what) static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, u32 updegr, u32 egrhd, u32 npkts) { - qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); if (updegr) qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); + mmiowb(); + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + mmiowb(); } static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd) diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 8ec5237..49e4a58 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -4060,10 +4060,12 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, */ if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT) adjust_rcv_timeout(rcd, npkts); - qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); - qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); if (updegr) qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt); + mmiowb(); + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); + mmiowb(); } static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd) diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index e06e045..6ad728f 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -24,6 +24,7 @@ */ #include +#include #include #include #include @@ -760,6 +761,16 @@ static int synaptics_reconnect(struct psmouse *psmouse) do { psmouse_reset(psmouse); + if (retry) { + /* + * On some boxes, right after resuming, 
the touchpad + * needs some time to finish initializing (I assume + * it needs time to calibrate) and start responding + * to Synaptics-specific queries, so let's wait a + * bit. + */ + ssleep(1); + } error = synaptics_detect(psmouse, 0); } while (error && ++retry < 3); diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c index bdf19ad..e9babcb 100644 --- a/drivers/media/video/s5p-fimc/fimc-core.c +++ b/drivers/media/video/s5p-fimc/fimc-core.c @@ -36,7 +36,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = { static struct fimc_fmt fimc_formats[] = { { .name = "RGB565", - .fourcc = V4L2_PIX_FMT_RGB565X, + .fourcc = V4L2_PIX_FMT_RGB565, .depth = { 16 }, .color = S5P_FIMC_RGB565, .memplanes = 1, diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index b8f2a4e..f82413a 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -362,13 +362,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } - sid = twl_map[mod_no].sid; - twl = &twl_modules[sid]; - if (unlikely(!inuse)) { - pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); + pr_err("%s: not initialized\n", DRIVER_NAME); return -EPERM; } + sid = twl_map[mod_no].sid; + twl = &twl_modules[sid]; + mutex_lock(&twl->xfer_lock); /* * [MSG1]: fill the register address data @@ -419,13 +419,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } - sid = twl_map[mod_no].sid; - twl = &twl_modules[sid]; - if (unlikely(!inuse)) { - pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); + pr_err("%s: not initialized\n", DRIVER_NAME); return -EPERM; } + sid = twl_map[mod_no].sid; + twl = &twl_modules[sid]; + mutex_lock(&twl->xfer_lock); /* [MSG1] fill the register address data */ msg = &twl->xfer_msg[0]; diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c index 3941ddc..834f824 100644 --- a/drivers/mfd/twl4030-madc.c +++ b/drivers/mfd/twl4030-madc.c @@ -510,8 +510,9 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req) u8 ch_msb, ch_lsb; int ret; - if (!req) + if (!req || !twl4030_madc) return -EINVAL; + mutex_lock(&twl4030_madc->lock); if (req->method < TWL4030_MADC_RT || req->method > TWL4030_MADC_SW2) { ret = -EINVAL; @@ -530,13 +531,13 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req) if (ret) { dev_err(twl4030_madc->dev, "unable to write sel register 0x%X\n", method->sel + 1); - return ret; + goto out; } ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->sel); if (ret) { dev_err(twl4030_madc->dev, "unable to write sel register 0x%X\n", method->sel + 1); - return ret; + goto out; } /* Select averaging for all channels if do_avg is set */ if (req->do_avg) { @@ -546,7 +547,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req) dev_err(twl4030_madc->dev, "unable to write avg register 0x%X\n", method->avg + 1); - return ret; + goto out; } ret = twl_i2c_write_u8(TWL4030_MODULE_MADC, ch_lsb, method->avg); @@ -554,7 +555,7 @@ int twl4030_madc_conversion(struct twl4030_madc_request *req) dev_err(twl4030_madc->dev, "unable to write sel reg 0x%X\n", method->sel + 1); - return ret; + goto out; } } if (req->type == TWL4030_MADC_IRQ_ONESHOT && req->func_cb != NULL) { @@ -706,6 +707,8 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev) if (!madc) return -ENOMEM; + madc->dev = &pdev->dev; + /* * Phoenix 
provides 2 interrupt lines. The first one is connected to * the OMAP. The other one can be connected to the other processor such @@ -737,6 +740,28 @@ static int __devinit twl4030_madc_probe(struct platform_device *pdev) TWL4030_BCI_BCICTL1); goto err_i2c; } + + /* Check that MADC clock is on */ + ret = twl_i2c_read_u8(TWL4030_MODULE_INTBR, ®val, TWL4030_REG_GPBR1); + if (ret) { + dev_err(&pdev->dev, "unable to read reg GPBR1 0x%X\n", + TWL4030_REG_GPBR1); + goto err_i2c; + } + + /* If MADC clk is not on, turn it on */ + if (!(regval & TWL4030_GPBR1_MADC_HFCLK_EN)) { + dev_info(&pdev->dev, "clk disabled, enabling\n"); + regval |= TWL4030_GPBR1_MADC_HFCLK_EN; + ret = twl_i2c_write_u8(TWL4030_MODULE_INTBR, regval, + TWL4030_REG_GPBR1); + if (ret) { + dev_err(&pdev->dev, "unable to write reg GPBR1 0x%X\n", + TWL4030_REG_GPBR1); + goto err_i2c; + } + } + platform_set_drvdata(pdev, madc); mutex_init(&madc->lock); ret = request_threaded_irq(platform_get_irq(pdev, 0), NULL, diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index fe14072..9394d0b 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -557,7 +557,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, unsigned int status) { /* First check for errors */ - if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { + if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| + MCI_TXUNDERRUN|MCI_RXOVERRUN)) { u32 remain, success; /* Terminate the DMA transfer */ @@ -636,8 +637,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, } if (!cmd->data || cmd->error) { - if (host->data) + if (host->data) { + /* Terminate the DMA transfer */ + if (dma_inprogress(host)) + mmci_dma_data_error(host); mmci_stop_data(host); + } mmci_request_end(host, cmd->mrq); } else if (!(cmd->data->flags & MMC_DATA_READ)) { mmci_start_data(host, cmd->data); @@ -837,8 +842,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); data = host->data; - if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN| - MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data) + if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| + MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| + MCI_DATABLOCKEND) && data) mmci_data_irq(host, data, status); cmd = host->cmd; diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index d4455ff..52f4b64 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04; static int firmware_rom_wait_states = 0x1C; #endif -module_param(firmware_rom_wait_states, bool, 0644); +module_param(firmware_rom_wait_states, int, 0644); MODULE_PARM_DESC(firmware_rom_wait_states, "ROM wait states byte=RRRIIEEE (Reserved Internal External)"); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2065cb4..0b65c5f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1905,7 +1905,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) "but new slave device does not support netpoll.\n", bond_dev->name); res = -EBUSY; - goto err_close; + goto err_detach; } } #endif @@ -1914,7 +1914,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = bond_create_slave_symlinks(bond_dev, slave_dev); if (res) - goto err_close; + goto err_detach; res = netdev_rx_handler_register(slave_dev, bond_handle_frame, new_slave); @@ -1935,6 +1935,11 @@ 
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) err_dest_symlinks: bond_destroy_slave_symlinks(bond_dev, slave_dev); +err_detach: + write_lock_bh(&bond->lock); + bond_detach_slave(bond, new_slave); + write_unlock_bh(&bond->lock); + err_close: dev_close(slave_dev); diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c index 1286fe2..4b3a68b 100644 --- a/drivers/net/pptp.c +++ b/drivers/net/pptp.c @@ -418,10 +418,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); opt->src_addr = sp->sa_addr.pptp; - if (add_chan(po)) { - release_sock(sk); + if (add_chan(po)) error = -EBUSY; - } release_sock(sk); return error; diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index c5c4b4d..7105577 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -371,7 +371,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb) skb_pull(skb, (size + 1) & 0xfffe); - if (skb->len == 0) + if (skb->len < sizeof(header)) break; head = (u8 *) skb->data; @@ -1560,6 +1560,10 @@ static const struct usb_device_id products [] = { // ASIX 88772a USB_DEVICE(0x0db0, 0xa877), .driver_info = (unsigned long) &ax88772_info, +}, { + // Asus USB Ethernet Adapter + USB_DEVICE (0x0b95, 0x7e2b), + .driver_info = (unsigned long) &ax88772_info, }, { }, // END }; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 5362306..a126a3e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1828,6 +1828,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw, struct ath_softc *sc = hw->priv; struct ath_node *an = (struct ath_node *) sta->drv_priv; + if (!(sc->sc_flags & SC_OP_TXAGGR)) + return; + switch (cmd) { case STA_NOTIFY_SLEEP: an->sleeping = true; diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index ba7f36a..ea35843 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1252,7 +1252,9 @@ static void ath_rc_init(struct ath_softc *sc, ath_rc_priv->max_valid_rate = k; ath_rc_sort_validrates(rate_table, ath_rc_priv); - ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; + ath_rc_priv->rate_max_phy = (k > 4) ? 
+ ath_rc_priv->valid_rate_index[k-4] : + ath_rc_priv->valid_rate_index[k-1]; ath_rc_priv->rate_table = rate_table; ath_dbg(common, ATH_DBG_CONFIG, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index b849ad7..39a3c9c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -490,8 +490,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed) if (ctx->ht.enabled) { /* if HT40 is used, it should not change * after associated except channel switch */ - if (iwl_is_associated_ctx(ctx) && - !ctx->ht.is_40mhz) + if (!ctx->ht.is_40mhz || + !iwl_is_associated_ctx(ctx)) iwlagn_config_ht40(conf, ctx); } else ctx->ht.is_40mhz = false; diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 4974cd7..67cd2e3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -385,7 +385,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, tx_cmd->tid_tspec = qc[0] & 0xf; tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; } else { - tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) + tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; + else + tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; } priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags); @@ -775,10 +778,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd)); iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len); - /* Set up entry for this TFD in Tx byte-count array */ - if (info->flags & IEEE80211_TX_CTL_AMPDU) - iwlagn_txq_update_byte_cnt_tbl(priv, txq, - le16_to_cpu(tx_cmd->len)); + iwlagn_txq_update_byte_cnt_tbl(priv, txq, le16_to_cpu(tx_cmd->len)); pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, firstlen, PCI_DMA_BIDIRECTIONAL); diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index 6e7fe94..d4e9eac 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -878,6 +878,7 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x13b1, 0x0031) }, { USB_DEVICE(0x1737, 0x0070) }, { USB_DEVICE(0x1737, 0x0071) }, + { USB_DEVICE(0x1737, 0x0077) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0162) }, { USB_DEVICE(0x0789, 0x0163) }, @@ -1069,7 +1070,6 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x1740, 0x0605) }, { USB_DEVICE(0x1740, 0x0615) }, /* Linksys */ - { USB_DEVICE(0x1737, 0x0077) }, { USB_DEVICE(0x1737, 0x0078) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0168) }, diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c index b07f8b7..e0e1688 100644 --- a/drivers/net/wireless/wl12xx/boot.c +++ b/drivers/net/wireless/wl12xx/boot.c @@ -328,6 +328,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) nvs_ptr += 3; for (i = 0; i < burst_len; i++) { + if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + val = (nvs_ptr[0] | (nvs_ptr[1] << 8) | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24)); @@ -339,6 +342,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) nvs_ptr += 4; dest_addr += 4; } + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; } /* @@ -350,6 +356,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) */ nvs_ptr = (u8 *)wl->nvs + ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4); + + if (nvs_ptr >= (u8 *) wl->nvs + nvs_len) + goto out_badnvs; + 
nvs_len -= nvs_ptr - (u8 *)wl->nvs; /* Now we must set the partition correctly */ @@ -365,6 +375,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl) kfree(nvs_aligned); return 0; + +out_badnvs: + wl1271_error("nvs data is malformed"); + return -EILSEQ; } static void wl1271_boot_enable_interrupts(struct wl1271 *wl) diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c index 42935ac..b8ec8cd 100644 --- a/drivers/net/wireless/wl12xx/cmd.c +++ b/drivers/net/wireless/wl12xx/cmd.c @@ -121,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) if (!wl->nvs) return -ENODEV; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from INI out of bounds"); + return -EINVAL; + } + gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); if (!gen_parms) return -ENOMEM; @@ -144,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl) gp->tx_bip_fem_manufacturer = gen_parms->general_params.tx_bip_fem_manufacturer; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from FW out of bounds"); + ret = -EINVAL; + goto out; + } + wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); @@ -163,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl) if (!wl->nvs) return -ENODEV; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from ini out of bounds"); + return -EINVAL; + } + gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); if (!gen_parms) return -ENOMEM; @@ -187,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl) gp->tx_bip_fem_manufacturer = gen_parms->general_params.tx_bip_fem_manufacturer; + if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) { + wl1271_warning("FEM index from FW out of bounds"); + ret = -EINVAL; + goto out; + } + wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n", answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer); diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c index 89f6345..84a208d 100644 --- a/drivers/oprofile/oprofile_files.c +++ b/drivers/oprofile/oprofile_files.c @@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; retval = oprofile_set_timeout(val); @@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; retval = oprofile_set_ulong(&oprofile_backtrace_depth, val); @@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; + retval = 0; if (val) retval = oprofile_start(); else diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index d0de6cc..2f0aa0f 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou } +/* + * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains + * unchanged and might be uninitialized. This follows write syscall + * implementation when count is zero: "If count is zero ... 
[and if] + * no errors are detected, 0 will be returned without causing any + * other effect." (man 2 write) + */ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count) { char tmpbuf[TMPBUFSIZE]; @@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_ raw_spin_lock_irqsave(&oprofilefs_lock, flags); *val = simple_strtoul(tmpbuf, NULL, 0); raw_spin_unlock_irqrestore(&oprofilefs_lock, flags); - return 0; + return count; } @@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_ return -EINVAL; retval = oprofilefs_ulong_from_user(&value, buf, count); - if (retval) + if (retval <= 0) return retval; retval = oprofile_set_ulong(file->private_data, value); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index bbb6f85..eb4c883 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -318,20 +318,6 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) } EXPORT_SYMBOL_GPL(rtc_read_alarm); -static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) -{ - int err; - - if (!rtc->ops) - err = -ENODEV; - else if (!rtc->ops->set_alarm) - err = -EINVAL; - else - err = rtc->ops->set_alarm(rtc->dev.parent, alarm); - - return err; -} - static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) { struct rtc_time tm; @@ -355,7 +341,14 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) * over right here, before we set the alarm. */ - return ___rtc_set_alarm(rtc, alarm); + if (!rtc->ops) + err = -ENODEV; + else if (!rtc->ops->set_alarm) + err = -EINVAL; + else + err = rtc->ops->set_alarm(rtc->dev.parent, alarm); + + return err; } int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) @@ -769,20 +762,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) return 0; } -static void rtc_alarm_disable(struct rtc_device *rtc) -{ - struct rtc_wkalrm alarm; - struct rtc_time tm; - - __rtc_read_time(rtc, &tm); - - alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm), - ktime_set(300, 0))); - alarm.enabled = 0; - - ___rtc_set_alarm(rtc, &alarm); -} - /** * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue * @rtc rtc device @@ -804,10 +783,8 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) struct rtc_wkalrm alarm; int err; next = timerqueue_getnext(&rtc->timerqueue); - if (!next) { - rtc_alarm_disable(rtc); + if (!next) return; - } alarm.time = rtc_ktime_to_tm(next->expires); alarm.enabled = 1; err = __rtc_set_alarm(rtc, &alarm); @@ -869,8 +846,7 @@ again: err = __rtc_set_alarm(rtc, &alarm); if (err == -ETIME) goto again; - } else - rtc_alarm_disable(rtc); + } mutex_unlock(&rtc->ops_lock); } diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index eda128f..64aedd8 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t) static struct rtc_class_ops m41t80_rtc_ops = { .read_time = m41t80_rtc_read_time, .set_time = m41t80_rtc_set_time, + /* + * XXX - m41t80 alarm functionality is reported broken. + * until it is fixed, don't register alarm functions. 
+ * .read_alarm = m41t80_rtc_read_alarm, .set_alarm = m41t80_rtc_set_alarm, + */ .proc = m41t80_rtc_proc, + /* + * See above comment on broken alarm + * .alarm_irq_enable = m41t80_rtc_alarm_irq_enable, + */ }; #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 2a4991d..3a417df 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -57,6 +57,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); + /* if previous slave_alloc returned early, there is nothing to do */ + if (!zfcp_sdev->port) + return; + zfcp_erp_lun_shutdown_wait(sdev, "scssd_1"); put_device(&zfcp_sdev->port->dev); } diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 83035bd..39e81cd 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -1082,41 +1082,6 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev) } /** - * _base_save_msix_table - backup msix vector table - * @ioc: per adapter object - * - * This address an errata where diag reset clears out the table - */ -static void -_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc) -{ - int i; - - if (!ioc->msix_enable || ioc->msix_table_backup == NULL) - return; - - for (i = 0; i < ioc->msix_vector_count; i++) - ioc->msix_table_backup[i] = ioc->msix_table[i]; -} - -/** - * _base_restore_msix_table - this restores the msix vector table - * @ioc: per adapter object - * - */ -static void -_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc) -{ - int i; - - if (!ioc->msix_enable || ioc->msix_table_backup == NULL) - return; - - for (i = 0; i < ioc->msix_vector_count; i++) - ioc->msix_table[i] = ioc->msix_table_backup[i]; -} - -/** * _base_check_enable_msix - checks MSIX capable.
* @ioc: per adapter object * @@ -1128,7 +1093,7 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc) { int base; u16 message_control; - u32 msix_table_offset; + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); if (!base) { @@ -1141,14 +1106,8 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc) pci_read_config_word(ioc->pdev, base + 2, &message_control); ioc->msix_vector_count = (message_control & 0x3FF) + 1; - /* get msix table */ - pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset); - msix_table_offset &= 0xFFFFFFF8; - ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset); - dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, " - "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name, - ioc->msix_vector_count, msix_table_offset, ioc->msix_table)); + "vector_count(%d)\n", ioc->name, ioc->msix_vector_count)); return 0; } @@ -1162,8 +1121,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc) { if (ioc->msix_enable) { pci_disable_msix(ioc->pdev); - kfree(ioc->msix_table_backup); - ioc->msix_table_backup = NULL; ioc->msix_enable = 0; } } @@ -1189,14 +1146,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) if (_base_check_enable_msix(ioc) != 0) goto try_ioapic; - ioc->msix_table_backup = kcalloc(ioc->msix_vector_count, - sizeof(u32), GFP_KERNEL); - if (!ioc->msix_table_backup) { - dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for " - "msix_table_backup failed!!!\n", ioc->name)); - goto try_ioapic; - } - memset(&entries, 0, sizeof(struct msix_entry)); r = pci_enable_msix(ioc->pdev, &entries, 1); if (r) { @@ -3513,9 +3462,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) u32 hcb_size; printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name); - - _base_save_msix_table(ioc); - drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n", ioc->name)); @@ -3611,7 +3557,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) goto out; } - _base_restore_msix_table(ioc); printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name); return 0; diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 41a57a7..e1735f9 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -626,8 +626,6 @@ struct mpt2sas_port_facts { * @wait_for_port_enable_to_complete: * @msix_enable: flag indicating msix is enabled * @msix_vector_count: number msix vectors - * @msix_table: virt address to the msix table - * @msix_table_backup: backup msix table * @scsi_io_cb_idx: shost generated commands * @tm_cb_idx: task management commands * @scsih_cb_idx: scsih internal commands @@ -768,8 +766,6 @@ struct MPT2SAS_ADAPTER { u8 msix_enable; u16 msix_vector_count; - u32 *msix_table; - u32 *msix_table_backup; u32 ioc_reset_count; /* internal commands, callback index */ diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 5690f09..c79857e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -4145,7 +4145,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle) /* insert into event log */ sz = offsetof(Mpi2EventNotificationReply_t, EventData) + sizeof(Mpi2EventDataSasDeviceStatusChange_t); - event_reply = kzalloc(sz, GFP_KERNEL); + event_reply = kzalloc(sz, GFP_ATOMIC); if (!event_reply) { printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); @@ -7211,6 +7211,7 @@ _scsih_remove(struct pci_dev *pdev) } sas_remove_host(shost); + 
mpt2sas_base_detach(ioc); list_del(&ioc->list); scsi_remove_host(shost); scsi_host_put(shost); diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c index d6620ad..c828151 100644 --- a/drivers/ssb/driver_pcicore.c +++ b/drivers/ssb/driver_pcicore.c @@ -516,10 +516,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc) static void ssb_pcicore_init_clientmode(struct ssb_pcicore *pc) { - ssb_pcicore_fix_sprom_core_index(pc); + struct ssb_device *pdev = pc->dev; + struct ssb_bus *bus = pdev->bus; + + if (bus->bustype == SSB_BUSTYPE_PCI) + ssb_pcicore_fix_sprom_core_index(pc); /* Disable PCI interrupts. */ - ssb_write32(pc->dev, SSB_INTVEC, 0); + ssb_write32(pdev, SSB_INTVEC, 0); /* Additional PCIe always once-executed workarounds */ if (pc->dev->id.coreid == SSB_DEV_PCIE) { diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index af9b781..b989495 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -199,8 +199,9 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); unsigned int mode; + unsigned long flags; - spin_lock(&port->lock); + spin_lock_irqsave(&port->lock, flags); /* Disable interrupts */ UART_PUT_IDR(port, atmel_port->tx_done_mask); @@ -231,7 +232,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) /* Enable interrupts */ UART_PUT_IER(port, atmel_port->tx_done_mask); - spin_unlock(&port->lock); + spin_unlock_irqrestore(&port->lock, flags); } diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 8faa23c..158f631 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -554,10 +554,18 @@ static void acm_port_down(struct acm *acm) static void acm_tty_hangup(struct tty_struct *tty) { - struct acm *acm = tty->driver_data; - tty_port_hangup(&acm->port); + struct acm *acm; + mutex_lock(&open_mutex); + acm = tty->driver_data; + + if (!acm) + goto out; + + tty_port_hangup(&acm->port); acm_port_down(acm); + +out: mutex_unlock(&open_mutex); } @@ -1183,6 +1191,8 @@ made_compressed_probe: i = device_create_file(&intf->dev, &dev_attr_wCountryCodes); if (i < 0) { kfree(acm->country_codes); + acm->country_codes = NULL; + acm->country_code_size = 0; goto skip_countries; } @@ -1191,6 +1201,8 @@ made_compressed_probe: if (i < 0) { device_remove_file(&intf->dev, &dev_attr_wCountryCodes); kfree(acm->country_codes); + acm->country_codes = NULL; + acm->country_code_size = 0; goto skip_countries; } } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index cec6dfd..24b40ac 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1387,11 +1387,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, ret = -EAGAIN; else urb->transfer_flags |= URB_DMA_MAP_SG; - if (n != urb->num_sgs) { - urb->num_sgs = n; + urb->num_mapped_sgs = n; + if (n != urb->num_sgs) urb->transfer_flags |= URB_DMA_SG_COMBINED; - } } else if (urb->sg) { struct scatterlist *sg = urb->sg; urb->transfer_dma = dma_map_page( diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index ecf12e1..4c65eb6 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -117,9 +117,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x06a3, 0x0006), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, - /* Guillemot Webcam Hercules Dualpix Exchange*/ + /* Guillemot Webcam Hercules Dualpix Exchange 
(2nd ID) */ { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Guillemot Webcam Hercules Dualpix Exchange*/ + { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME }, + /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c index 0917e3a..2499b3b 100644 --- a/drivers/usb/host/ehci-q.c +++ b/drivers/usb/host/ehci-q.c @@ -649,7 +649,7 @@ qh_urb_transaction ( /* * data transfer stage: buffer setup */ - i = urb->num_sgs; + i = urb->num_mapped_sgs; if (len > 0 && i > 0) { sg = urb->sg; buf = sg_dma_address(sg); diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 5fc952d..43ae966 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -389,17 +389,14 @@ ohci_shutdown (struct usb_hcd *hcd) struct ohci_hcd *ohci; ohci = hcd_to_ohci (hcd); - ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable); - ohci->hc_control = ohci_readl(ohci, &ohci->regs->control); + ohci_writel(ohci, (u32) ~0, &ohci->regs->intrdisable); - /* If the SHUTDOWN quirk is set, don't put the controller in RESET */ - ohci->hc_control &= (ohci->flags & OHCI_QUIRK_SHUTDOWN ? - OHCI_CTRL_RWC | OHCI_CTRL_HCFS : - OHCI_CTRL_RWC); - ohci_writel(ohci, ohci->hc_control, &ohci->regs->control); + /* Software reset, after which the controller goes into SUSPEND */ + ohci_writel(ohci, OHCI_HCR, &ohci->regs->cmdstatus); + ohci_readl(ohci, &ohci->regs->cmdstatus); /* flush the writes */ + udelay(10); - /* flush the writes */ - (void) ohci_readl (ohci, &ohci->regs->control); + ohci_writel(ohci, ohci->fminterval, &ohci->regs->fminterval); } static int check_ed(struct ohci_hcd *ohci, struct ed *ed) diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index ad8166c..bc01b06 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -175,28 +175,6 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) return 0; } -/* nVidia controllers continue to drive Reset signalling on the bus - * even after system shutdown, wasting power. This flag tells the - * shutdown routine to leave the controller OPERATIONAL instead of RESET. - */ -static int ohci_quirk_nvidia_shutdown(struct usb_hcd *hcd) -{ - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - struct ohci_hcd *ohci = hcd_to_ohci(hcd); - - /* Evidently nVidia fixed their later hardware; this is a guess at - * the changeover point. - */ -#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB 0x026d - - if (pdev->device < PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_USB) { - ohci->flags |= OHCI_QUIRK_SHUTDOWN; - ohci_dbg(ohci, "enabled nVidia shutdown quirk\n"); - } - - return 0; -} - static void sb800_prefetch(struct ohci_hcd *ohci, int on) { struct pci_dev *pdev; @@ -260,10 +238,6 @@ static const struct pci_device_id ohci_pci_quirks[] = { PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), .driver_data = (unsigned long)ohci_quirk_amd700, }, - { - PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), - .driver_data = (unsigned long) ohci_quirk_nvidia_shutdown, - }, /* FIXME for some of the early AMD 760 southbridges, OHCI * won't work at all. blacklist them. 
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index 35e5fd6..0795b93 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h @@ -403,7 +403,6 @@ struct ohci_hcd { #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ -#define OHCI_QUIRK_SHUTDOWN 0x800 /* nVidia power bug */ // there are also chip quirks/bugs in init logic struct work_struct nec_work; /* Worker for NEC quirk */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a495d48..23e04fb 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -36,6 +36,7 @@ #define OHCI_INTRENABLE 0x10 #define OHCI_INTRDISABLE 0x14 #define OHCI_FMINTERVAL 0x34 +#define OHCI_HCFS (3 << 6) /* hc functional state */ #define OHCI_HCR (1 << 0) /* host controller reset */ #define OHCI_OCR (1 << 3) /* ownership change request */ #define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ @@ -465,6 +466,8 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) { void __iomem *base; u32 control; + u32 fminterval; + int cnt; if (!mmio_resource_enabled(pdev, 0)) return; @@ -497,41 +500,32 @@ static void __devinit quirk_usb_handoff_ohci(struct pci_dev *pdev) } #endif - /* reset controller, preserving RWC (and possibly IR) */ - writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); - readl(base + OHCI_CONTROL); + /* disable interrupts */ + writel((u32) ~0, base + OHCI_INTRDISABLE); - /* Some NVIDIA controllers stop working if kept in RESET for too long */ - if (pdev->vendor == PCI_VENDOR_ID_NVIDIA) { - u32 fminterval; - int cnt; + /* Reset the USB bus, if the controller isn't already in RESET */ + if (control & OHCI_HCFS) { + /* Go into RESET, preserving RWC (and possibly IR) */ + writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); + readl(base + OHCI_CONTROL); - /* drive reset for at least 50 ms (7.1.7.5) */ + /* drive bus reset for at least 50 ms (7.1.7.5) */ msleep(50); + } - /* software reset of the controller, preserving HcFmInterval */ - fminterval = readl(base + OHCI_FMINTERVAL); - writel(OHCI_HCR, base + OHCI_CMDSTATUS); + /* software reset of the controller, preserving HcFmInterval */ + fminterval = readl(base + OHCI_FMINTERVAL); + writel(OHCI_HCR, base + OHCI_CMDSTATUS); - /* reset requires max 10 us delay */ - for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */ - if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) - break; - udelay(1); - } - writel(fminterval, base + OHCI_FMINTERVAL); - - /* Now we're in the SUSPEND state with all devices reset - * and wakeups and interrupts disabled - */ + /* reset requires max 10 us delay */ + for (cnt = 30; cnt > 0; --cnt) { /* ... 
allow extra time */ + if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) + break; + udelay(1); } + writel(fminterval, base + OHCI_FMINTERVAL); - /* - * disable interrupts - */ - writel(~(u32)0, base + OHCI_INTRDISABLE); - writel(~(u32)0, base + OHCI_INTRSTATUS); - + /* Now the controller is safely in SUSPEND and nothing can wake it up */ iounmap(base); } diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c index 84ed28b..8253991 100644 --- a/drivers/usb/host/uhci-q.c +++ b/drivers/usb/host/uhci-q.c @@ -943,7 +943,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, if (usb_pipein(urb->pipe)) status |= TD_CTRL_SPD; - i = urb->num_sgs; + i = urb->num_mapped_sgs; if (len > 0 && i > 0) { sg = urb->sg; data = sg_dma_address(sg); diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c index a403b53..76083ae 100644 --- a/drivers/usb/host/whci/qset.c +++ b/drivers/usb/host/whci/qset.c @@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u remaining = urb->transfer_buffer_length; - for_each_sg(urb->sg, sg, urb->num_sgs, i) { + for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { dma_addr_t dma_addr; size_t dma_remaining; dma_addr_t sp, ep; @@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, remaining = urb->transfer_buffer_length; - for_each_sg(urb->sg, sg, urb->num_sgs, i) { + for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { size_t len; size_t sg_remaining; void *orig; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index b4b0691..c0c5d6c 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2570,7 +2570,7 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) struct scatterlist *sg; sg = NULL; - num_sgs = urb->num_sgs; + num_sgs = urb->num_mapped_sgs; temp = urb->transfer_buffer_length; xhci_dbg(xhci, "count sg list trbs: \n"); @@ -2754,7 +2754,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, return -EINVAL; num_trbs = count_sg_trbs_needed(xhci, urb); - num_sgs = urb->num_sgs; + num_sgs = urb->num_mapped_sgs; total_packet_count = roundup(urb->transfer_buffer_length, le16_to_cpu(urb->ep->desc.wMaxPacketSize)); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 221f14e..107438e 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1568,6 +1568,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, /* FIXME: can we allocate more resources for the HC? 
*/ break; case COMP_BW_ERR: + case COMP_2ND_BW_ERR: dev_warn(&udev->dev, "Not enough bandwidth " "for new device state.\n"); ret = -ENOSPC; @@ -2183,8 +2184,7 @@ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci, if (ret < 0) return ret; - max_streams = USB_SS_MAX_STREAMS( - eps[i]->ss_ep_comp.bmAttributes); + max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp); if (max_streams < (*num_streams - 1)) { xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n", eps[i]->desc.bEndpointAddress, diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 49ce76c..3e7c3a6 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -900,7 +900,6 @@ struct xhci_transfer_event { /* Invalid Stream ID Error */ #define COMP_STRID_ERR 34 /* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */ -/* FIXME - check for this */ #define COMP_2ND_BW_ERR 35 /* Split Transaction Error */ #define COMP_SPLIT_ERR 36 diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c index fe1d443..8f725f6 100644 --- a/drivers/usb/misc/isight_firmware.c +++ b/drivers/usb/misc/isight_firmware.c @@ -55,8 +55,9 @@ static int isight_firmware_load(struct usb_interface *intf, ptr = firmware->data; + buf[0] = 0x01; if (usb_control_msg - (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\1", 1, + (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1, 300) != 1) { printk(KERN_ERR "Failed to initialise isight firmware loader\n"); @@ -100,8 +101,9 @@ static int isight_firmware_load(struct usb_interface *intf, } } + buf[0] = 0x00; if (usb_control_msg - (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1, + (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1, 300) != 1) { printk(KERN_ERR "isight firmware loading completion failed\n"); ret = -ENODEV; diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index dce7182..a0232a7 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -2078,8 +2078,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) if (status < 0) goto fail3; - pm_runtime_put(musb->controller); - status = musb_init_debugfs(musb); if (status < 0) goto fail4; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index fd67cc5..a1a324b 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -92,6 +92,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ + { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */ { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */ { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index 60f38d5..0a8c1e6 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c @@ -315,7 +315,7 @@ static int omninet_write_room(struct tty_struct *tty) int room = 0; /* Default: no room */ /* FIXME: no consistent locking for write_urb_busy */ - if (wport->write_urb_busy) + if (!wport->write_urb_busy) room = wport->bulk_out_size - OMNINET_HEADERLEN; dbg("%s - returns %d", __func__, room); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index d2becb9..c96b6b6 100644 --- a/drivers/usb/serial/option.c +++ 
b/drivers/usb/serial/option.c @@ -472,6 +472,14 @@ static void option_instat_callback(struct urb *urb); #define YUGA_PRODUCT_CLU528 0x260D #define YUGA_PRODUCT_CLU526 0x260F +/* Viettel products */ +#define VIETTEL_VENDOR_ID 0x2262 +#define VIETTEL_PRODUCT_VT1000 0x0002 + +/* ZD Incorporated */ +#define ZD_VENDOR_ID 0x0685 +#define ZD_PRODUCT_7000 0x7000 + /* some devices interfaces need special handling due to a number of reasons */ enum option_blacklist_reason { OPTION_BLACKLIST_NONE = 0, @@ -1173,6 +1181,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) }, + { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index c325e69..9e069ef 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -1073,6 +1073,7 @@ static struct usb_driver usb_storage_driver = { .id_table = usb_storage_usb_ids, .supports_autosuspend = 1, .soft_unbind = 1, + .no_dynamic_id = 1, }; static int __init usb_stor_init(void) diff --git a/drivers/video/offb.c b/drivers/video/offb.c index cb163a5..3251a02 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c @@ -100,36 +100,32 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { struct offb_par *par = (struct offb_par *) info->par; - int i, depth; - u32 *pal = info->pseudo_palette; - - depth = info->var.bits_per_pixel; - if (depth == 16) - depth = (info->var.green.length == 5) ? 
15 : 16; - - if (regno > 255 || - (depth == 16 && regno > 63) || - (depth == 15 && regno > 31)) - return 1; - - if (regno < 16) { - switch (depth) { - case 15: - pal[regno] = (regno << 10) | (regno << 5) | regno; - break; - case 16: - pal[regno] = (regno << 11) | (regno << 5) | regno; - break; - case 24: - pal[regno] = (regno << 16) | (regno << 8) | regno; - break; - case 32: - i = (regno << 8) | regno; - pal[regno] = (i << 16) | i; - break; + + if (info->fix.visual == FB_VISUAL_TRUECOLOR) { + u32 *pal = info->pseudo_palette; + u32 cr = red >> (16 - info->var.red.length); + u32 cg = green >> (16 - info->var.green.length); + u32 cb = blue >> (16 - info->var.blue.length); + u32 value; + + if (regno >= 16) + return -EINVAL; + + value = (cr << info->var.red.offset) | + (cg << info->var.green.offset) | + (cb << info->var.blue.offset); + if (info->var.transp.length > 0) { + u32 mask = (1 << info->var.transp.length) - 1; + mask <<= info->var.transp.offset; + value |= mask; } + pal[regno] = value; + return 0; } + if (regno > 255) + return -EINVAL; + red >>= 8; green >>= 8; blue >>= 8; @@ -381,7 +377,7 @@ static void __init offb_init_fb(const char *name, const char *full_name, int pitch, unsigned long address, int foreign_endian, struct device_node *dp) { - unsigned long res_size = pitch * height * (depth + 7) / 8; + unsigned long res_size = pitch * height; struct offb_par *par = &default_par; unsigned long res_start = address; struct fb_fix_screeninfo *fix; diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 8cb2685..9cb60df 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -216,6 +216,7 @@ static int __devinit cru_detect(unsigned long map_entry, cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE; + set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE)); asminline_call(&cmn_regs, bios32_entrypoint); if (cmn_regs.u1.ral != 0) { @@ -233,8 +234,10 @@ static int __devinit cru_detect(unsigned long map_entry, if ((physical_bios_base + physical_bios_offset)) { cru_rom_addr = ioremap(cru_physical_address, cru_length); - if (cru_rom_addr) + if (cru_rom_addr) { + set_memory_x((unsigned long)cru_rom_addr, cru_length); retval = 0; + } } printk(KERN_DEBUG "hpwdt: CRU Base Address: 0x%lx\n", diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 84f317e..fd60dff 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -162,7 +162,7 @@ void __init xen_swiotlb_init(int verbose) /* * Get IO TLB memory from any location. */ - xen_io_tlb_start = alloc_bootmem(bytes); + xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes)); if (!xen_io_tlb_start) panic("Cannot allocate SWIOTLB buffer"); diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 3451d23..db9ba1a 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c @@ -1568,7 +1568,13 @@ static int ext3_ordered_writepage(struct page *page, int err; J_ASSERT(PageLocked(page)); - WARN_ON_ONCE(IS_RDONLY(inode)); + /* + * We don't want to warn for emergency remount. The condition is + * ordered to avoid dereferencing inode->i_sb in non-error case to + * avoid slow-downs. + */ + WARN_ON_ONCE(IS_RDONLY(inode) && + !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS)); /* * We give up here if we're reentered, because it might be for a @@ -1642,7 +1648,13 @@ static int ext3_writeback_writepage(struct page *page, int err; J_ASSERT(PageLocked(page)); - WARN_ON_ONCE(IS_RDONLY(inode)); + /* + * We don't want to warn for emergency remount. 
The condition is + * ordered to avoid dereferencing inode->i_sb in non-error case to + * avoid slow-downs. + */ + WARN_ON_ONCE(IS_RDONLY(inode) && + !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS)); if (ext3_journal_current_handle()) goto out_fail; @@ -1684,7 +1696,13 @@ static int ext3_journalled_writepage(struct page *page, int err; J_ASSERT(PageLocked(page)); - WARN_ON_ONCE(IS_RDONLY(inode)); + /* + * We don't want to warn for emergency remount. The condition is + * ordered to avoid dereferencing inode->i_sb in non-error case to + * avoid slow-downs. + */ + WARN_ON_ONCE(IS_RDONLY(inode) && + !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ERROR_FS)); if (ext3_journal_current_handle()) goto no_write; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index e97dd21..87822a3 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -1519,16 +1519,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) { if (!flags) return; - else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) + if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) nfs41_handle_server_reboot(clp); - else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | + if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | SEQ4_STATUS_ADMIN_STATE_REVOKED | SEQ4_STATUS_LEASE_MOVED)) nfs41_handle_state_revoked(clp); - else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) + if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) nfs41_handle_recallable_state_revoked(clp); - else if (flags & (SEQ4_STATUS_CB_PATH_DOWN | + if (flags & (SEQ4_STATUS_CB_PATH_DOWN | SEQ4_STATUS_BACKCHANNEL_FAULT | SEQ4_STATUS_CB_PATH_DOWN_SESSION)) nfs41_handle_cb_path_down(clp); diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 41d6743..3e65427 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -842,6 +842,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case FS_IOC32_GETVERSION: cmd = FS_IOC_GETVERSION; break; + case NILFS_IOCTL_CHANGE_CPMODE: + case NILFS_IOCTL_DELETE_CHECKPOINT: + case NILFS_IOCTL_GET_CPINFO: + case NILFS_IOCTL_GET_CPSTAT: + case NILFS_IOCTL_GET_SUINFO: + case NILFS_IOCTL_GET_SUSTAT: + case NILFS_IOCTL_GET_VINFO: + case NILFS_IOCTL_GET_BDESCS: + case NILFS_IOCTL_CLEAN_SEGMENTS: + case NILFS_IOCTL_SYNC: + case NILFS_IOCTL_RESIZE: + case NILFS_IOCTL_SET_ALLOC_RANGE: + break; default: return -ENOIOCTLCMD; } diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index aa91089..f19dfbf 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -453,16 +453,20 @@ int remove_save_link(struct inode *inode, int truncate) static void reiserfs_kill_sb(struct super_block *s) { if (REISERFS_SB(s)) { - if (REISERFS_SB(s)->xattr_root) { - d_invalidate(REISERFS_SB(s)->xattr_root); - dput(REISERFS_SB(s)->xattr_root); - REISERFS_SB(s)->xattr_root = NULL; - } - if (REISERFS_SB(s)->priv_root) { - d_invalidate(REISERFS_SB(s)->priv_root); - dput(REISERFS_SB(s)->priv_root); - REISERFS_SB(s)->priv_root = NULL; - } + /* + * Force any pending inode evictions to occur now. Any + * inodes to be removed that have extended attributes + * associated with them need to clean them up before + * we can release the extended attribute root dentries. + * shrink_dcache_for_umount will BUG if we don't release + * those before it's called so ->put_super is too late. 
+ */ + shrink_dcache_sb(s); + + dput(REISERFS_SB(s)->xattr_root); + REISERFS_SB(s)->xattr_root = NULL; + dput(REISERFS_SB(s)->priv_root); + REISERFS_SB(s)->priv_root = NULL; } kill_block_super(s); @@ -1164,7 +1168,8 @@ static void handle_quota_files(struct super_block *s, char **qf_names, kfree(REISERFS_SB(s)->s_qf_names[i]); REISERFS_SB(s)->s_qf_names[i] = qf_names[i]; } - REISERFS_SB(s)->s_jquota_fmt = *qfmt; + if (*qfmt) + REISERFS_SB(s)->s_jquota_fmt = *qfmt; } #endif diff --git a/fs/udf/file.c b/fs/udf/file.c index 2a346bb..0c0c9d3 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -125,7 +125,6 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, err = udf_expand_file_adinicb(inode); if (err) { udf_debug("udf_expand_adinicb: err=%d\n", err); - up_write(&iinfo->i_data_sem); return err; } } else { @@ -133,9 +132,10 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, iinfo->i_lenAlloc = pos + count; else iinfo->i_lenAlloc = inode->i_size; + up_write(&iinfo->i_data_sem); } - } - up_write(&iinfo->i_data_sem); + } else + up_write(&iinfo->i_data_sem); retval = generic_file_aio_write(iocb, iov, nr_segs, ppos); if (retval > 0) diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 1d1358e..262050f 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -145,6 +145,12 @@ const struct address_space_operations udf_aops = { .bmap = udf_bmap, }; +/* + * Expand file stored in ICB to a normal one-block-file + * + * This function requires i_data_sem for writing and releases it. + * This function requires i_mutex held + */ int udf_expand_file_adinicb(struct inode *inode) { struct page *page; @@ -163,9 +169,15 @@ int udf_expand_file_adinicb(struct inode *inode) iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; + up_write(&iinfo->i_data_sem); mark_inode_dirty(inode); return 0; } + /* + * Release i_data_sem so that we can lock a page - page lock ranks + * above i_data_sem. i_mutex still protects us against file changes. + */ + up_write(&iinfo->i_data_sem); page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); if (!page) @@ -181,6 +193,7 @@ int udf_expand_file_adinicb(struct inode *inode) SetPageUptodate(page); kunmap(page); } + down_write(&iinfo->i_data_sem); memset(iinfo->i_ext.i_data + iinfo->i_lenEAttr, 0x00, iinfo->i_lenAlloc); iinfo->i_lenAlloc = 0; @@ -190,17 +203,20 @@ int udf_expand_file_adinicb(struct inode *inode) iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; /* from now on we have normal address_space methods */ inode->i_data.a_ops = &udf_aops; + up_write(&iinfo->i_data_sem); err = inode->i_data.a_ops->writepage(page, &udf_wbc); if (err) { /* Restore everything back so that we don't lose data... 
*/ lock_page(page); kaddr = kmap(page); + down_write(&iinfo->i_data_sem); memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr, kaddr, inode->i_size); kunmap(page); unlock_page(page); iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB; inode->i_data.a_ops = &udf_adinicb_aops; + up_write(&iinfo->i_data_sem); } page_cache_release(page); mark_inode_dirty(inode); @@ -1105,10 +1121,9 @@ int udf_setsize(struct inode *inode, loff_t newsize) if (bsize < (udf_file_entry_alloc_offset(inode) + newsize)) { err = udf_expand_file_adinicb(inode); - if (err) { - up_write(&iinfo->i_data_sem); + if (err) return err; - } + down_write(&iinfo->i_data_sem); } else iinfo->i_lenAlloc = newsize; } diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c index 4b9fb91..f86e034 100644 --- a/fs/xfs/linux-2.6/xfs_acl.c +++ b/fs/xfs/linux-2.6/xfs_acl.c @@ -39,7 +39,7 @@ xfs_acl_from_disk(struct xfs_acl *aclp) struct posix_acl_entry *acl_e; struct posix_acl *acl; struct xfs_acl_entry *ace; - int count, i; + unsigned int count, i; count = be32_to_cpu(aclp->acl_cnt); if (count > XFS_ACL_MAX_ENTRIES) diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 01bb878..523c451 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -871,27 +871,6 @@ xfs_fs_dirty_inode( } STATIC int -xfs_log_inode( - struct xfs_inode *ip) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_trans *tp; - int error; - - tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); - error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); - if (error) { - xfs_trans_cancel(tp, 0); - return error; - } - - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - return xfs_trans_commit(tp, 0); -} - -STATIC int xfs_fs_write_inode( struct inode *inode, struct writeback_control *wbc) @@ -904,10 +883,8 @@ xfs_fs_write_inode( if (XFS_FORCED_SHUTDOWN(mp)) return -XFS_ERROR(EIO); - if (!ip->i_update_core) - return 0; - if (wbc->sync_mode == WB_SYNC_ALL) { + if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) { /* * Make sure the inode has made it into the log. Instead * of forcing it all the way to stable storage using a @@ -916,11 +893,14 @@ xfs_fs_write_inode( * of synchronous log forces dramatically. */ xfs_ioend_wait(ip); - error = xfs_log_inode(ip); + error = xfs_log_dirty_inode(ip, NULL, 0); if (error) goto out; return 0; } else { + if (!ip->i_update_core) + return 0; + /* * We make this non-blocking if the inode is contended, return * EAGAIN to indicate to the caller that they did not succeed. diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c index b69688d..2f277a0 100644 --- a/fs/xfs/linux-2.6/xfs_sync.c +++ b/fs/xfs/linux-2.6/xfs_sync.c @@ -336,6 +336,32 @@ xfs_sync_fsdata( return xfs_bwrite(mp, bp); } +int +xfs_log_dirty_inode( + struct xfs_inode *ip, + struct xfs_perag *pag, + int flags) +{ + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + int error; + + if (!ip->i_update_core) + return 0; + + tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); + error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); + if (error) { + xfs_trans_cancel(tp, 0); + return error; + } + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + return xfs_trans_commit(tp, 0); +} + /* * When remounting a filesystem read-only or freezing the filesystem, we have * two phases to execute.
This first phase is syncing the data before we @@ -365,6 +391,17 @@ xfs_quiesce_data( /* push and block till complete */ xfs_sync_data(mp, SYNC_WAIT); + + /* + * Log all pending size and timestamp updates. The vfs writeback + * code is supposed to do this, but due to its overaggressive + * livelock detection it will skip inodes where appending writes + * were written out in the first non-blocking sync phase if their + * completion took long enough that it happened after taking the + * timestamp for the cut-off in the blocking phase. + */ + xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0); + xfs_qm_sync(mp, SYNC_WAIT); /* write superblock and hoover up shutdown errors */ diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h index e3a6ad2..ef5b2ce 100644 --- a/fs/xfs/linux-2.6/xfs_sync.h +++ b/fs/xfs/linux-2.6/xfs_sync.h @@ -42,6 +42,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_flush_inodes(struct xfs_inode *ip); +int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags); + int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); void xfs_inode_set_reclaim_tag(struct xfs_inode *ip); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1a23722..cd93f99 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -798,9 +798,6 @@ extern void blk_unprep_request(struct request *); */ extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id); -extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *, - request_fn_proc *, - spinlock_t *, int node_id); extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); extern struct request_queue *blk_init_allocated_queue(struct request_queue *, request_fn_proc *, spinlock_t *); diff --git a/include/linux/i2c/twl4030-madc.h b/include/linux/i2c/twl4030-madc.h index 6427d29..530e11b 100644 --- a/include/linux/i2c/twl4030-madc.h +++ b/include/linux/i2c/twl4030-madc.h @@ -129,6 +129,10 @@ enum sample_type { #define REG_BCICTL2 0x024 #define TWL4030_BCI_ITHSENS 0x007 +/* Register and bits for GPBR1 register */ +#define TWL4030_REG_GPBR1 0x0c +#define TWL4030_GPBR1_MADC_HFCLK_EN (1 << 7) + struct twl4030_madc_user_parms { int channel; int average; diff --git a/include/linux/lglock.h b/include/linux/lglock.h index 433f12d..d8acbcc 100644 --- a/include/linux/lglock.h +++ b/include/linux/lglock.h @@ -22,6 +22,7 @@ #include #include #include +#include /* can make br locks by using local lock for read side, global lock for write */ #define br_lock_init(name) name##_lock_init() @@ -75,9 +76,31 @@ #define DEFINE_LGLOCK(name) \ \ + DEFINE_SPINLOCK(name##_cpu_lock); \ + cpumask_t name##_cpus __read_mostly; \ DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ DEFINE_LGLOCK_LOCKDEP(name); \ \ + static int \ + name##_lg_cpu_callback(struct notifier_block *nb, \ + unsigned long action, void *hcpu) \ + { \ + switch (action & ~CPU_TASKS_FROZEN) { \ + case CPU_UP_PREPARE: \ + spin_lock(&name##_cpu_lock); \ + cpu_set((unsigned long)hcpu, name##_cpus); \ + spin_unlock(&name##_cpu_lock); \ + break; \ + case CPU_UP_CANCELED: case CPU_DEAD: \ + spin_lock(&name##_cpu_lock); \ + cpu_clear((unsigned long)hcpu, name##_cpus); \ + spin_unlock(&name##_cpu_lock); \ + } \ + return NOTIFY_OK; \ + } \ + static struct notifier_block name##_lg_cpu_notifier = { \ + .notifier_call = name##_lg_cpu_callback, \ + }; \ void name##_lock_init(void) { \ int i; \ LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ @@ -86,6 +109,11 @@
lock = &per_cpu(name##_lock, i); \ *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ } \ + register_hotcpu_notifier(&name##_lg_cpu_notifier); \ + get_online_cpus(); \ + for_each_online_cpu(i) \ + cpu_set(i, name##_cpus); \ + put_online_cpus(); \ } \ EXPORT_SYMBOL(name##_lock_init); \ \ @@ -127,9 +155,9 @@ \ void name##_global_lock_online(void) { \ int i; \ - preempt_disable(); \ + spin_lock(&name##_cpu_lock); \ rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ - for_each_online_cpu(i) { \ + for_each_cpu(i, &name##_cpus) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_lock(lock); \ @@ -140,12 +168,12 @@ void name##_global_unlock_online(void) { \ int i; \ rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ - for_each_online_cpu(i) { \ + for_each_cpu(i, &name##_cpus) { \ arch_spinlock_t *lock; \ lock = &per_cpu(name##_lock, i); \ arch_spin_unlock(lock); \ } \ - preempt_enable(); \ + spin_unlock(&name##_cpu_lock); \ } \ EXPORT_SYMBOL(name##_global_unlock_online); \ \ diff --git a/include/linux/usb.h b/include/linux/usb.h index 73c7df4..b08e04c 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1202,6 +1202,7 @@ struct urb { void *transfer_buffer; /* (in) associated data buffer */ dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */ struct scatterlist *sg; /* (in) scatter gather buffer list */ + int num_mapped_sgs; /* (internal) mapped sg entries */ int num_sgs; /* (in) number of entries in the sg list */ u32 transfer_buffer_length; /* (in) data buffer length */ u32 actual_length; /* (return) actual transfer length */ diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index 0fd3fbd..cf65b5c 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h @@ -583,8 +583,26 @@ struct usb_ss_ep_comp_descriptor { } __attribute__ ((packed)); #define USB_DT_SS_EP_COMP_SIZE 6 + /* Bits 4:0 of bmAttributes if this is a bulk endpoint */ -#define USB_SS_MAX_STREAMS(p) (1 << ((p) & 0x1f)) +static inline int +usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp) +{ + int max_streams; + + if (!comp) + return 0; + + max_streams = comp->bmAttributes & 0x1f; + + if (!max_streams) + return 0; + + max_streams = 1 << max_streams; + + return max_streams; +} + /* Bits 1:0 of bmAttributes if this is an isoc endpoint */ #define USB_SS_MULT(p) (1 + ((p) & 0x3)) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 7df327a..c388421 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -236,6 +236,9 @@ extern struct sctp_globals { * bits is an indicator of when to send and window update SACK. */ int rwnd_update_shift; + + /* Threshold for autoclose timeout, in seconds. */ + unsigned long max_autoclose; } sctp_globals; #define sctp_rto_initial (sctp_globals.rto_initial) @@ -271,6 +274,7 @@ extern struct sctp_globals { #define sctp_auth_enable (sctp_globals.auth_enable) #define sctp_checksum_disable (sctp_globals.checksum_disable) #define sctp_rwnd_upd_shift (sctp_globals.rwnd_update_shift) +#define sctp_max_autoclose (sctp_globals.max_autoclose) /* SCTP Socket type: UDP or TCP style. 
*/ typedef enum { diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 2d7503c..feb47ec 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1173,10 +1173,10 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts) /* * If the 'all' option was specified select all the subsystems, - * otherwise 'all, 'none' and a subsystem name options were not - * specified, let's default to 'all' + * otherwise if 'none', 'name=' and a subsystem name options + * were not specified, let's default to 'all' */ - if (all_ss || (!all_ss && !one_ss && !opts->none)) { + if (all_ss || (!one_ss && !opts->none && !opts->name)) { for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { struct cgroup_subsys *ss = subsys[i]; if (ss == NULL) @@ -2095,11 +2095,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) continue; /* get old css_set pointer */ task_lock(tsk); - if (tsk->flags & PF_EXITING) { - /* ignore this task if it's going away */ - task_unlock(tsk); - continue; - } oldcg = tsk->cgroups; get_css_set(oldcg); task_unlock(tsk); diff --git a/kernel/cpu.c b/kernel/cpu.c index 13066a3..b2de274 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -15,6 +15,7 @@ #include #include #include +#include #ifdef CONFIG_SMP /* Serializes the updates to cpu_online_mask, cpu_present_mask */ @@ -597,6 +598,79 @@ static int alloc_frozen_cpus(void) return 0; } core_initcall(alloc_frozen_cpus); + +/* + * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU + * hotplug when tasks are about to be frozen. Also, don't allow the freezer + * to continue until any currently running CPU hotplug operation gets + * completed. + * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the + * 'cpu_add_remove_lock'. And this same lock is also taken by the regular + * CPU hotplug path and released only after it is complete. Thus, we + * (and hence the freezer) will block here until any currently running CPU + * hotplug operation gets completed. + */ +void cpu_hotplug_disable_before_freeze(void) +{ + cpu_maps_update_begin(); + cpu_hotplug_disabled = 1; + cpu_maps_update_done(); +} + + +/* + * When tasks have been thawed, re-enable regular CPU hotplug (which had been + * disabled while beginning to freeze tasks). + */ +void cpu_hotplug_enable_after_thaw(void) +{ + cpu_maps_update_begin(); + cpu_hotplug_disabled = 0; + cpu_maps_update_done(); +} + +/* + * When callbacks for CPU hotplug notifications are being executed, we must + * ensure that the state of the system with respect to the tasks being frozen + * or not, as reported by the notification, remains unchanged *throughout the + * duration* of the execution of the callbacks. + * Hence we need to prevent the freezer from racing with regular CPU hotplug. + * + * This synchronization is implemented by mutually excluding regular CPU + * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/ + * Hibernate notifications. 
+ */ +static int +cpu_hotplug_pm_callback(struct notifier_block *nb, + unsigned long action, void *ptr) +{ + switch (action) { + + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + cpu_hotplug_disable_before_freeze(); + break; + + case PM_POST_SUSPEND: + case PM_POST_HIBERNATION: + cpu_hotplug_enable_after_thaw(); + break; + + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + + +int cpu_hotplug_pm_sync_init(void) +{ + pm_notifier(cpu_hotplug_pm_callback, 0); + return 0; +} +core_initcall(cpu_hotplug_pm_sync_init); + #endif /* CONFIG_PM_SLEEP_SMP */ /** diff --git a/kernel/exit.c b/kernel/exit.c index 7e8cbe8..d9a8d35 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1553,8 +1553,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, } /* dead body doesn't have much to contribute */ - if (p->exit_state == EXIT_DEAD) + if (unlikely(p->exit_state == EXIT_DEAD)) { + /* + * But do not ignore this task until the tracer does + * wait_task_zombie()->do_notify_parent(). + */ + if (likely(!ptrace) && unlikely(ptrace_reparented(p))) + wo->notask_error = 0; return 0; + } /* slay zombie? */ if (p->exit_state == EXIT_ZOMBIE) { diff --git a/kernel/futex.c b/kernel/futex.c index 30b238c..24ae478 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -314,17 +314,29 @@ again: #endif lock_page(page_head); + + /* + * If page_head->mapping is NULL, then it cannot be a PageAnon + * page; but it might be the ZERO_PAGE or in the gate area or + * in a special mapping (all cases which we are happy to fail); + * or it may have been a good file page when get_user_pages_fast + * found it, but truncated or holepunched or subjected to + * invalidate_complete_page2 before we got the page lock (also + * cases which we are happy to fail). And we hold a reference, + * so refcount care in invalidate_complete_page's remove_mapping + * prevents drop_caches from setting mapping to NULL beneath us. + * + * The case we do have to guard against is when memory pressure made + * shmem_writepage move it from filecache to swapcache beneath us: + * an unlikely race, but we do need to retry for page_head->mapping. + */ if (!page_head->mapping) { + int shmem_swizzled = PageSwapCache(page_head); unlock_page(page_head); put_page(page_head); - /* - * ZERO_PAGE pages don't have a mapping. Avoid a busy loop - * trying to find one. RW mapping would have COW'd (and thus - * have a mapping) so this page is RO and won't ever change. - */ - if ((page_head == ZERO_PAGE(address))) - return -EFAULT; - goto again; + if (shmem_swizzled) + goto again; + return -EFAULT; } /* diff --git a/kernel/hung_task.c b/kernel/hung_task.c index ea64012..e972276 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) /* * Ensure the task is not frozen. - * Also, when a freshly created task is scheduled once, changes - * its state to TASK_UNINTERRUPTIBLE without having ever been - * switched out once, it musn't be checked. + * Also, skip vfork and any other user process that freezer should skip. */ - if (unlikely(t->flags & PF_FROZEN || !switch_count)) + if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP))) + return; + + /* + * When a freshly created task is scheduled once, changes its state to + * TASK_UNINTERRUPTIBLE without having ever been switched out once, it + * musn't be checked. 
+ */ + if (unlikely(!switch_count)) return; if (switch_count != t->last_switch_count) { diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c index 3b8e028..e055e8b 100644 --- a/kernel/sysctl_binary.c +++ b/kernel/sysctl_binary.c @@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen, fput(file); out_putname: - putname(pathname); + __putname(pathname); out: return result; } diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 13dfaab..e4c699d 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -286,7 +286,6 @@ void clockevents_exchange_device(struct clock_event_device *old, * released list and do a notify add later. */ if (old) { - old->event_handler = clockevents_handle_noop; clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED); list_del(&old->list); list_add(&old->list, &clockevents_released); diff --git a/localversion-rt b/localversion-rt index ce6a482..e1d8362 100644 --- a/localversion-rt +++ b/localversion-rt @@ -1 +1 @@ --rt32 +-rt33 diff --git a/mm/filemap.c b/mm/filemap.c index be1b637..0b843b3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1807,7 +1807,7 @@ repeat: page = __page_cache_alloc(gfp | __GFP_COLD); if (!page) return ERR_PTR(-ENOMEM); - err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL); + err = add_to_page_cache_lru(page, mapping, index, gfp); if (unlikely(err)) { page_cache_release(page); if (err == -EEXIST) @@ -1904,10 +1904,7 @@ static struct page *wait_on_page_read(struct page *page) * @gfp: the page allocator flags to use if allocating * * This is the same as "read_mapping_page(mapping, index, NULL)", but with - * any new page allocations done using the specified allocation flags. Note - * that the Radix tree operations will still use GFP_KERNEL, so you can't - * expect to do this atomically or anything like that - but you can pass in - * other page requirements. + * any new page allocations done using the specified allocation flags. * * If the page does not get brought uptodate, return -EIO. 
*/ diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 80936a1..f9c5849 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -901,7 +901,6 @@ retry: h->resv_huge_pages += delta; ret = 0; - spin_unlock(&hugetlb_lock); /* Free the needed pages to the hugetlb pool */ list_for_each_entry_safe(page, tmp, &surplus_list, lru) { if ((--needed) < 0) @@ -915,6 +914,7 @@ retry: VM_BUG_ON(page_count(page)); enqueue_huge_page(h, page); } + spin_unlock(&hugetlb_lock); /* Free unnecessary surplus pages to the buddy allocator */ free: diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 59ac5d6..d99217b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4963,9 +4963,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) int cpu; enable_swap_cgroup(); parent = NULL; - root_mem_cgroup = mem; if (mem_cgroup_soft_limit_tree_init()) goto free_out; + root_mem_cgroup = mem; for_each_possible_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); @@ -5004,7 +5004,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) return &mem->css; free_out: __mem_cgroup_free(mem); - root_mem_cgroup = NULL; return ERR_PTR(error); } diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 8093fc7..7c72487 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0; diff --git a/mm/percpu.c b/mm/percpu.c index 93b5a7c..0ae7a09 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1011,9 +1011,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr) if (!is_vmalloc_addr(addr)) return __pa(addr); else - return page_to_phys(vmalloc_to_page(addr)); + return page_to_phys(vmalloc_to_page(addr)) + + offset_in_page(addr); } else - return page_to_phys(pcpu_addr_to_page(addr)); + return page_to_phys(pcpu_addr_to_page(addr)) + + offset_in_page(addr); } /** diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 4155abc..7d7fb20 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { + int old_value = *(int *)ctl->data; int ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + int new_value = *(int *)ctl->data; if (write) { struct ipv4_devconf *cnf = ctl->extra1; @@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write, if (cnf == net->ipv4.devconf_dflt) devinet_copy_dflt_conf(net, i); + if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1) + if ((new_value == 0) && (old_value != 0)) + rt_cache_flush(net, 0); } return ret; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index d577199..e0d42db 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -875,6 +875,8 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, * to be intended in a v3 query. 
*/ max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE); + if (!max_delay) + max_delay = 1; /* can't mod w/ 0 */ } else { /* v3 */ if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) return; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index ab7e554..7fbcaba 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -252,6 +252,10 @@ static int __init ic_open_devs(void) } } + /* no point in waiting if we could not bring up at least one device */ + if (!ic_first_dev) + goto have_carrier; + /* wait for a carrier on at least one device */ start = jiffies; while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) { diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 378b20b..6f06f7f 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net, if (register_netdevice(dev) < 0) goto failed_free; + strcpy(nt->parms.name, dev->name); + dev_hold(dev); ipip_tunnel_link(ipn, nt); return nt; @@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev) struct ip_tunnel *tunnel = netdev_priv(dev); tunnel->dev = dev; - strcpy(tunnel->parms.name, dev->name); memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); @@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head) static int __net_init ipip_init_net(struct net *net) { struct ipip_net *ipn = net_generic(net, ipip_net_id); + struct ip_tunnel *t; int err; ipn->tunnels[0] = ipn->tunnels_wc; @@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net) if ((err = register_netdev(ipn->fb_tunnel_dev))) goto err_reg_dev; + t = netdev_priv(ipn->fb_tunnel_dev); + + strcpy(t->parms.name, ipn->fb_tunnel_dev->name); return 0; err_reg_dev: diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 7c6c3a8..52d37a7 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -91,6 +91,7 @@ #include #include #include +#include #include #include #include @@ -132,6 +133,9 @@ static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; static int ip_rt_min_advmss __read_mostly = 256; static int rt_chain_length_max __read_mostly = 20; +static struct delayed_work expires_work; +static unsigned long expires_ljiffies; + /* * Interface to generic destination cache. 
*/ @@ -821,6 +825,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth) return ONE; } +static void rt_check_expire(void) +{ + static unsigned int rover; + unsigned int i = rover, goal; + struct rtable *rth; + struct rtable __rcu **rthp; + unsigned long samples = 0; + unsigned long sum = 0, sum2 = 0; + unsigned long delta; + u64 mult; + + delta = jiffies - expires_ljiffies; + expires_ljiffies = jiffies; + mult = ((u64)delta) << rt_hash_log; + if (ip_rt_gc_timeout > 1) + do_div(mult, ip_rt_gc_timeout); + goal = (unsigned int)mult; + if (goal > rt_hash_mask) + goal = rt_hash_mask + 1; + for (; goal > 0; goal--) { + unsigned long tmo = ip_rt_gc_timeout; + unsigned long length; + + i = (i + 1) & rt_hash_mask; + rthp = &rt_hash_table[i].chain; + + if (need_resched()) + cond_resched(); + + samples++; + + if (rcu_dereference_raw(*rthp) == NULL) + continue; + length = 0; + spin_lock_bh(rt_hash_lock_addr(i)); + while ((rth = rcu_dereference_protected(*rthp, + lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) { + prefetch(rth->dst.rt_next); + if (rt_is_expired(rth)) { + *rthp = rth->dst.rt_next; + rt_free(rth); + continue; + } + if (rth->dst.expires) { + /* Entry is expired even if it is in use */ + if (time_before_eq(jiffies, rth->dst.expires)) { +nofree: + tmo >>= 1; + rthp = &rth->dst.rt_next; + /* + * We only count entries on + * a chain with equal hash inputs once + * so that entries for different QOS + * levels, and other non-hash input + * attributes don't unfairly skew + * the length computation + */ + length += has_noalias(rt_hash_table[i].chain, rth); + continue; + } + } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) + goto nofree; + + /* Cleanup aged off entries. */ + *rthp = rth->dst.rt_next; + rt_free(rth); + } + spin_unlock_bh(rt_hash_lock_addr(i)); + sum += length; + sum2 += length*length; + } + if (samples) { + unsigned long avg = sum / samples; + unsigned long sd = int_sqrt(sum2 / samples - avg*avg); + rt_chain_length_max = max_t(unsigned long, + ip_rt_gc_elasticity, + (avg + 4*sd) >> FRACT_BITS); + } + rover = i; +} + +/* + * rt_worker_func() is run in process context. 
+ * we call rt_check_expire() to scan part of the hash table + */ +static void rt_worker_func(struct work_struct *work) +{ + rt_check_expire(); + schedule_delayed_work(&expires_work, ip_rt_gc_interval); +} + /* * Perturbation of rt_genid by a small quantity [1..256] * Using 8 bits of shuffling ensure we can call rt_cache_invalidate() @@ -3088,6 +3183,13 @@ static ctl_table ipv4_route_table[] = { .proc_handler = proc_dointvec_jiffies, }, { + .procname = "gc_interval", + .data = &ip_rt_gc_interval, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_jiffies, + }, + { .procname = "redirect_load", .data = &ip_rt_redirect_load, .maxlen = sizeof(int), @@ -3297,6 +3399,11 @@ int __init ip_rt_init(void) devinet_init(); ip_fib_init(); + INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func); + expires_ljiffies = jiffies; + schedule_delayed_work(&expires_work, + net_random() % ip_rt_gc_interval + ip_rt_gc_interval); + if (ip_rt_proc_init()) printk(KERN_ERR "Unable to create route proc files\n"); #ifdef CONFIG_XFRM diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1cca576..38490d5 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, if (register_netdevice(dev) < 0) goto failed_free; + strcpy(nt->parms.name, dev->name); + dev_hold(dev); ipip6_tunnel_link(sitn, nt); @@ -1141,7 +1143,6 @@ static int ipip6_tunnel_init(struct net_device *dev) struct ip_tunnel *tunnel = netdev_priv(dev); tunnel->dev = dev; - strcpy(tunnel->parms.name, dev->name); memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); @@ -1204,6 +1205,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea static int __net_init sit_init_net(struct net *net) { struct sit_net *sitn = net_generic(net, sit_net_id); + struct ip_tunnel *t; int err; sitn->tunnels[0] = sitn->tunnels_wc; @@ -1228,6 +1230,9 @@ static int __net_init sit_init_net(struct net *net) if ((err = register_netdev(sitn->fb_tunnel_dev))) goto err_reg_dev; + t = netdev_priv(sitn->fb_tunnel_dev); + + strcpy(t->parms.name, sitn->fb_tunnel_dev->name); return 0; err_reg_dev: diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index dfd3a64..a18e6c3 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, copied += used; len -= used; + /* For non stream protcols we get one packet per recvmsg call */ + if (sk->sk_type != SOCK_STREAM) + goto copy_uaddr; + if (!(flags & MSG_PEEK)) { sk_eat_skb(sk, skb, 0); *seq = 0; } - /* For non stream protcols we get one packet per recvmsg call */ - if (sk->sk_type != SOCK_STREAM) - goto copy_uaddr; - /* Partial read */ if (used + offset < skb->len) continue; @@ -857,6 +857,12 @@ copy_uaddr: } if (llc_sk(sk)->cmsg_flags) llc_cmsg_rcv(msg, skb); + + if (!(flags & MSG_PEEK)) { + sk_eat_skb(sk, skb, 0); + *seq = 0; + } + goto out; } diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index db7db43..b7f4f5c 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -304,6 +304,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) __release(agg_queue); } +/* + * splice packets from the STA's pending to the local pending, + * requires a call to ieee80211_agg_splice_finish later + */ +static void __acquires(agg_queue) +ieee80211_agg_splice_packets(struct ieee80211_local *local, + struct tid_ampdu_tx *tid_tx, u16 tid) +{ + int queue = ieee80211_ac_from_tid(tid); + 
unsigned long flags; + + ieee80211_stop_queue_agg(local, tid); + + if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" + " from the pending queue\n", tid)) + return; + + if (!skb_queue_empty(&tid_tx->pending)) { + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + /* copy over remaining packets */ + skb_queue_splice_tail_init(&tid_tx->pending, + &local->pending[queue]); + spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); + } +} + +static void __releases(agg_queue) +ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) +{ + ieee80211_wake_queue_agg(local, tid); +} + void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) { struct tid_ampdu_tx *tid_tx; @@ -315,19 +347,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) tid_tx = rcu_dereference_protected_tid_tx(sta, tid); /* - * While we're asking the driver about the aggregation, - * stop the AC queue so that we don't have to worry - * about frames that came in while we were doing that, - * which would require us to put them to the AC pending - * afterwards which just makes the code more complex. + * Start queuing up packets for this aggregation session. + * We're going to release them once the driver is OK with + * that. */ - ieee80211_stop_queue_agg(local, tid); - clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); /* - * make sure no packets are being processed to get - * valid starting sequence number + * Make sure no packets are being processed. This ensures that + * we have a valid starting sequence number and that in-flight + * packets have been flushed out and no packets for this TID + * will go into the driver during the ampdu_action call. */ synchronize_net(); @@ -341,17 +371,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) " tid %d\n", tid); #endif spin_lock_bh(&sta->lock); + ieee80211_agg_splice_packets(local, tid_tx, tid); ieee80211_assign_tid_tx(sta, tid, NULL); + ieee80211_agg_splice_finish(local, tid); spin_unlock_bh(&sta->lock); - ieee80211_wake_queue_agg(local, tid); kfree_rcu(tid_tx, rcu_head); return; } - /* we can take packets again now */ - ieee80211_wake_queue_agg(local, tid); - /* activate the timer for the recipient's addBA response */ mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); #ifdef CONFIG_MAC80211_HT_DEBUG @@ -471,38 +499,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, } EXPORT_SYMBOL(ieee80211_start_tx_ba_session); -/* - * splice packets from the STA's pending to the local pending, - * requires a call to ieee80211_agg_splice_finish later - */ -static void __acquires(agg_queue) -ieee80211_agg_splice_packets(struct ieee80211_local *local, - struct tid_ampdu_tx *tid_tx, u16 tid) -{ - int queue = ieee80211_ac_from_tid(tid); - unsigned long flags; - - ieee80211_stop_queue_agg(local, tid); - - if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" - " from the pending queue\n", tid)) - return; - - if (!skb_queue_empty(&tid_tx->pending)) { - spin_lock_irqsave(&local->queue_stop_reason_lock, flags); - /* copy over remaining packets */ - skb_queue_splice_tail_init(&tid_tx->pending, - &local->pending[queue]); - spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); - } -} - -static void __releases(agg_queue) -ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) -{ - ieee80211_wake_queue_agg(local, tid); -} - static void ieee80211_agg_tx_operational(struct ieee80211_local *local, struct sta_info *sta, u16 
tid) { diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index b9493a0..6cd8ddf 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp, struct gred_sched_data *q; if (table->tab[dp] == NULL) { - table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL); + table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC); if (table->tab[dp] == NULL) return -ENOMEM; } diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index ea17cbe..59b26b8 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -106,7 +106,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) if (!netif_is_multiqueue(dev)) return -EOPNOTSUPP; - if (nla_len(opt) < sizeof(*qopt)) + if (!opt || nla_len(opt) < sizeof(*qopt)) return -EINVAL; qopt = nla_data(opt); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 4a62888..17a6e65 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = - (unsigned long)sp->autoclose * HZ; + min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ; /* Initializes the timers */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) diff --git a/net/sctp/output.c b/net/sctp/output.c index 08b3cea..817174e 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet, /* Keep track of how many bytes are in flight to the receiver. */ asoc->outqueue.outstanding_bytes += datasize; - /* Update our view of the receiver's rwnd. Include sk_buff overhead - * while updating peer.rwnd so that it reduces the chances of a - * receiver running out of receive buffer space even when receive - * window is still open. This can happen when a sender is sending - * sending small messages. - */ - datasize += sizeof(struct sk_buff); + /* Update our view of the receiver's rwnd. */ if (datasize < rwnd) rwnd -= datasize; else diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index d036821..1f2938f 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q, chunk->transport->flight_size -= sctp_data_size(chunk); q->outstanding_bytes -= sctp_data_size(chunk); - q->asoc->peer.rwnd += (sctp_data_size(chunk) + - sizeof(struct sk_buff)); + q->asoc->peer.rwnd += sctp_data_size(chunk); } continue; } @@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q, * (Section 7.2.4)), add the data size of those * chunks to the rwnd. */ - q->asoc->peer.rwnd += (sctp_data_size(chunk) + - sizeof(struct sk_buff)); + q->asoc->peer.rwnd += sctp_data_size(chunk); q->outstanding_bytes -= sctp_data_size(chunk); if (chunk->transport) transport->flight_size -= sctp_data_size(chunk); diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 207175b..946afd6 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -1144,6 +1144,9 @@ SCTP_STATIC __init int sctp_init(void) sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; + /* Initialize maximum autoclose timeout. */ + sctp_max_autoclose = INT_MAX / HZ; + /* Initialize handle used for association ids. 
*/ idr_init(&sctp_assocs_id); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d3ccf79..fa9b5c7 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2129,8 +2129,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, return -EINVAL; if (copy_from_user(&sp->autoclose, optval, optlen)) return -EFAULT; - /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */ - sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ); return 0; } diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 50cb57f..6752f48 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -53,6 +53,10 @@ static int sack_timer_min = 1; static int sack_timer_max = 500; static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ static int rwnd_scale_max = 16; +static unsigned long max_autoclose_min = 0; +static unsigned long max_autoclose_max = + (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX) + ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ; extern long sysctl_sctp_mem[3]; extern int sysctl_sctp_rmem[3]; @@ -251,6 +255,15 @@ static ctl_table sctp_table[] = { .extra1 = &one, .extra2 = &rwnd_scale_max, }, + { + .procname = "max_autoclose", + .data = &sctp_max_autoclose, + .maxlen = sizeof(unsigned long), + .mode = 0644, + .proc_handler = &proc_doulongvec_minmax, + .extra1 = &max_autoclose_min, + .extra2 = &max_autoclose_max, + }, { /* sentinel */ } }; diff --git a/security/selinux/netport.c b/security/selinux/netport.c index cfe2d72..e2b74eb 100644 --- a/security/selinux/netport.c +++ b/security/selinux/netport.c @@ -139,7 +139,9 @@ static void sel_netport_insert(struct sel_netport *port) if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) { struct sel_netport *tail; tail = list_entry( - rcu_dereference(sel_netport_hash[idx].list.prev), + rcu_dereference_protected( + sel_netport_hash[idx].list.prev, + lockdep_is_held(&sel_netport_lock)), struct sel_netport, list); list_del_rcu(&tail->list); call_rcu(&tail->rcu, sel_netport_free); diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 6c164dc..bf54c48 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -1582,6 +1582,8 @@ process_symbols(struct event *event, struct print_arg *arg, char **tok) field = malloc_or_die(sizeof(*field)); type = process_arg(event, field, &token); + while (type == EVENT_OP) + type = process_op(event, field, &token); if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free;
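A note on the final hunk above: it teaches perf's trace-event parser to keep calling process_op() in a loop after the initial process_arg(), so symbolic field definitions containing expressions (for example a shift) are folded into a single argument before the "," delimiter is tested, instead of failing immediately. Below is a minimal, self-contained sketch of that token-folding pattern in plain C; the token types, the canned token stream, and the helper names are invented for illustration and are not the perf parser's actual API.

/*
 * Standalone sketch (not the perf code itself) of the pattern the final
 * trace-event-parse hunk introduces: after parsing a field argument, keep
 * folding in operator tokens in a loop, so that a composite expression
 * such as "1 << 4" is reduced to a single argument before the ","
 * delimiter is checked.  All names and the token stream are hypothetical.
 */
#include <stdio.h>

enum tok_type { TOK_ITEM, TOK_OP, TOK_DELIM, TOK_END };

struct token {
	enum tok_type type;
	const char *text;
};

/* canned stream standing in for the field definition "1 << 4, ..." */
static const struct token stream[] = {
	{ TOK_ITEM, "1" }, { TOK_OP, "<<" }, { TOK_ITEM, "4" },
	{ TOK_DELIM, "," }, { TOK_END, NULL },
};

/* consume one operand; return the type of the token that follows it */
static enum tok_type parse_arg(int *pos)
{
	if (stream[*pos].type == TOK_ITEM)
		(*pos)++;
	return stream[*pos].type;
}

/* consume an operator plus its right-hand operand */
static enum tok_type parse_op(int *pos)
{
	(*pos)++;			/* skip the operator token */
	return parse_arg(pos);		/* fold in the operand */
}

int main(void)
{
	int pos = 0;
	enum tok_type type = parse_arg(&pos);

	/* the fix: loop while operators follow instead of bailing out */
	while (type == TOK_OP)
		type = parse_op(&pos);

	printf("next token is %sa delimiter\n",
	       type == TOK_DELIM ? "" : "not ");
	return 0;
}

With the loop in place the sample stream reduces to one argument and the delimiter check succeeds; without it, the parser would see the operator token where it expects "," and abort, which is the failure mode the upstream change addresses.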