diff --git a/Makefile b/Makefile
index 3e7196f..1786938 100644
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 2c1db77..a6c66f5 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -382,11 +382,13 @@ ENDPROC(sys_clone_wrapper)
+ mov why, #0 @ prevent syscall restart handling
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
+ mov why, #0 @ prevent syscall restart handling
ENDPROC(sys_rt_sigreturn_wrapper)
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h
index dfcf75b..c8662cd 100644
--- a/arch/ia64/include/asm/compat.h
+++ b/arch/ia64/include/asm/compat.h
@@ -198,7 +198,7 @@ ptr_to_compat(void __user *uptr)
static __inline__ void __user *
-compat_alloc_user_space (long len)
+arch_compat_alloc_user_space (long len)
struct pt_regs *regs = task_pt_regs(current);
return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 6c89228..4a746ea 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -25,7 +25,7 @@ static int ia64_set_msi_irq_affinity(unsigned int irq,
if (irq_prepare_move(irq, cpu))
- read_msi_msg(irq, &msg);
+ get_cached_msi_msg(irq, &msg);
addr = msg.address_lo;
addr &= MSI_ADDR_DEST_ID_MASK;
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index fbbfb97..9ab2617 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -174,7 +174,7 @@ static int sn_set_msi_irq_affinity(unsigned int irq,
* Release XIO resources for the old MSI PCI address
- read_msi_msg(irq, &msg);
+ get_cached_msi_msg(irq, &msg);
sn_pdev = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
pdev = sn_pdev->pdi_linux_pcidev;
provider = SN_PCIDEV_BUSPROVIDER(pdev);
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index f58aed3..27505bd 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -144,7 +144,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
struct pt_regs *regs = (struct pt_regs *)
((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index 7f32611..7c77fa9 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -146,7 +146,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
-static __inline__ void __user *compat_alloc_user_space(long len)
+static __inline__ void __user *arch_compat_alloc_user_space(long len)
struct pt_regs *regs = &current->thread.regs;
return (void __user *)regs->gr[30];
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 4774c2f..8d0fff3 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -133,7 +133,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
struct pt_regs *regs = current->thread.regs;
unsigned long usp = regs->gpr[1];
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 01a0802..0c940d3 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -180,7 +180,7 @@ static inline int is_compat_task(void)
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 0e70625..612bb38 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -166,7 +166,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
struct pt_regs *regs = current_thread_info()->kregs;
unsigned long usp = regs->u_regs[UREG_I6];
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 5294d84..4edd8eb 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
* Reload arg registers from stack in case ptrace changed them.
* We don't reload %eax because syscall_trace_enter() returned
- * the value it wants us to use in the table lookup.
+ * the %rax value we should see. Instead, we just truncate that
+ * value to 32 bits again as we did on entry from user mode.
+ * If it's a new value set by user_regset during entry tracing,
+ * this matches the normal truncation of the user-mode value.
+ * If it's -1 to make us punt the syscall, then (u32)-1 is still
+ * an appropriately invalid value.
.macro LOAD_ARGS32 offset, _r9=0
movl \offset+48(%rsp),%edx
movl \offset+56(%rsp),%esi
movl \offset+64(%rsp),%edi
+ movl %eax,%eax /* zero extension */
.macro CFI_STARTPROC32 simple
@@ -153,7 +159,7 @@ ENTRY(ia32_sysenter_target)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
jnz sysenter_tracesys
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
@@ -195,7 +201,7 @@ sysexit_from_sys_call:
movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
call audit_syscall_entry
movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
movl %ebx,%edi /* reload 1st syscall arg */
movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
@@ -248,7 +254,7 @@ sysenter_tracesys:
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
@@ -314,7 +320,7 @@ ENTRY(ia32_cstar_target)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
- cmpl $IA32_NR_syscalls-1,%eax
+ cmpq $IA32_NR_syscalls-1,%rax
@@ -367,7 +373,7 @@ cstar_tracesys:
LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
END(ia32_cstar_target)
@@ -425,7 +431,7 @@ ENTRY(ia32_syscall)
orl $TS_COMPAT,TI_status(%r10)
testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
@@ -444,7 +450,7 @@ ia32_tracesys:
call syscall_trace_enter
LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
- cmpl $(IA32_NR_syscalls-1),%eax
+ cmpq $(IA32_NR_syscalls-1),%rax
ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
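[Editor's note: a user-space sketch of the arithmetic the comment above documents; not kernel code, and the table size is an assumed value. The old cmpl checked only the low 32 bits of %rax while the table lookup used the full register; the added "movl %eax,%eax" zero-extension plus the cmpl->cmpq changes close that hole.]

/* sketch.c - illustrative only */
#include <stdint.h>
#include <stdio.h>

#define IA32_NR_SYSCALLS 337	/* assumed table size for the sketch */

/* Old check: compare only the low 32 bits (cmpl ... %eax). */
static int old_check(uint64_t rax)
{
	return (uint32_t)rax <= IA32_NR_SYSCALLS - 1;
}

/* New behaviour: zero-extend first (movl %eax,%eax), then compare the
 * whole register (cmpq ... %rax), so the checked value is exactly the
 * value that later indexes the syscall table. */
static int new_check(uint64_t *rax)
{
	*rax = (uint32_t)*rax;
	return *rax <= IA32_NR_SYSCALLS - 1;
}

int main(void)
{
	uint64_t evil = 0x100000001ULL;	/* upper bits smuggled in by a tracer */
	printf("old: %d (passes, yet the full value indexes out of bounds)\n",
	       old_check(evil));
	printf("new: %d (value truncated to 1 before any use)\n",
	       new_check(&evil));
	return 0;
}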
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 9a9c7bd..c8c9a74 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -204,7 +204,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
-static inline void __user *compat_alloc_user_space(long len)
+static inline void __user *arch_compat_alloc_user_space(long len)
struct pt_regs *regs = task_pt_regs(current);
return (void __user *)regs->sp - len;
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c042729..1ca132f 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -59,5 +59,7 @@ extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
extern int notsc_setup(char *);
+extern void save_sched_clock_state(void);
+extern void restore_sched_clock_state(void);
#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 1acd1c4..0da6495 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3338,7 +3338,7 @@ static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
cfg = desc->chip_data;
- read_msi_msg_desc(desc, &msg);
+ get_cached_msi_msg_desc(desc, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 597683a..aaefa71 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
local_irq_restore(flags);
+static unsigned long long cyc2ns_suspend;
+void save_sched_clock_state(void)
+ if (!sched_clock_stable)
+ cyc2ns_suspend = sched_clock();
+ * Even on processors with invariant TSC, the TSC gets reset in some of the
+ * ACPI system sleep states. And on some systems the BIOS seems to reinit the TSC to an
+ * arbitrary value (still sync'd across cpu's) during resume from such sleep
+ * states. To cope with this, recompute the cyc2ns_offset for each cpu so
+ * that sched_clock() continues from the point where it was left off during
+void restore_sched_clock_state(void)
+ unsigned long long offset;
+ unsigned long flags;
+ if (!sched_clock_stable)
+ local_irq_save(flags);
+ __get_cpu_var(cyc2ns_offset) = 0;
+ offset = cyc2ns_suspend - sched_clock();
+ for_each_possible_cpu(cpu)
+ per_cpu(cyc2ns_offset, cpu) = offset;
+ local_irq_restore(flags);
#ifdef CONFIG_CPU_FREQ
/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
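[Editor's note: a minimal arithmetic sketch of the scheme the comment above describes, collapsed to one global scale/offset pair (the real code keeps these per cpu). sched_clock() is tsc * scale + offset; resume picks a fresh offset so the clock continues exactly at the saved reading even if the BIOS restarted the TSC.]

/* cyc2ns_sketch.c - hedged illustration, not the kernel implementation */
#include <stdint.h>
#include <stdio.h>

static uint64_t tsc;		/* stands in for rdtsc() */
static uint64_t scale = 3;	/* assumed cycles-to-ns factor */
static uint64_t offset;
static uint64_t saved_ns;

static uint64_t sched_clock_ns(void)
{
	return tsc * scale + offset;
}

static void save_state(void)	{ saved_ns = sched_clock_ns(); }

static void restore_state(void)
{
	offset = 0;				/* mirrors cyc2ns_offset = 0 */
	offset = saved_ns - sched_clock_ns();	/* continue where we left off */
}

int main(void)
{
	tsc = 1000;
	save_state();			/* saved_ns == 3000 */
	tsc = 7;			/* BIOS reinitialized the TSC */
	restore_state();
	printf("%llu\n", (unsigned long long)sched_clock_ns());	/* 3000 again */
	return 0;
}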
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index c41ad50..3130a4b 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -518,8 +518,13 @@ static int __init init_sysfs(void)
error = sysdev_class_register(&oprofile_sysclass);
- error = sysdev_register(&device_oprofile);
+ error = sysdev_register(&device_oprofile);
+ sysdev_class_unregister(&oprofile_sysclass);
@@ -530,8 +535,10 @@ static void exit_sysfs(void)
-#define init_sysfs() do { } while (0)
-#define exit_sysfs() do { } while (0)
+static inline int init_sysfs(void) { return 0; }
+static inline void exit_sysfs(void) { }
#endif /* CONFIG_PM */
static int __init p4_init(char **cpu_type)
@@ -645,6 +652,8 @@ int __init op_nmi_init(struct oprofile_operations *ops)
char *cpu_type = NULL;
@@ -727,7 +736,10 @@ int __init op_nmi_init(struct oprofile_operations *ops)
+ ret = init_sysfs();
printk(KERN_INFO "oprofile: using NMI interrupt.\n");
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index eeeb522..fa0f651 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -112,6 +112,7 @@ static void __save_processor_state(struct saved_context *ctxt)
void save_processor_state(void)
__save_processor_state(&saved_context);
+ save_sched_clock_state();
EXPORT_SYMBOL(save_processor_state);
@@ -253,6 +254,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
void restore_processor_state(void)
__restore_processor_state(&saved_context);
+ restore_sched_clock_state();
EXPORT_SYMBOL(restore_processor_state);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 6a96da6..0963cd6 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5504,6 +5504,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+ unsigned int ehi_flags = ATA_EHI_QUIET;
@@ -5512,7 +5513,18 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
ata_lpm_enable(host);
- rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
+ * On some hardware, the device fails to respond after being spun down
+ * for suspend. As the device won't be used before being
+ * resumed, we don't need to touch the device. Ask EH to skip
+ * the usual stuff and proceed directly to suspend.
+ * http://thread.gmane.org/gmane.linux.ide/46764
+ if (mesg.event == PM_EVENT_SUSPEND)
+ ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
+ rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
host->dev->power.power_state = mesg;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index e30b9e7..fa9bed0 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3149,6 +3149,10 @@ static int ata_eh_skip_recovery(struct ata_link *link)
if (link->flags & ATA_LFLAG_DISABLED)
+ /* skip if explicitly requested */
+ if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
/* thaw frozen port and recover failed devices */
if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 6f5093b..cf41126 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1879,19 +1879,25 @@ static void mv_bmdma_start(struct ata_queued_cmd *qc)
* Inherited from caller.
-static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+static void mv_bmdma_stop_ap(struct ata_port *ap)
- struct ata_port *ap = qc->ap;
void __iomem *port_mmio = mv_ap_base(ap);
/* clear start/stop bit */
cmd = readl(port_mmio + BMDMA_CMD);
- cmd &= ~ATA_DMA_START;
- writelfl(cmd, port_mmio + BMDMA_CMD);
+ if (cmd & ATA_DMA_START) {
+ cmd &= ~ATA_DMA_START;
+ writelfl(cmd, port_mmio + BMDMA_CMD);
+ /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+ ata_sff_dma_pause(ap);
- /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
- ata_sff_dma_pause(ap);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+ mv_bmdma_stop_ap(qc->ap);
@@ -1915,8 +1921,21 @@ static u8 mv_bmdma_status(struct ata_port *ap)
reg = readl(port_mmio + BMDMA_STATUS);
if (reg & ATA_DMA_ACTIVE)
status = ATA_DMA_ACTIVE;
+ else if (reg & ATA_DMA_ERR)
status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+ * Just because DMA_ACTIVE is 0 (DMA completed),
+ * this does _not_ mean the device is "done".
+ * So we should not yet be signalling ATA_DMA_INTR
+ * in some cases. Eg. DSM/TRIM, and perhaps others.
+ mv_bmdma_stop_ap(ap);
+ if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
+ status = ATA_DMA_INTR;
@@ -1976,6 +1995,9 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
switch (tf->protocol) {
+ if (tf->command == ATA_CMD_DSM)
break; /* continue below */
@@ -2075,6 +2097,8 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
if ((tf->protocol != ATA_PROT_DMA) &&
(tf->protocol != ATA_PROT_NCQ))
+ if (tf->command == ATA_CMD_DSM)
+ return; /* use bmdma for this */
/* Fill in Gen IIE command request block */
if (!(tf->flags & ATA_TFLAG_WRITE))
@@ -2270,6 +2294,12 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
switch (qc->tf.protocol) {
+ if (qc->tf.command == ATA_CMD_DSM) {
+ if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
+ return AC_ERR_OTHER;
+ break; /* use bmdma for this */
mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 08173fc..1b8745d 100644
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2680db7..c3aca5c 100644
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 176a6df..3ada62b 100644
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4f5c733..79cc437 100644
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 1ca6574..e9add5b 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1000,16 +1000,6 @@ static int usbhid_start(struct hid_device *hid)
- init_waitqueue_head(&usbhid->wait);
- INIT_WORK(&usbhid->reset_work, hid_reset);
- INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
- setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
- spin_lock_init(&usbhid->lock);
- usbhid->intf = intf;
- usbhid->ifnum = interface->desc.bInterfaceNumber;
usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
if (!usbhid->urbctrl) {
@@ -1180,6 +1170,14 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
hid->driver_data = usbhid;
+ usbhid->intf = intf;
+ usbhid->ifnum = interface->desc.bInterfaceNumber;
+ init_waitqueue_head(&usbhid->wait);
+ INIT_WORK(&usbhid->reset_work, hid_reset);
+ INIT_WORK(&usbhid->restart_work, __usbhid_restart_queues);
+ setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
+ spin_lock_init(&usbhid->lock);
ret = hid_add_device(hid);
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index e2107e5..afebc34 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -79,7 +79,7 @@ I2C_CLIENT_INSMOD_2(f75373, f75375);
#define F75375_REG_PWM2_DROP_DUTY 0x6C
#define FAN_CTRL_LINEAR(nr) (4 + nr)
-#define FAN_CTRL_MODE(nr) (5 + ((nr) * 2))
+#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
* Data structures and manipulation thereof
@@ -298,7 +298,7 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
- fanmode = ~(3 << FAN_CTRL_MODE(nr));
+ fanmode &= ~(3 << FAN_CTRL_MODE(nr));
case 0: /* Full speed */
@@ -350,7 +350,7 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
conf = f75375_read8(client, F75375_REG_CONFIG1);
- conf = ~(1 << FAN_CTRL_LINEAR(nr));
+ conf &= ~(1 << FAN_CTRL_LINEAR(nr));
conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
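[Editor's note: both f75375s fixes above are the same bug class: plain assignment where a read-modify-write was intended, which wiped every other bit in the register image. A hedged sketch of the intended 2-bit field update, with an invented register layout:]

#include <stdint.h>

/* Update a 2-bit field inside an 8-bit register image without
 * disturbing the other bits; "reg = ~mask" instead of "reg &= ~mask"
 * would throw the rest of the register away. */
static uint8_t set_fan_mode(uint8_t reg, int shift, uint8_t mode)
{
	reg &= ~(3u << shift);		/* clear just the field */
	reg |= (mode & 3u) << shift;	/* set the new value */
	return reg;
}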
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index f808d18..4f84d1a 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -143,6 +143,37 @@ static struct pci_device_id k8temp_ids[] = {
MODULE_DEVICE_TABLE(pci, k8temp_ids);
+static int __devinit is_rev_g_desktop(u8 model)
+ if (model == 0xc1 || model == 0x6c || model == 0x7c)
+ * Differentiate between AM2 and ASB1.
+ * See "Constructing the processor Name String" in "Revision
+ * Guide for AMD NPT Family 0Fh Processors" (33610).
+ brandidx = cpuid_ebx(0x80000001);
+ brandidx = (brandidx >> 9) & 0x1f;
+ if ((model == 0x6f || model == 0x7f) &&
+ (brandidx == 0x7 || brandidx == 0x9 || brandidx == 0xc))
+ if (model == 0x6b &&
+ (brandidx == 0xb || brandidx == 0xc))
static int __devinit k8temp_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -179,9 +210,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
"wrong - check erratum #141\n");
- if ((model >= 0x69) &&
- !(model == 0xc1 || model == 0x6c || model == 0x7c ||
- model == 0x6b || model == 0x6f || model == 0x7f)) {
+ if (is_rev_g_desktop(model)) {
* RevG desktop CPUs (i.e. no socket S1G1 or
* ASB1 parts) need additional offset,
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 1df02d2..16f5ab2 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1412,8 +1412,8 @@ static int __init i8042_init(void)
static void __exit i8042_exit(void)
- platform_driver_unregister(&i8042_driver);
platform_device_unregister(i8042_platform_device);
+ platform_driver_unregister(&i8042_driver);
i8042_platform_exit();
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 91991b4..f43edfd 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -161,6 +161,7 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
struct mmc_data *data = host->data;
@@ -170,8 +171,8 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
- buf = (unsigned short *)(tmio_mmc_kmap_atomic(host, &flags) +
+ sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
+ buf = (unsigned short *)(sg_virt + host->sg_off);
count = host->sg_ptr->length - host->sg_off;
if (count > data->blksz)
@@ -188,7 +189,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
host->sg_off += count;
- tmio_mmc_kunmap_atomic(host, &flags);
+ tmio_mmc_kunmap_atomic(sg_virt, &flags);
if (host->sg_off == host->sg_ptr->length)
tmio_mmc_next_sg(host);
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9fa9985..ee8fa89 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
#define ack_mmc_irqs(host, i) \
- mask = sd_ctrl_read32((host), CTL_STATUS); \
- mask &= ~((i) & TMIO_MASK_IRQ); \
- sd_ctrl_write32((host), CTL_STATUS, mask); \
+ sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
@@ -200,19 +197,17 @@ static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
return --host->sg_len;
-static inline char *tmio_mmc_kmap_atomic(struct tmio_mmc_host *host,
+static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
- struct scatterlist *sg = host->sg_ptr;
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
-static inline void tmio_mmc_kunmap_atomic(struct tmio_mmc_host *host,
+static inline void tmio_mmc_kunmap_atomic(void *virt,
unsigned long *flags)
- kunmap_atomic(sg_page(host->sg_ptr), KM_BIO_SRC_IRQ);
+ kunmap_atomic(virt, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
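[Editor's note: a usage sketch of the reworked tmio helpers, written as a fragment in the driver's own terms rather than a standalone program. The point of the change: the unmap now takes the virtual address the map returned, instead of rederiving the page from host->sg_ptr, which may already have advanced to the next scatterlist segment.]

unsigned long flags;
char *sg_virt;

sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);	/* page vaddr + sg->offset */
/* ... PIO-copy up to one block at sg_virt + host->sg_off ... */
tmio_mmc_kunmap_atomic(sg_virt, &flags);	/* pass back what map returned */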
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4fdfa2a..0f77aca 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1006,7 +1006,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
- if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
+ if (!net_eq(dev_net(tun->dev), &init_net) ||
+ device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
device_create_file(&tun->dev->dev, &dev_attr_owner) ||
device_create_file(&tun->dev->dev, &dev_attr_group))
printk(KERN_ERR "Failed to create tun sysfs files\n");
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index ce166ae..2c4914a 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1288,6 +1288,10 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
rate = ieee80211_get_tx_rate(sc->hw, info);
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= AR5K_TXDESC_NOACK;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 4fe33f7..a5daa0d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
#define SD_NO_CTL 0xE0
-#define CTL_MODE_M 7
+#define CTL_MODE_M 0xf
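[Editor's note: widening this mask matters because conformance-test-limit (CTL) mode numbers can exceed 7; with a 3-bit mask they alias onto the wrong table rows. A hedged fragment, with the mode value assumed for illustration:]

#define CTL_5GHT40 8	/* assumed: a CTL mode numbered above 7 */

unsigned int old_row = CTL_5GHT40 & 7;		/* == 0: aliases onto the wrong row */
unsigned int new_row = CTL_5GHT40 & 0xf;	/* == 8: the intended row */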
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index c1dd857..21cf521 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -31,7 +31,6 @@ enum ctl_group {
#define SD_NO_CTL 0xE0
-#define CTL_MODE_M 7
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 9d147de..0edd7b4 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -445,7 +445,7 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
- (!payload->status))
+ !(payload->status & P54_TX_FAILED))
info->flags |= IEEE80211_TX_STAT_ACK;
if (payload->status & P54_TX_PSM_CANCELLED)
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
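[Editor's note: the old test treated any non-zero status byte as "not ACKed", but the status is a bit field; a frame whose only flag is, say, PSM cancellation was in fact acknowledged. Hedged sketch with invented bit values:]

#include <stdint.h>

#define P54_TX_FAILED		0x01	/* bit values assumed for the sketch */
#define P54_TX_PSM_CANCELLED	0x04

static int was_acked(uint8_t status)
{
	return !(status & P54_TX_FAILED);	/* test the one relevant bit */
}
/* was_acked(P54_TX_PSM_CANCELLED) == 1; the old "!payload->status" said 0. */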
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index c9e2ae9..5c4df24 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -140,16 +140,6 @@ static struct notifier_block module_load_nb = {
.notifier_call = module_load_notify,
-static void end_sync(void)
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
@@ -157,7 +147,7 @@ int sync_start(void)
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+ mutex_lock(&buffer_mutex);
err = task_handoff_register(&task_free_nb);
@@ -172,7 +162,10 @@ int sync_start(void)
+ mutex_unlock(&buffer_mutex);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -181,7 +174,6 @@ out3:
task_handoff_unregister(&task_free_nb);
free_cpumask_var(marked_cpus);
@@ -189,11 +181,20 @@ out1:
+ /* flush buffers */
+ mutex_lock(&buffer_mutex);
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
+ mutex_unlock(&buffer_mutex);
+ flush_scheduled_work();
+ /* make sure we don't leak task structs */
+ process_task_mortuary();
+ process_task_mortuary();
free_cpumask_var(marked_cpus);
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 1f1f5a8..5e2ac4a 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -121,8 +121,6 @@ void end_cpu_work(void)
cancel_delayed_work(&b->work);
- flush_scheduled_work();
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index f9cf317..0fb1d05 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -195,6 +195,9 @@ void unmask_msi_irq(unsigned int irq)
void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
struct msi_desc *entry = get_irq_desc_msi(desc);
+ BUG_ON(entry->dev->current_state != PCI_D0);
if (entry->msi_attrib.is_msix) {
void __iomem *base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
@@ -228,10 +231,32 @@ void read_msi_msg(unsigned int irq, struct msi_msg *msg)
read_msi_msg_desc(desc, msg);
+void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
+ struct msi_desc *entry = get_irq_desc_msi(desc);
+ /* Assert that the cache is valid, assuming that
+ * valid messages are not all-zeroes. */
+ BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
+void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
+ struct irq_desc *desc = irq_to_desc(irq);
+ get_cached_msi_msg_desc(desc, msg);
void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg)
struct msi_desc *entry = get_irq_desc_msi(desc);
- if (entry->msi_attrib.is_msix) {
+ if (entry->dev->current_state != PCI_D0) {
+ /* Don't touch the hardware now */
+ } else if (entry->msi_attrib.is_msix) {
base = entry->mask_base +
entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
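[Editor's note: a fragment showing the pattern these helpers enable, as used by the set_msi_irq_affinity() changes earlier in this series; x86 macro names assumed. Retargeting rewrites only the destination bits of the last message written, instead of reading config space back from a device that may be powered down.]

struct msi_msg msg;

get_cached_msi_msg_desc(desc, &msg);		/* no config-space read */
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest_id);
write_msi_msg_desc(desc, &msg);			/* skips the hardware unless in D0 */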
diff --git a/drivers/power/apm_power.c b/drivers/power/apm_power.c
index 936bae5..dc628cb 100644
--- a/drivers/power/apm_power.c
+++ b/drivers/power/apm_power.c
@@ -233,6 +233,7 @@ static int calculate_capacity(enum apm_source source)
empty_design_prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
now_prop = POWER_SUPPLY_PROP_ENERGY_NOW;
avg_prop = POWER_SUPPLY_PROP_ENERGY_AVG;
full_prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
empty_prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
diff --git a/drivers/staging/hv/RingBuffer.c b/drivers/staging/hv/RingBuffer.c
index f69ae33..3a38103 100644
--- a/drivers/staging/hv/RingBuffer.c
+++ b/drivers/staging/hv/RingBuffer.c
@@ -192,7 +192,7 @@ Description:
GetRingBufferIndices(RING_BUFFER_INFO* RingInfo)
- return ((u64)RingInfo->RingBuffer->WriteIndex << 32) || RingInfo->RingBuffer->ReadIndex;
+ return (u64)RingInfo->RingBuffer->WriteIndex << 32;
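[Editor's note: the removed line used the logical operator ||, so the function returned the boolean 1 whenever either index was non-zero. The patch's fix keeps only the write index in the upper 32 bits; packing both indices would take the bitwise operator instead, as in this hedged sketch:]

#include <stdint.h>

static uint64_t pack_indices(uint32_t write_idx, uint32_t read_idx)
{
	/* bitwise "|" merges the halves; "||" collapses them to 0 or 1 */
	return ((uint64_t)write_idx << 32) | read_idx;
}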
diff --git a/drivers/staging/hv/StorVscApi.h b/drivers/staging/hv/StorVscApi.h
index 69c1406..3d8ff08 100644
--- a/drivers/staging/hv/StorVscApi.h
+++ b/drivers/staging/hv/StorVscApi.h
#include "VmbusApi.h"
-#define STORVSC_RING_BUFFER_SIZE (10*PAGE_SIZE)
+#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
#define BLKVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
-#define STORVSC_MAX_IO_REQUESTS 64
+#define STORVSC_MAX_IO_REQUESTS 128
* In Hyper-V, each port/path/target maps to 1 scsi host adapter. In
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/staging/hv/netvsc_drv.c
index 4c3c8bc..547261d 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/staging/hv/netvsc_drv.c
@@ -392,6 +392,9 @@ static const struct net_device_ops device_ops = {
.ndo_start_xmit = netvsc_start_xmit,
.ndo_get_stats = netvsc_get_stats,
.ndo_set_multicast_list = netvsc_set_multicast_list,
+ .ndo_change_mtu = eth_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
static int netvsc_probe(struct device *device)
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index d49dc21..2a4b147 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -532,7 +532,7 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
@@ -593,7 +593,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
destlen = orig_sgl[i].length;
ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
+ if (bounce_addr == 0)
bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
@@ -652,6 +652,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
unsigned int request_size = 0;
struct scatterlist *sgl;
+ unsigned int sg_count = 0;
DPRINT_ENTER(STORVSC_DRV);
@@ -736,6 +737,7 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
request->DataBuffer.Length = scsi_bufflen(scmnd);
if (scsi_sg_count(scmnd)) {
sgl = (struct scatterlist *)scsi_sglist(scmnd);
+ sg_count = scsi_sg_count(scmnd);
/* check if we need to bounce the sgl */
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
@@ -770,11 +772,12 @@ static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
scsi_sg_count(scmnd));
sgl = cmd_request->bounce_sgl;
+ sg_count = cmd_request->bounce_sgl_count;
request->DataBuffer.Offset = sgl[0].offset;
- for (i = 0; i < scsi_sg_count(scmnd); i++) {
+ for (i = 0; i < sg_count; i++) {
DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d \n",
i, sgl[i].length, sgl[i].offset);
request->DataBuffer.PfnArray[i] =
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 0e64037..e3017c4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -971,7 +971,8 @@ static int acm_probe(struct usb_interface *intf,
- if (intf->cur_altsetting->endpoint->extralen &&
+ if (intf->cur_altsetting->endpoint &&
+ intf->cur_altsetting->endpoint->extralen &&
intf->cur_altsetting->endpoint->extra) {
"Seeking extra descriptors on endpoint\n");
@@ -1464,6 +1465,17 @@ err_out:
#endif /* CONFIG_PM */
+#define NOKIA_PCSUITE_ACM_INFO(x) \
+ USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+ USB_CDC_ACM_PROTO_VENDOR)
+#define SAMSUNG_PCSUITE_ACM_INFO(x) \
+ USB_DEVICE_AND_INTERFACE_INFO(0x04e7, x, \
+ USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
+ USB_CDC_ACM_PROTO_VENDOR)
* USB driver structure.
@@ -1521,6 +1533,76 @@ static struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ /* Nokia S60 phones expose two ACM channels. The first is
+ * a modem and is picked up by the standard AT-command
+ * information below. The second is 'vendor-specific' but
+ * is treated as a serial device at the S60 end, so we want
+ * to expose it on Linux too. */
+ { NOKIA_PCSUITE_ACM_INFO(0x042D), }, /* Nokia 3250 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04D8), }, /* Nokia 5500 Sport */
+ { NOKIA_PCSUITE_ACM_INFO(0x04C9), }, /* Nokia E50 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0419), }, /* Nokia E60 */
+ { NOKIA_PCSUITE_ACM_INFO(0x044D), }, /* Nokia E61 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0001), }, /* Nokia E61i */
+ { NOKIA_PCSUITE_ACM_INFO(0x0475), }, /* Nokia E62 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0508), }, /* Nokia E65 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0418), }, /* Nokia E70 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0425), }, /* Nokia N71 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0486), }, /* Nokia N73 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04DF), }, /* Nokia N75 */
+ { NOKIA_PCSUITE_ACM_INFO(0x000e), }, /* Nokia N77 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0445), }, /* Nokia N80 */
+ { NOKIA_PCSUITE_ACM_INFO(0x042F), }, /* Nokia N91 & N91 8GB */
+ { NOKIA_PCSUITE_ACM_INFO(0x048E), }, /* Nokia N92 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0420), }, /* Nokia N93 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04E6), }, /* Nokia N93i */
+ { NOKIA_PCSUITE_ACM_INFO(0x04B2), }, /* Nokia 5700 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0134), }, /* Nokia 6110 Navigator (China) */
+ { NOKIA_PCSUITE_ACM_INFO(0x046E), }, /* Nokia 6110 Navigator */
+ { NOKIA_PCSUITE_ACM_INFO(0x002f), }, /* Nokia 6120 classic & */
+ { NOKIA_PCSUITE_ACM_INFO(0x0088), }, /* Nokia 6121 classic */
+ { NOKIA_PCSUITE_ACM_INFO(0x00fc), }, /* Nokia 6124 classic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0042), }, /* Nokia E51 */
+ { NOKIA_PCSUITE_ACM_INFO(0x00b0), }, /* Nokia E66 */
+ { NOKIA_PCSUITE_ACM_INFO(0x00ab), }, /* Nokia E71 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0481), }, /* Nokia N76 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0007), }, /* Nokia N81 & N81 8GB */
+ { NOKIA_PCSUITE_ACM_INFO(0x0071), }, /* Nokia N82 */
+ { NOKIA_PCSUITE_ACM_INFO(0x04F0), }, /* Nokia N95 & N95-3 NAM */
+ { NOKIA_PCSUITE_ACM_INFO(0x0070), }, /* Nokia N95 8GB */
+ { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0099), }, /* Nokia 6210 Navigator, RM-367 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0128), }, /* Nokia 6210 Navigator, RM-419 */
+ { NOKIA_PCSUITE_ACM_INFO(0x008f), }, /* Nokia 6220 Classic */
+ { NOKIA_PCSUITE_ACM_INFO(0x00a0), }, /* Nokia 6650 */
+ { NOKIA_PCSUITE_ACM_INFO(0x007b), }, /* Nokia N78 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0094), }, /* Nokia N85 */
+ { NOKIA_PCSUITE_ACM_INFO(0x003a), }, /* Nokia N96 & N96-3 */
+ { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
+ { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
+ { NOKIA_PCSUITE_ACM_INFO(0x02e3), }, /* Nokia 5230, RM-588 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0178), }, /* Nokia E63 */
+ { NOKIA_PCSUITE_ACM_INFO(0x010e), }, /* Nokia E75 */
+ { NOKIA_PCSUITE_ACM_INFO(0x02d9), }, /* Nokia 6760 Slide */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d0), }, /* Nokia E52 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0223), }, /* Nokia E72 */
+ { NOKIA_PCSUITE_ACM_INFO(0x0275), }, /* Nokia X6 */
+ { NOKIA_PCSUITE_ACM_INFO(0x026c), }, /* Nokia N97 Mini */
+ { NOKIA_PCSUITE_ACM_INFO(0x0154), }, /* Nokia 5800 XpressMusic */
+ { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
+ { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
+ { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
+ /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
+ /* control interfaces without any protocol set */
+ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ USB_CDC_PROTO_NONE) },
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
@@ -1536,7 +1618,6 @@ static struct usb_device_id acm_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_CDMA) },
- /* NOTE: COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 48267bc..33ac6ac 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -291,9 +291,13 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
case OID_GEN_VENDOR_DESCRIPTION:
pr_debug("%s: OID_GEN_VENDOR_DESCRIPTION\n", __func__);
- length = strlen (rndis_per_dev_params [configNr].vendorDescr);
- rndis_per_dev_params [configNr].vendorDescr, length);
+ if ( rndis_per_dev_params [configNr].vendorDescr ) {
+ length = strlen (rndis_per_dev_params [configNr].vendorDescr);
+ rndis_per_dev_params [configNr].vendorDescr, length);
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 36f96da..ab26c2b 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -192,17 +192,19 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
rv = usb_add_hcd(hcd, irq, 0);
+ if (ehci->has_amcc_usb23)
+ iounmap(ehci->ohci_hcctrl_reg);
irq_dispose_mapping(irq);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- if (ehci->has_amcc_usb23)
- iounmap(ehci->ohci_hcctrl_reg);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 99bde5f..93c4923 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -90,6 +90,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */
{ USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
+ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
@@ -111,6 +112,7 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
@@ -124,14 +126,14 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
- { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
- { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
- { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
- { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
{ USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 813ec3d..a7044b1 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -759,6 +759,14 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) },
{ USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_24_MASTER_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_PC_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_USB_DMX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MIDI_TIMECODE_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MINI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MAXI_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_MEDIA_WING_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CHAMSYS_WING_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 52c3b68..30d3011 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -135,6 +135,18 @@
#define FTDI_NDI_AURORA_SCU_PID 0xDA74 /* NDI Aurora SCU */
+ * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
+#define FTDI_CHAMSYS_24_MASTER_WING_PID 0xDAF8
+#define FTDI_CHAMSYS_PC_WING_PID 0xDAF9
+#define FTDI_CHAMSYS_USB_DMX_PID 0xDAFA
+#define FTDI_CHAMSYS_MIDI_TIMECODE_PID 0xDAFB
+#define FTDI_CHAMSYS_MINI_WING_PID 0xDAFC
+#define FTDI_CHAMSYS_MAXI_WING_PID 0xDAFD
+#define FTDI_CHAMSYS_MEDIA_WING_PID 0xDAFE
+#define FTDI_CHAMSYS_WING_PID 0xDAFF
* Westrex International devices submitted by Cory Lee
#define FTDI_WESTREX_MODEL_777_PID 0xDC00 /* Model 777 */
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index a861cd2..cf79fb2 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -120,15 +120,20 @@
* by making a change here, in moschip_port_id_table, and in
* moschip_id_table_combined
-#define USB_VENDOR_ID_BANDB 0x0856
-#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
-#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
-#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
-#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
-#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
-#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
-#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
-#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define USB_VENDOR_ID_BANDB 0x0856
+#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
+#define BANDB_DEVICE_ID_USO9ML2_2P 0xBC00
+#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
+#define BANDB_DEVICE_ID_USO9ML2_4P 0xBC01
+#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
+#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
+#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
+#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
+#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
+#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
+#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
+#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
+#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
/* This driver also supports
* ATEN UC2324 device using Moschip MCS7840
@@ -184,13 +189,18 @@ static struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
@@ -200,13 +210,18 @@ static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P)},
+ {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
{USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
{} /* terminating entry */
@@ -280,12 +295,19 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
struct usb_device *dev = port->serial->dev;
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
- *val = (*val) & 0x00ff;
@@ -338,6 +360,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
struct usb_device *dev = port->serial->dev;
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
/* dbg("application number is %4x",
(((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
@@ -361,9 +388,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, Wval, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
- *val = (*val) & 0x00ff;
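[Editor's note: the mos7840 changes replace the caller-supplied val pointer in usb_control_msg() with a kmalloc'd buffer, because the USB core DMAs into that buffer and it therefore must not live on a caller's stack. A hedged sketch of the resulting read pattern, error handling abbreviated, with the driver's own MCS_RDREQ/MOS_WDR_TIMEOUT names assumed:]

u8 *buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);	/* DMA-safe, not stack */

if (!buf)
	return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
		      MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
		      MOS_WDR_TIMEOUT);
*val = buf[0];		/* hand the byte back through the caller's pointer */
kfree(buf);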
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 30e0467..a4dc7bf 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -106,6 +106,7 @@ static inline unsigned long *cpu_evtchn_mask(int cpu)
#define VALID_EVTCHN(chn) ((chn) != 0)
static struct irq_chip xen_dynamic_chip;
+static struct irq_chip xen_percpu_chip;
/* Constructor for packed IRQ information. */
static struct irq_info mk_unbound_info(void)
@@ -362,7 +363,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = find_unbound_irq();
set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "event");
+ handle_edge_irq, "event");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_evtchn_info(evtchn);
@@ -388,8 +389,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "ipi");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "ipi");
bind_ipi.vcpu = cpu;
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
@@ -429,8 +430,8 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
irq = find_unbound_irq();
- set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
- handle_level_irq, "virq");
+ set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
+ handle_percpu_irq, "virq");
evtchn_to_irq[evtchn] = irq;
irq_info[irq] = mk_virq_info(evtchn, virq);
@@ -929,6 +930,16 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
.retrigger = retrigger_dynirq,
+static struct irq_chip xen_percpu_chip __read_mostly = {
+ .name = "xen-percpu",
+ .disable = disable_dynirq,
+ .mask = disable_dynirq,
+ .unmask = enable_dynirq,
+ .ack = ack_dynirq,
void __init xen_init_IRQ(void)
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index c4e8353..42b60b0 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -723,7 +723,7 @@ static int __init init_misc_binfmt(void)
int err = register_filesystem(&bm_fs_type);
- err = register_binfmt(&misc_format);
+ err = insert_binfmt(&misc_format);
unregister_filesystem(&bm_fs_type);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 51d9e33..650546f 100644
@@ -1158,6 +1158,14 @@ __acquires(&fc->lock)
+static void end_queued_requests(struct fuse_conn *fc)
+ fc->max_background = UINT_MAX;
+ flush_bg_queue(fc);
+ end_requests(fc, &fc->pending);
+ end_requests(fc, &fc->processing);
* Abort all requests.
@@ -1184,8 +1192,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
end_io_requests(fc);
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
+ end_queued_requests(fc);
wake_up_all(&fc->waitq);
wake_up_all(&fc->blocked_waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1200,8 +1207,9 @@ int fuse_dev_release(struct inode *inode, struct file *file)
spin_lock(&fc->lock);
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
+ end_queued_requests(fc);
+ wake_up_all(&fc->blocked_waitq);
spin_unlock(&fc->lock);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 127ed5c..19cbbf7 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -273,7 +273,7 @@ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
sin1->sin6_scope_id != sin2->sin6_scope_id)
- return ipv6_addr_equal(&sin1->sin6_addr, &sin1->sin6_addr);
+ return ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr);
#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 4c827d8..3fcb479 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -485,7 +485,11 @@ static int ocfs2_read_locked_inode(struct inode *inode,
OCFS2_BH_IGNORE_CACHE);
status = ocfs2_read_blocks_sync(osb, args->fi_blkno, 1, &bh);
+ * If buffer is in jbd, then its checksum may not have been
+ * computed as yet.
+ if (!status && !buffer_jbd(bh))
status = ocfs2_validate_inode_block(osb->sb, bh);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index f5ea468..7118a38 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -340,7 +340,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
memmove(last_sysfs_file, p, strlen(p) + 1);
/* need attr_sd for attr and ops, its parent for kobj */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index af931ee..cab23f2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -309,5 +309,7 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename,
asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename,
int flags, int mode);
+extern void __user *compat_alloc_user_space(unsigned long len);
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index a5740fc..a73454a 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -21,8 +21,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_locked(struct task_struct *p,
- struct cpumask *mask);
+extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
@@ -69,9 +68,6 @@ struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
struct task_struct *task);
-extern void cpuset_lock(void);
-extern void cpuset_unlock(void);
extern int cpuset_mem_spread_node(void);
static inline int cpuset_do_page_mem_spread(void)
@@ -105,10 +101,11 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
cpumask_copy(mask, cpu_possible_mask);
-static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
- struct cpumask *mask)
+static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
- cpumask_copy(mask, cpu_possible_mask);
+ cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
+ return cpumask_any(cpu_active_mask);
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -157,9 +154,6 @@ static inline void cpuset_task_status_allowed(struct seq_file *m,
-static inline void cpuset_lock(void) {}
-static inline void cpuset_unlock(void) {}
static inline int cpuset_mem_spread_node(void)
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b0f6d97..a069916 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -339,6 +339,7 @@ enum {
ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */
ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */
ATA_EHI_QUIET = (1 << 3), /* be quiet */
+ ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */
ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */
ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */
1631
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 6991ab5..91b05c1 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -14,8 +14,10 @@ struct irq_desc;
extern void mask_msi_irq(unsigned int irq);
extern void unmask_msi_irq(unsigned int irq);
extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
+extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
+extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index cc24beb..957a25f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -145,7 +145,6 @@ extern unsigned long this_cpu_load(void);
extern void calc_global_load(void);
-extern u64 cpu_nr_migrations(int cpu);
extern unsigned long get_parent_ip(unsigned long addr);
@@ -1001,6 +1000,7 @@ struct sched_domain {
+ unsigned int span_weight;
* Span of all CPUs in this domain.
@@ -1072,7 +1072,8 @@ struct sched_domain;
struct sched_class {
const struct sched_class *next;
- void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
void (*yield_task) (struct rq *rq);
@@ -1082,7 +1083,8 @@ struct sched_class {
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
- int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
+ int (*select_task_rq)(struct rq *rq, struct task_struct *p,
+ int sd_flag, int flags);
unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
struct rq *busiest, unsigned long max_load_move,
@@ -1094,7 +1096,8 @@ struct sched_class {
enum cpu_idle_type idle);
void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
void (*post_schedule) (struct rq *this_rq);
- void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+ void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+ void (*task_woken) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
const struct cpumask *newmask);
@@ -1105,7 +1108,7 @@ struct sched_class {
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
- void (*task_new) (struct rq *rq, struct task_struct *p);
+ void (*task_fork) (struct task_struct *p);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
@@ -1114,10 +1117,11 @@ struct sched_class {
void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
int oldprio, int running);
- unsigned int (*get_rr_interval) (struct task_struct *task);
+ unsigned int (*get_rr_interval) (struct rq *rq,
+ struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*moved_group) (struct task_struct *p);
+ void (*moved_group) (struct task_struct *p, int on_rq);
@@ -1178,7 +1182,6 @@ struct sched_entity {
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
- u64 nr_forced2_migrations;
u64 nr_wakeups_sync;
@@ -1886,6 +1889,7 @@ extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#ifdef CONFIG_HOTPLUG_CPU
+extern void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p);
extern void idle_task_exit(void);
static inline void idle_task_exit(void) {}
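The sched.h hunks above carry the core API reshaping of this series: the single task_wake_up hook becomes a task_waking/task_woken pair bracketing CPU selection, task_new becomes task_fork, select_task_rq and get_rr_interval gain an rq argument, and enqueue_task grows a head flag. A minimal userspace C model of the guarded hook-table pattern the callers use; every name below (struct task, sched_ops, wake_up) is illustrative, not a kernel type:

#include <stdio.h>
#include <stdbool.h>

struct task { const char *name; };

struct sched_ops {
	/* split of the old task_wake_up hook: */
	void (*task_waking)(struct task *t);  /* before a CPU is picked */
	void (*task_woken)(struct task *t);   /* after the task is queued */
	void (*enqueue)(struct task *t, int wakeup, bool head);
};

static void fair_waking(struct task *t) { printf("%s: waking\n", t->name); }
static void fair_woken(struct task *t)  { printf("%s: woken\n", t->name); }
static void fair_enqueue(struct task *t, int wakeup, bool head)
{
	printf("%s: enqueue wakeup=%d head=%d\n", t->name, wakeup, head);
}

static const struct sched_ops fair_ops = {
	.task_waking = fair_waking,
	.task_woken  = fair_woken,
	.enqueue     = fair_enqueue,
};

/* Callers null-check each optional hook, as the patched try_to_wake_up()
 * does for task_waking/task_woken. */
static void wake_up(const struct sched_ops *ops, struct task *t)
{
	if (ops->task_waking)
		ops->task_waking(t);
	ops->enqueue(t, 1, false);
	if (ops->task_woken)
		ops->task_woken(t);
}

int main(void)
{
	struct task t = { "demo" };
	wake_up(&fair_ops, &t);
	return 0;
}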
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 57e6357..5b81156 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,7 +99,7 @@ int arch_update_cpu_topology(void);
| 1*SD_WAKE_AFFINE \
| 1*SD_SHARE_CPUPOWER \
| 0*SD_POWERSAVINGS_BALANCE \
- | 0*SD_SHARE_PKG_RESOURCES \
+ | 1*SD_SHARE_PKG_RESOURCES \
| 0*SD_PREFER_SIBLING \
diff --git a/kernel/compat.c b/kernel/compat.c
index 180d188..8bc5578 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
#include <linux/posix-timers.h>
#include <linux/times.h>
#include <linux/ptrace.h>
+#include <linux/module.h>
#include <asm/uaccess.h>
@@ -1136,3 +1137,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshall parameters inside a compat thunk.
+void __user *compat_alloc_user_space(unsigned long len)
+ /* If len would occupy more than half of the entire compat space... */
+ if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+ ptr = arch_compat_alloc_user_space(len);
+ if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
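The hunk above is shown with several lines lost in transit. Judging from the surviving fragments, the new generic helper plausibly reads as follows (a reconstruction, not verbatim): refuse a length that would take more than half of the 32-bit compat address space, delegate the pointer computation to the per-arch arch_compat_alloc_user_space() renamed throughout this series, and reject the result if access_ok() fails, so that individual compat syscalls no longer need their own checks.

void __user *compat_alloc_user_space(unsigned long len)
{
	void __user *ptr;

	/* If len would occupy more than half of the entire compat space... */
	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
		return NULL;

	ptr = arch_compat_alloc_user_space(len);

	/* ...or the arch gave back a range the caller may not write to,
	 * fail the allocation instead of letting the thunk proceed. */
	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
		return NULL;

	return ptr;
}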
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 291ac58..7e8b6ac 100644
@@ -151,7 +151,7 @@ static inline void check_for_tasks(int cpu)
write_lock_irq(&tasklist_lock);
for_each_process(p) {
- if (task_cpu(p) == cpu &&
+ if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
(!cputime_eq(p->utime, cputime_zero) ||
!cputime_eq(p->stime, cputime_zero)))
printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
@@ -163,6 +163,7 @@ static inline void check_for_tasks(int cpu)
struct take_cpu_down_param {
+ struct task_struct *caller;
@@ -171,6 +172,7 @@ struct take_cpu_down_param {
static int __ref take_cpu_down(void *_param)
struct take_cpu_down_param *param = _param;
+ unsigned int cpu = (unsigned long)param->hcpu;
/* Ensure this CPU doesn't handle any more interrupts. */
@@ -181,6 +183,8 @@ static int __ref take_cpu_down(void *_param)
raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
+ if (task_cpu(param->caller) == cpu)
+ move_task_off_dead_cpu(cpu, param->caller);
/* Force idle task to run as soon as we yield: it should
immediately notice cpu is offline and die quickly. */
@@ -191,10 +195,10 @@ static int __ref take_cpu_down(void *_param)
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
int err, nr_calls = 0;
- cpumask_var_t old_allowed;
void *hcpu = (void *)(long)cpu;
unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
struct take_cpu_down_param tcd_param = {
+ .caller = current,
@@ -205,10 +209,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
if (!cpu_online(cpu))
- if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
cpu_hotplug_begin();
+ set_cpu_active(cpu, false);
err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
hcpu, -1, &nr_calls);
if (err == NOTIFY_BAD) {
@@ -223,10 +225,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
- /* Ensure that we are not runnable on dying cpu */
- cpumask_copy(old_allowed, &current->cpus_allowed);
- set_cpus_allowed_ptr(current, cpu_active_mask);
err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
set_cpu_active(cpu, true);
@@ -235,7 +233,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
hcpu) == NOTIFY_BAD)
BUG_ON(cpu_online(cpu));
@@ -253,8 +251,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
check_for_tasks(cpu);
- set_cpus_allowed_ptr(current, old_allowed);
@@ -262,7 +258,6 @@ out_release:
hcpu) == NOTIFY_BAD)
- free_cpumask_var(old_allowed);
@@ -280,18 +275,6 @@ int __ref cpu_down(unsigned int cpu)
- set_cpu_active(cpu, false);
- * Make sure the all cpus did the reschedule and are not
- * using stale version of the cpu_active_mask.
- * This is not strictly necessary becuase stop_machine()
- * that we run down the line already provides the required
- * synchronization. But it's really a side effect and we do not
- * want to depend on the innards of the stop_machine here.
- synchronize_sched();
err = _cpu_down(cpu, 0);
@@ -382,19 +365,12 @@ int disable_nonboot_cpus(void)
cpu_maps_update_begin();
first_cpu = cpumask_first(cpu_online_mask);
- /* We take down all of the non-boot CPUs in one shot to avoid races
+ * We take down all of the non-boot CPUs in one shot to avoid races
* with the userspace trying to use the CPU hotplug at the same time
cpumask_clear(frozen_cpus);
- for_each_online_cpu(cpu) {
- if (cpu == first_cpu)
- set_cpu_active(cpu, false);
- synchronize_sched();
printk("Disabling non-boot CPUs ...\n");
for_each_online_cpu(cpu) {
if (cpu == first_cpu)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a81a910..b120fd0 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2145,19 +2145,52 @@ void __init cpuset_init_smp(void)
void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
mutex_lock(&callback_mutex);
- cpuset_cpus_allowed_locked(tsk, pmask);
+ guarantee_online_cpus(task_cs(tsk), pmask);
mutex_unlock(&callback_mutex);
- * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
- * Must be called with callback_mutex held.
-void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
+int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
- guarantee_online_cpus(task_cs(tsk), pmask);
+ const struct cpuset *cs;
+ cs = task_cs(tsk);
+ cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+ rcu_read_unlock();
+ * We own tsk->cpus_allowed, nobody can change it under us.
+ * But we used cs && cs->cpus_allowed lockless and thus can
+ * race with cgroup_attach_task() or update_cpumask() and get
+ * the wrong tsk->cpus_allowed. However, both cases imply the
+ * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
+ * which takes task_rq_lock().
+ * If we are called after it dropped the lock we must see all
+ * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
+ * set any mask even if it is not right from task_cs() pov,
+ * the pending set_cpus_allowed_ptr() will fix things.
+ cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
+ if (cpu >= nr_cpu_ids) {
+ * Either tsk->cpus_allowed is wrong (see above) or it
+ * is actually empty. The latter case is only possible
+ * if we are racing with remove_tasks_in_empty_cpuset().
+ * Like above we can temporary set any mask and rely on
+ * set_cpus_allowed_ptr() as synchronization point.
+ cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+ cpu = cpumask_any(cpu_active_mask);
void cpuset_init_current_mems_allowed(void)
@@ -2346,22 +2379,6 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
- * cpuset_lock - lock out any changes to cpuset structures
- * The out of memory (oom) code needs to mutex_lock cpusets
- * from being changed while it scans the tasklist looking for a
- * task in an overlapping cpuset. Expose callback_mutex via this
- * cpuset_lock() routine, so the oom code can lock it, before
- * locking the task list. The tasklist_lock is a spinlock, so
- * must be taken inside callback_mutex.
-void cpuset_lock(void)
- mutex_lock(&callback_mutex);
* cpuset_unlock - release lock on cpuset changes
* Undo the lock taken in a previous cpuset_lock() call.
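The new cpuset_cpus_allowed_fallback() above adopts the cpuset's mask into the task, prefers an allowed CPU that is still active, and only if nothing survives widens the task to the full possible mask. A standalone userspace model of that decision, with uint64_t bitmasks standing in for cpumasks and mask_any() playing the role of cpumask_any(); the values in main() are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Index of the lowest set bit, or 64 when the mask is empty
 * (64 plays the role of nr_cpu_ids here). */
static int mask_any(uint64_t m)
{
	for (int i = 0; i < 64; i++)
		if (m & (1ULL << i))
			return i;
	return 64;
}

static int fallback_cpu(uint64_t *task_allowed, uint64_t cpuset_allowed,
			uint64_t active, uint64_t possible)
{
	int cpu;

	*task_allowed = cpuset_allowed;          /* adopt the cpuset mask */
	cpu = mask_any(*task_allowed & active);  /* allowed and still active? */
	if (cpu >= 64) {
		/* cpuset mask is stale or empty: any active CPU will do */
		*task_allowed = possible;
		cpu = mask_any(active);
	}
	return cpu;
}

int main(void)
{
	uint64_t allowed;
	/* cpuset allows CPUs 2-3, but only CPUs 0-1 are still active */
	int cpu = fallback_cpu(&allowed, 0xcULL, 0x3ULL, 0xfULL);

	printf("fallback cpu = %d, allowed mask = 0x%llx\n",
	       cpu, (unsigned long long)allowed);
	return 0;
}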
diff --git a/kernel/fork.c b/kernel/fork.c
index 9f3b066..4bde56f 100644
@@ -1233,21 +1233,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
/* Need tasklist lock for parent etc handling! */
write_lock_irq(&tasklist_lock);
- * The task hasn't been attached yet, so its cpus_allowed mask will
- * not be changed, nor will its assigned CPU.
- * The cpus_allowed mask of the parent may have changed after it was
- * copied first time - so re-copy it here, then check the child's CPU
- * to ensure it is on a valid CPU (and if not, just force it back to
- * parent's CPU). This avoids alot of nasty races.
- p->cpus_allowed = current->cpus_allowed;
- p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
- if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
- !cpu_online(task_cpu(p))))
- set_task_cpu(p, smp_processor_id());
/* CLONE_PARENT re-uses the old parent */
if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
p->real_parent = current->real_parent;
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index ef3c3f8..f83972b 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
* @children: child nodes
* @all: list head for list of all nodes
* @parent: parent node
- * @info: associated profiling data structure if not a directory
- * @ghost: when an object file containing profiling data is unloaded we keep a
- * copy of the profiling data here to allow collecting coverage data
- * for cleanup code. Such a node is called a "ghost".
+ * @loaded_info: array of pointers to profiling data sets for loaded object
+ * @num_loaded: number of profiling data sets for loaded object files.
+ * @unloaded_info: accumulated copy of profiling data sets for unloaded
+ * object files. Used only when gcov_persist=1.
* @dentry: main debugfs entry, either a directory or data file
* @links: associated symbolic links
* @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
struct list_head children;
struct list_head all;
struct gcov_node *parent;
- struct gcov_info *info;
- struct gcov_info *ghost;
+ struct gcov_info **loaded_info;
+ struct gcov_info *unloaded_info;
struct dentry *dentry;
struct dentry **links;
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
- * Return the profiling data set for a given node. This can either be the
- * original profiling data structure or a duplicate (also called "ghost")
- * in case the associated object file has been unloaded.
+ * Return a profiling data set associated with the given node. This is
+ * either a data set for a loaded object file or a data set copy in case
+ * all associated object files have been unloaded.
static struct gcov_info *get_node_info(struct gcov_node *node)
- return node->info;
+ if (node->num_loaded > 0)
+ return node->loaded_info[0];
- return node->ghost;
+ return node->unloaded_info;
+ * Return a newly allocated profiling data set which contains the sum of
+ * all profiling data associated with the given node.
+static struct gcov_info *get_accumulated_info(struct gcov_node *node)
+ struct gcov_info *info;
+ if (node->unloaded_info)
+ info = gcov_info_dup(node->unloaded_info);
+ info = gcov_info_dup(node->loaded_info[i++]);
+ for (; i < node->num_loaded; i++)
+ gcov_info_add(info, node->loaded_info[i]);
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
mutex_lock(&node_lock);
* Read from a profiling data copy to minimize reference tracking
- * complexity and concurrent access.
+ * complexity and concurrent access and to keep accumulating multiple
+ * profiling data sets associated with one node simple.
- info = gcov_info_dup(get_node_info(node));
+ info = get_accumulated_info(node);
iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
+ * Reset all profiling data associated with the specified node.
+static void reset_node(struct gcov_node *node)
+ if (node->unloaded_info)
+ gcov_info_reset(node->unloaded_info);
+ for (i = 0; i < node->num_loaded; i++)
+ gcov_info_reset(node->loaded_info[i]);
static void remove_node(struct gcov_node *node);
* write() implementation for gcov data files. Reset profiling data for the
- * associated file. If the object file has been unloaded (i.e. this is
- * a "ghost" node), remove the debug fs node as well.
+ * corresponding file. If all associated object files have been unloaded,
+ * remove the debug fs node as well.
static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
node = get_node_by_name(info->filename);
/* Reset counts or remove node for unloaded modules. */
+ if (node->num_loaded == 0)
- gcov_info_reset(node->info);
/* Reset counts for open file. */
gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->all);
- node->info = info;
+ if (node->loaded_info) {
+ node->loaded_info[0] = info;
+ node->num_loaded = 1;
node->parent = parent;
strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
struct gcov_node *node;
node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
- pr_warning("out of memory\n");
+ node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
+ if (!node->loaded_info)
init_node(node, info, name, parent);
/* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
list_add(&node->all, &all_head);
+ pr_warning("out of memory\n");
/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
list_del(&node->all);
debugfs_remove(node->dentry);
- gcov_info_free(node->ghost);
+ kfree(node->loaded_info);
+ if (node->unloaded_info)
+ gcov_info_free(node->unloaded_info);
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
* write() implementation for reset file. Reset all profiling data to zero
- * and remove ghost nodes.
+ * and remove nodes for which all associated object files are unloaded.
static ssize_t reset_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
mutex_lock(&node_lock);
list_for_each_entry(node, &all_head, all) {
- gcov_info_reset(node->info);
+ if (node->num_loaded > 0)
else if (list_empty(&node->children)) {
/* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
- * The profiling data set associated with this node is being unloaded. Store a
- * copy of the profiling data and turn this node into a "ghost".
+ * Associate a profiling data set with an existing node. Needs to be called
+ * with node_lock held.
-static int ghost_node(struct gcov_node *node)
+static void add_info(struct gcov_node *node, struct gcov_info *info)
- node->ghost = gcov_info_dup(node->info);
- if (!node->ghost) {
- pr_warning("could not save data for '%s' (out of memory)\n",
- node->info->filename);
+ struct gcov_info **loaded_info;
+ int num = node->num_loaded;
+ * Prepare new array. This is done first to simplify cleanup in
+ * case the new data set is incompatible, the node only contains
+ * unloaded data sets and there's not enough memory for the array.
+ loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
+ if (!loaded_info) {
+ pr_warning("could not add '%s' (out of memory)\n",
+ memcpy(loaded_info, node->loaded_info,
+ num * sizeof(struct gcov_info *));
+ loaded_info[num] = info;
+ /* Check if the new data set is compatible. */
+ * A module was unloaded, modified and reloaded. The new
+ * data set replaces the copy of the last one.
+ if (!gcov_info_is_compatible(node->unloaded_info, info)) {
+ pr_warning("discarding saved data for %s "
+ "(incompatible version)\n", info->filename);
+ gcov_info_free(node->unloaded_info);
+ node->unloaded_info = NULL;
+ * Two different versions of the same object file are loaded.
+ * The initial one takes precedence.
+ if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
+ pr_warning("could not add '%s' (incompatible "
+ "version)\n", info->filename);
+ kfree(loaded_info);
- node->info = NULL;
+ /* Overwrite previous array. */
+ kfree(node->loaded_info);
+ node->loaded_info = loaded_info;
+ node->num_loaded = num + 1;
+ * Return the index of a profiling data set associated with a node.
+static int get_info_index(struct gcov_node *node, struct gcov_info *info)
+ for (i = 0; i < node->num_loaded; i++) {
+ if (node->loaded_info[i] == info)
- * Profiling data for this node has been loaded again. Add profiling data
- * from previous instantiation and turn this node into a regular node.
+ * Save the data of a profiling data set which is being unloaded.
-static void revive_node(struct gcov_node *node, struct gcov_info *info)
+static void save_info(struct gcov_node *node, struct gcov_info *info)
- if (gcov_info_is_compatible(node->ghost, info))
- gcov_info_add(info, node->ghost);
+ if (node->unloaded_info)
+ gcov_info_add(node->unloaded_info, info);
- pr_warning("discarding saved data for '%s' (version changed)\n",
+ node->unloaded_info = gcov_info_dup(info);
+ if (!node->unloaded_info) {
+ pr_warning("could not save data for '%s' "
+ "(out of memory)\n", info->filename);
+ * Disassociate a profiling data set from a node. Needs to be called with
+static void remove_info(struct gcov_node *node, struct gcov_info *info)
+ i = get_info_index(node, info);
+ pr_warning("could not remove '%s' (not found)\n",
- gcov_info_free(node->ghost);
- node->ghost = NULL;
- node->info = info;
+ save_info(node, info);
+ /* Shrink array. */
+ node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
+ node->num_loaded--;
+ if (node->num_loaded > 0)
+ /* Last loaded data set was removed. */
+ kfree(node->loaded_info);
+ node->loaded_info = NULL;
+ node->num_loaded = 0;
+ if (!node->unloaded_info)
+ remove_node(node);
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
node = get_node_by_name(info->filename);
- /* Add new node or revive ghost. */
+ add_info(node, info);
- revive_node(node, info);
- pr_warning("could not add '%s' (already exists)\n",
- /* Remove node or turn into ghost. */
+ remove_info(node, info);
pr_warning("could not remove '%s' (not found)\n",
- if (gcov_persist) {
- if (!ghost_node(node))
- remove_node(node);
mutex_unlock(&node_lock);
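The add_info()/remove_info() pair above manages node->loaded_info as a grow-by-one pointer array: growth builds the larger array first so that a failure leaves the node untouched, and removal swaps the last entry into the vacated slot since ordering does not matter. A standalone userspace model of just that array discipline, with calloc/free standing in for kcalloc/kfree and void pointers standing in for struct gcov_info:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct node {
	void **loaded;   /* stands in for node->loaded_info */
	int num;         /* stands in for node->num_loaded */
};

/* add_info-style grow: build the bigger array first, then swap it in,
 * so an allocation failure leaves the node unchanged. */
static int add(struct node *n, void *info)
{
	void **bigger = calloc(n->num + 1, sizeof(void *));

	if (!bigger)
		return -1;
	memcpy(bigger, n->loaded, n->num * sizeof(void *));
	bigger[n->num] = info;
	free(n->loaded);
	n->loaded = bigger;
	n->num++;
	return 0;
}

/* remove_info-style shrink: overwrite the slot with the last entry
 * instead of shifting the tail down; free the array when it empties. */
static void del(struct node *n, int i)
{
	n->loaded[i] = n->loaded[n->num - 1];
	n->num--;
	if (n->num == 0) {
		free(n->loaded);
		n->loaded = NULL;
	}
}

int main(void)
{
	struct node n = { NULL, 0 };
	int a = 1, b = 2, c = 3;

	add(&n, &a); add(&n, &b); add(&n, &c);
	del(&n, 0);               /* &c takes slot 0 */
	printf("num=%d first=%d\n", n.num, *(int *)n.loaded[0]);
	del(&n, 0); del(&n, 0);
	return 0;
}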
diff --git a/kernel/groups.c b/kernel/groups.c
index 2b45b2e..f0c2528 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left+right)/2;
- int cmp = grp - GROUP_AT(group_info, mid);
+ if (grp > GROUP_AT(group_info, mid))
+ else if (grp < GROUP_AT(group_info, mid))
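The groups_search() fix replaces a subtraction-based comparison with two explicit tests: gid_t is unsigned, so "int cmp = grp - GROUP_AT(...)" truncates an unsigned difference into an int and can flip its sign for gids past 0x7fffffff, sending the binary search down the wrong half. A standalone demonstration of the fixed search; the type alias and sample gids are illustrative:

#include <stdio.h>

typedef unsigned int gid_model;  /* gid_t is unsigned in the kernel */

static int search(const gid_model *groups, unsigned int n, gid_model grp)
{
	unsigned int left = 0, right = n;

	while (left < right) {
		unsigned int mid = (left + right) / 2;

		/* Two explicit comparisons never overflow, unlike the
		 * old "grp - groups[mid]" stored in an int. */
		if (grp > groups[mid])
			left = mid + 1;
		else if (grp < groups[mid])
			right = mid;
		else
			return 1;
	}
	return 0;
}

int main(void)
{
	/* 0x80000001u - 1u == 0x80000000, which is negative as an int,
	 * so the old code would have walked the wrong half here. */
	gid_model groups[] = { 1u, 0x80000001u };

	printf("%d %d\n",
	       search(groups, 2, 0x80000001u),   /* found: 1 */
	       search(groups, 2, 2u));           /* absent: 0 */
	return 0;
}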
diff --git a/kernel/sched.c b/kernel/sched.c
index 9990074..152214d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -542,7 +542,6 @@ struct rq {
struct load_weight load;
unsigned long nr_load_updates;
- u64 nr_migrations_in;
@@ -943,14 +942,25 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
+ * Check whether the task is waking, we use this to synchronize ->cpus_allowed
+static inline int task_is_waking(struct task_struct *p)
+ return unlikely(p->state == TASK_WAKING);
* __task_rq_lock - lock the runqueue a given task resides on.
* Must be called interrupts disabled.
static inline struct rq *__task_rq_lock(struct task_struct *p)
__acquires(rq->lock)
- struct rq *rq = task_rq(p);
spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
@@ -1822,6 +1832,20 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
static void calc_load_account_active(struct rq *this_rq);
static void update_sysctl(void);
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+ set_task_rq(p, cpu);
+ * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
+ * successfuly executed on another CPU. We must ensure that updates of
+ * per-task data have been completed by this moment.
+ task_thread_info(p)->cpu = cpu;
#include "sched_stats.h"
#include "sched_idletask.c"
#include "sched_fair.c"
@@ -1871,13 +1895,14 @@ static void update_avg(u64 *avg, u64 sample)
-static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
+enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
p->se.start_runtime = p->se.sum_exec_runtime;
sched_info_queued(p);
- p->sched_class->enqueue_task(rq, p, wakeup);
+ p->sched_class->enqueue_task(rq, p, wakeup, head);
@@ -1953,7 +1978,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
if (task_contributes_to_load(p))
rq->nr_uninterruptible--;
- enqueue_task(rq, p, wakeup);
+ enqueue_task(rq, p, wakeup, false);
@@ -1978,20 +2003,6 @@ inline int task_curr(const struct task_struct *p)
return cpu_curr(task_cpu(p)) == p;
-static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
- set_task_rq(p, cpu);
- * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
- * successfuly executed on another CPU. We must ensure that updates of
- * per-task data have been completed by this moment.
- task_thread_info(p)->cpu = cpu;
static inline void check_class_changed(struct rq *rq, struct task_struct *p,
const struct sched_class *prev_class,
int oldprio, int running)
@@ -2018,21 +2029,15 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
void kthread_bind(struct task_struct *p, unsigned int cpu)
- struct rq *rq = cpu_rq(cpu);
- unsigned long flags;
/* Must have done schedule() in kthread() before we set_task_cpu */
if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
- spin_lock_irqsave(&rq->lock, flags);
- set_task_cpu(p, cpu);
p->cpus_allowed = cpumask_of_cpu(cpu);
p->rt.nr_cpus_allowed = 1;
p->flags |= PF_THREAD_BOUND;
- spin_unlock_irqrestore(&rq->lock, flags);
EXPORT_SYMBOL(kthread_bind);
@@ -2070,35 +2075,23 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
int old_cpu = task_cpu(p);
- struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
- struct cfs_rq *old_cfsrq = task_cfs_rq(p),
- *new_cfsrq = cpu_cfs_rq(old_cfsrq, new_cpu);
- clock_offset = old_rq->clock - new_rq->clock;
+#ifdef CONFIG_SCHED_DEBUG
+ * We should never call set_task_cpu() on a blocked task,
+ * ttwu() will sort out the placement.
+ WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
+ !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
trace_sched_migrate_task(p, new_cpu);
-#ifdef CONFIG_SCHEDSTATS
- if (p->se.wait_start)
- p->se.wait_start -= clock_offset;
- if (p->se.sleep_start)
- p->se.sleep_start -= clock_offset;
- if (p->se.block_start)
- p->se.block_start -= clock_offset;
if (old_cpu != new_cpu) {
p->se.nr_migrations++;
- new_rq->nr_migrations_in++;
-#ifdef CONFIG_SCHEDSTATS
- if (task_hot(p, old_rq->clock, NULL))
- schedstat_inc(p, se.nr_forced2_migrations);
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
- p->se.vruntime -= old_cfsrq->min_vruntime -
- new_cfsrq->min_vruntime;
__set_task_cpu(p, new_cpu);
@@ -2331,6 +2324,69 @@ void task_oncpu_function_call(struct task_struct *p,
+ * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
+static int select_fallback_rq(int cpu, struct task_struct *p)
+ const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+ /* Look for allowed, online CPU in same node. */
+ for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+ /* Any allowed, online CPU? */
+ dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+ if (dest_cpu < nr_cpu_ids)
+ /* No more Mr. Nice Guy. */
+ if (unlikely(dest_cpu >= nr_cpu_ids)) {
+ dest_cpu = cpuset_cpus_allowed_fallback(p);
+ * Don't tell them about moving exiting tasks or
+ * kernel threads (both mm NULL), since they never
+ if (p->mm && printk_ratelimit()) {
+ printk(KERN_INFO "process %d (%s) no "
+ "longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, cpu);
+ * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
+int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
+ int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
+ * In order not to call set_task_cpu() on a blocking task we need
+ * to rely on ttwu() to place the task on a valid ->cpus_allowed
+ * Since this is common to all placement strategies, this lives here.
+ * [ this allows ->select_task() to simply return task_cpu(p) and
+ * not worry about this generic constraint ]
+ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
+ !cpu_online(cpu)))
+ cpu = select_fallback_rq(task_cpu(p), p);
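select_fallback_rq() above tries, in order: an allowed and active CPU on the same node, then any allowed and active CPU, and only then gives up and takes whatever the cpuset fallback hands back. A standalone model of that preference order, with bitmasks standing in for cpumasks; NR_CPUS and the masks in main() are illustrative:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

static int any_bit(uint64_t m)
{
	for (int i = 0; i < NR_CPUS; i++)
		if (m & (1ULL << i))
			return i;
	return NR_CPUS;  /* plays the role of nr_cpu_ids: nothing found */
}

static int fallback_rq(uint64_t node, uint64_t allowed, uint64_t active)
{
	int cpu;

	cpu = any_bit(node & allowed & active);   /* same node first */
	if (cpu < NR_CPUS)
		return cpu;
	cpu = any_bit(allowed & active);          /* anywhere allowed */
	if (cpu < NR_CPUS)
		return cpu;
	return any_bit(active);                   /* no more Mr. Nice Guy */
}

int main(void)
{
	/* node = CPUs 0-3; task allowed on 2, 3, 6; CPU 2 went offline */
	printf("fallback rq = %d\n", fallback_rq(0x0f, 0x4c, 0xfb));
	return 0;
}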
* try_to_wake_up - wake up a thread
* @p: the to-be-woken-up thread
@@ -2379,22 +2435,34 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
* First fix up the nr_uninterruptible count:
- if (task_contributes_to_load(p))
- rq->nr_uninterruptible--;
+ if (task_contributes_to_load(p)) {
+ if (likely(cpu_online(orig_cpu)))
+ rq->nr_uninterruptible--;
+ this_rq()->nr_uninterruptible--;
p->state = TASK_WAKING;
- task_rq_unlock(rq, &flags);
- cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
+ if (p->sched_class->task_waking)
+ p->sched_class->task_waking(rq, p);
+ cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
if (cpu != orig_cpu)
set_task_cpu(p, cpu);
+ __task_rq_unlock(rq);
- rq = task_rq_lock(p, &flags);
- if (rq != orig_rq)
- update_rq_clock(rq);
+ spin_lock(&rq->lock);
+ update_rq_clock(rq);
+ * We migrated the task without holding either rq->lock, however
+ * since the task is not on the task list itself, nobody else
+ * will try and migrate the task, hence the rq should match the
+ * cpu we just moved it to.
+ WARN_ON(task_cpu(p) != cpu);
WARN_ON(p->state != TASK_WAKING);
- cpu = task_cpu(p);
#ifdef CONFIG_SCHEDSTATS
schedstat_inc(rq, ttwu_count);
@@ -2447,8 +2515,8 @@ out_running:
p->state = TASK_RUNNING;
- if (p->sched_class->task_wake_up)
- p->sched_class->task_wake_up(rq, p);
+ if (p->sched_class->task_woken)
+ p->sched_class->task_woken(rq, p);
if (unlikely(rq->idle_stamp)) {
u64 delta = rq->clock - rq->idle_stamp;
@@ -2528,7 +2596,6 @@ static void __sched_fork(struct task_struct *p)
p->se.nr_failed_migrations_running = 0;
p->se.nr_failed_migrations_hot = 0;
p->se.nr_forced_migrations = 0;
- p->se.nr_forced2_migrations = 0;
p->se.nr_wakeups = 0;
p->se.nr_wakeups_sync = 0;
@@ -2549,14 +2616,6 @@ static void __sched_fork(struct task_struct *p)
#ifdef CONFIG_PREEMPT_NOTIFIERS
INIT_HLIST_HEAD(&p->preempt_notifiers);
- * We mark the process as running here, but have not actually
- * inserted it onto the runqueue yet. This guarantees that
- * nobody will actually run it, and a signal or other external
- * event cannot wake it up and insert it on the runqueue either.
- p->state = TASK_RUNNING;
@@ -2567,6 +2626,12 @@ void sched_fork(struct task_struct *p, int clone_flags)
int cpu = get_cpu();
+ * We mark the process as running here. This guarantees that
+ * nobody will actually run it, and a signal or other external
+ * event cannot wake it up and insert it on the runqueue either.
+ p->state = TASK_RUNNING;
* Revert to default priority/policy on fork if requested.
@@ -2598,9 +2663,9 @@ void sched_fork(struct task_struct *p, int clone_flags)
if (!rt_prio(p->prio))
p->sched_class = &fair_sched_class;
- cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
+ if (p->sched_class->task_fork)
+ p->sched_class->task_fork(p);
set_task_cpu(p, cpu);
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2630,28 +2695,38 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
unsigned long flags;
+ int cpu = get_cpu();
rq = task_rq_lock(p, &flags);
- BUG_ON(p->state != TASK_RUNNING);
- update_rq_clock(rq);
+ p->state = TASK_WAKING;
- if (!p->sched_class->task_new || !current->se.on_rq) {
- activate_task(rq, p, 0);
- * Let the scheduling class do new task startup
- * management (if any):
- p->sched_class->task_new(rq, p);
- inc_nr_running(rq);
+ * Fork balancing, do it here and not earlier because:
+ * - cpus_allowed can change in the fork path
+ * - any previously selected cpu might disappear through hotplug
+ * We set TASK_WAKING so that select_task_rq() can drop rq->lock
+ * without people poking at ->cpus_allowed.
+ cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
+ set_task_cpu(p, cpu);
+ p->state = TASK_RUNNING;
+ task_rq_unlock(rq, &flags);
+ rq = task_rq_lock(p, &flags);
+ update_rq_clock(rq);
+ activate_task(rq, p, 0);
trace_sched_wakeup_new(rq, p, 1);
check_preempt_curr(rq, p, WF_FORK);
- if (p->sched_class->task_wake_up)
- p->sched_class->task_wake_up(rq, p);
+ if (p->sched_class->task_woken)
+ p->sched_class->task_woken(rq, p);
task_rq_unlock(rq, &flags);
#ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -3038,15 +3113,6 @@ static void calc_load_account_active(struct rq *this_rq)
- * Externally visible per-cpu scheduler statistics:
- * cpu_nr_migrations(cpu) - number of migrations into that cpu
-u64 cpu_nr_migrations(int cpu)
- return cpu_rq(cpu)->nr_migrations_in;
* Update rq->cpu_load[] statistics. This function is usually called every
* scheduler tick (TICK_NSEC).
@@ -3128,24 +3194,28 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
- * If dest_cpu is allowed for this process, migrate the task to it.
- * This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
- * the cpu_allowed mask is restored.
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache footprint.
-static void sched_migrate_task(struct task_struct *p, int dest_cpu)
+void sched_exec(void)
+ struct task_struct *p = current;
struct migration_req req;
unsigned long flags;
rq = task_rq_lock(p, &flags);
- if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
- || unlikely(!cpu_active(dest_cpu)))
+ dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
+ if (dest_cpu == smp_processor_id())
- /* force the process onto the specified CPU */
- if (migrate_task(p, dest_cpu, &req)) {
+ * select_task_rq() can race against ->cpus_allowed
+ if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
+ likely(cpu_active(dest_cpu)) &&
+ migrate_task(p, dest_cpu, &req)) {
/* Need to wait for migration thread (might exit: take ref). */
struct task_struct *mt = rq->migration_thread;
@@ -3157,24 +3227,11 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
task_rq_unlock(rq, &flags);
- * sched_exec - execve() is a valuable balancing opportunity, because at
- * this point the task has the smallest effective memory and cache footprint.
-void sched_exec(void)
- int new_cpu, this_cpu = get_cpu();
- new_cpu = current->sched_class->select_task_rq(current, SD_BALANCE_EXEC, 0);
- if (new_cpu != this_cpu)
- sched_migrate_task(current, new_cpu);
* pull_task - move a task from a remote runqueue to the local runqueue.
* Both runqueues must be locked.
@@ -3621,7 +3678,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
- unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long weight = sd->span_weight;
unsigned long smt_gain = sd->smt_gain;
@@ -3654,7 +3711,7 @@ unsigned long scale_rt_power(int cpu)
static void update_cpu_power(struct sched_domain *sd, int cpu)
- unsigned long weight = cpumask_weight(sched_domain_span(sd));
+ unsigned long weight = sd->span_weight;
unsigned long power = SCHED_LOAD_SCALE;
struct sched_group *sdg = sd->groups;
@@ -5974,14 +6031,15 @@ EXPORT_SYMBOL(wait_for_completion_killable);
bool try_wait_for_completion(struct completion *x)
+ unsigned long flags;
- spin_lock_irq(&x->wait.lock);
+ spin_lock_irqsave(&x->wait.lock, flags);
- spin_unlock_irq(&x->wait.lock);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
EXPORT_SYMBOL(try_wait_for_completion);
@@ -5996,12 +6054,13 @@ EXPORT_SYMBOL(try_wait_for_completion);
bool completion_done(struct completion *x)
+ unsigned long flags;
- spin_lock_irq(&x->wait.lock);
+ spin_lock_irqsave(&x->wait.lock, flags);
- spin_unlock_irq(&x->wait.lock);
+ spin_unlock_irqrestore(&x->wait.lock, flags);
EXPORT_SYMBOL(completion_done);
@@ -6095,7 +6154,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
p->sched_class->set_curr_task(rq);
- enqueue_task(rq, p, 0);
+ enqueue_task(rq, p, 0, oldprio < prio);
check_class_changed(rq, p, prev_class, oldprio, running);
@@ -6139,7 +6198,7 @@ void set_user_nice(struct task_struct *p, long nice)
delta = p->prio - old_prio;
- enqueue_task(rq, p, 0);
+ enqueue_task(rq, p, 0, false);
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
@@ -6530,7 +6589,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
- read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
retval = security_task_getscheduler(p);
@@ -6538,7 +6597,7 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
| (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
@@ -6556,7 +6615,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
if (!param || pid < 0)
- read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
@@ -6567,7 +6626,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
lp.sched_priority = p->rt_priority;
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
* This one might sleep, we cannot do it with a spinlock held ...
@@ -6577,7 +6636,7 @@ SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
@@ -6588,22 +6647,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
- read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
- * It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
- * usage count and then drop tasklist_lock.
+ /* Prevent p going away */
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
@@ -6684,10 +6739,12 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
long sched_getaffinity(pid_t pid, struct cpumask *mask)
struct task_struct *p;
+ unsigned long flags;
- read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
@@ -6698,10 +6755,12 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
+ rq = task_rq_lock(p, &flags);
cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
+ task_rq_unlock(rq, &flags);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
@@ -6940,6 +6999,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
struct task_struct *p;
unsigned int time_slice;
+ unsigned long flags;
@@ -6947,7 +7008,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
- read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
@@ -6956,15 +7017,17 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
- time_slice = p->sched_class->get_rr_interval(p);
+ rq = task_rq_lock(p, &flags);
+ time_slice = p->sched_class->get_rr_interval(rq, p);
+ task_rq_unlock(rq, &flags);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
jiffies_to_timespec(time_slice, &t);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
@@ -7055,6 +7118,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
spin_lock_irqsave(&rq->lock, flags);
+ idle->state = TASK_RUNNING;
idle->se.exec_start = sched_clock();
cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
@@ -7149,7 +7213,19 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+ * Serialize against TASK_WAKING so that ttwu() and wunt() can
+ * drop the rq->lock and still rely on ->cpus_allowed.
+ while (task_is_waking(p))
rq = task_rq_lock(p, &flags);
+ if (task_is_waking(p)) {
+ task_rq_unlock(rq, &flags);
if (!cpumask_intersects(new_mask, cpu_active_mask)) {
@@ -7178,7 +7254,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
get_task_struct(mt);
task_rq_unlock(rq, &flags);
- wake_up_process(rq->migration_thread);
+ wake_up_process(mt);
put_task_struct(mt);
wait_for_completion(&req.done);
tlb_migrate_finish(p->mm);
@@ -7205,7 +7281,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
struct rq *rq_dest, *rq_src;
- int ret = 0, on_rq;
if (unlikely(!cpu_active(dest_cpu)))
@@ -7217,19 +7293,17 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
/* Already moved. */
if (task_cpu(p) != src_cpu)
- /* Waking up, don't get in the way of try_to_wake_up(). */
- if (p->state == TASK_WAKING)
/* Affinity changed (again). */
if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
- on_rq = p->se.on_rq;
+ * If we're not on a rq, the next wake-up will ensure we're
+ * placed properly.
+ if (p->se.on_rq) {
deactivate_task(rq_src, p, 0);
- set_task_cpu(p, dest_cpu);
+ set_task_cpu(p, dest_cpu);
activate_task(rq_dest, p, 0);
check_preempt_curr(rq_dest, p, 0);
@@ -7308,57 +7382,29 @@ static int migration_thread(void *data)
#ifdef CONFIG_HOTPLUG_CPU
-static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
- local_irq_disable();
- ret = __migrate_task(p, src_cpu, dest_cpu);
- local_irq_enable();
* Figure out where task on dead CPU should go, use force if necessary.
-static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
+void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
- const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
- /* Look for allowed, online CPU in same node. */
- for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
- /* Any allowed, online CPU? */
- dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
- if (dest_cpu < nr_cpu_ids)
- /* No more Mr. Nice Guy. */
- if (dest_cpu >= nr_cpu_ids) {
- cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
- dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+ struct rq *rq = cpu_rq(dead_cpu);
+ int needs_cpu, uninitialized_var(dest_cpu);
+ unsigned long flags;
- * Don't tell them about moving exiting tasks or
- * kernel threads (both mm NULL), since they never
- if (p->mm && printk_ratelimit()) {
- printk(KERN_INFO "process %d (%s) no "
- "longer affine to cpu%d\n",
- task_pid_nr(p), p->comm, dead_cpu);
+ local_irq_save(flags);
- /* It can have affinity changed while we were choosing. */
- if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
+ spin_lock(&rq->lock);
+ needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
+ dest_cpu = select_fallback_rq(dead_cpu, p);
+ spin_unlock(&rq->lock);
+ * It can only fail if we race with set_cpus_allowed(),
+ * in the racer should migrate the task anyway.
+ __migrate_task(p, dead_cpu, dest_cpu);
+ local_irq_restore(flags);
@@ -7752,14 +7798,23 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
cpu_rq(cpu)->migration_thread = NULL;
- case CPU_DEAD_FROZEN:
- cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
- migrate_live_tasks(cpu);
+ case CPU_POST_DEAD:
+ * Bring the migration thread down in CPU_POST_DEAD event,
+ * since the timers should have got migrated by now and thus
+ * we should not see a deadlock between trying to kill the
+ * migration thread and the sched_rt_period_timer.
kthread_stop(rq->migration_thread);
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL;
+ case CPU_DEAD_FROZEN:
+ migrate_live_tasks(cpu);
/* Idle task back to normal (off runqueue, low prio) */
spin_lock_irq(&rq->lock);
update_rq_clock(rq);
@@ -7768,7 +7823,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
spin_unlock_irq(&rq->lock);
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
calc_global_load_remove(rq);
@@ -8112,6 +8166,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
struct rq *rq = cpu_rq(cpu);
struct sched_domain *tmp;
+ for (tmp = sd; tmp; tmp = tmp->parent)
+ tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
/* Remove the sched domains which do not contribute to scheduling. */
for (tmp = sd; tmp; ) {
struct sched_domain *parent = tmp->parent;
@@ -10099,13 +10156,13 @@ void sched_move_task(struct task_struct *tsk)
#ifdef CONFIG_FAIR_GROUP_SCHED
if (tsk->sched_class->moved_group)
- tsk->sched_class->moved_group(tsk);
+ tsk->sched_class->moved_group(tsk, on_rq);
if (unlikely(running))
tsk->sched_class->set_curr_task(rq);
- enqueue_task(rq, tsk, 0);
+ enqueue_task(rq, tsk, 0, false);
task_rq_unlock(rq, &flags);
@@ -10877,12 +10934,30 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+ * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
+ * in cputime_t units. As a result, cpuacct_update_stats calls
+ * percpu_counter_add with values large enough to always overflow the
+ * per cpu batch limit causing bad SMP scalability.
+ * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
+ * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
+ * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
+#define CPUACCT_BATCH \
+ min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
+#define CPUACCT_BATCH 0
* Charge the system/user time to the task's accounting group.
static void cpuacct_update_stats(struct task_struct *tsk,
enum cpuacct_stat_index idx, cputime_t val)
+ int batch = CPUACCT_BATCH;
if (unlikely(!cpuacct_subsys.active))
@@ -10891,7 +10966,7 @@ static void cpuacct_update_stats(struct task_struct *tsk,
- percpu_counter_add(&ca->cpustat[idx], val);
+ __percpu_counter_add(&ca->cpustat[idx], val, batch);
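The CPUACCT_BATCH hunk at the end of the sched.c diff scales the percpu counter batch by the size of one jiffy in cputime units and caps the result at INT_MAX, so the batch covers the same span of time whether or not CONFIG_VIRT_CPU_ACCOUNTING inflates cputime_one_jiffy. A standalone C model of just that arithmetic; the sample values are illustrative, and a 64-bit long is assumed so the product itself does not overflow:

#include <limits.h>
#include <stdio.h>

/* Model of min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX). */
static long batch(long percpu_counter_batch, long cputime_one_jiffy)
{
	long b = percpu_counter_batch * cputime_one_jiffy;

	return b < INT_MAX ? b : INT_MAX;
}

int main(void)
{
	/* cputime_one_jiffy is 1 when VIRT_CPU_ACCOUNTING is off ... */
	printf("batch = %ld\n", batch(32, 1));
	/* ... but can be enormous (e.g. timebase ticks) when it is on,
	 * which is where the INT_MAX cap kicks in. */
	printf("batch = %ld\n", batch(32, 512000000));
	return 0;
}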
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 6988cf0..6f836a8 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -423,7 +423,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
P(se.nr_failed_migrations_running);
P(se.nr_failed_migrations_hot);
P(se.nr_forced_migrations);
- P(se.nr_forced2_migrations);
P(se.nr_wakeups_sync);
P(se.nr_wakeups_migrate);
@@ -499,7 +498,6 @@ void proc_sched_set_task(struct task_struct *p)
p->se.nr_failed_migrations_running = 0;
p->se.nr_failed_migrations_hot = 0;
p->se.nr_forced_migrations = 0;
- p->se.nr_forced2_migrations = 0;
p->se.nr_wakeups = 0;
p->se.nr_wakeups_sync = 0;
p->se.nr_wakeups_migrate = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index d80812d..01e311e 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -488,6 +488,7 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq, exec_clock, delta_exec);
delta_exec_weighted = calc_delta_fair(delta_exec, curr);
curr->vruntime += delta_exec_weighted;
update_min_vruntime(cfs_rq);
@@ -743,16 +744,26 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
se->vruntime = vruntime;
+#define ENQUEUE_WAKEUP 1
+#define ENQUEUE_MIGRATE 2
-enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
+enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+ * Update the normalized vruntime before updating min_vruntime
+ * through callig update_curr().
+ if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE))
+ se->vruntime += cfs_rq->min_vruntime;
* Update run-time statistics of the 'current'.
update_curr(cfs_rq);
account_entity_enqueue(cfs_rq, se);
+ if (flags & ENQUEUE_WAKEUP) {
place_entity(cfs_rq, se, 0);
enqueue_sleeper(cfs_rq, se);
@@ -806,6 +817,14 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
__dequeue_entity(cfs_rq, se);
account_entity_dequeue(cfs_rq, se);
update_min_vruntime(cfs_rq);
+ * Normalize the entity after updating the min_vruntime because the
+ * update can refer to the ->curr item and we need to reflect this
+ * movement in our normalized position.
+ se->vruntime -= cfs_rq->min_vruntime;
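With these hunks an entity's vruntime is kept relative to its queue's min_vruntime while the entity is off the queue, so a migrating task can be re-based against the destination queue instead of carrying the source queue's absolute clock. A deliberately simplified standalone model of that renormalization; doubles stand in for the kernel's u64 arithmetic and only the migrate path is shown:

#include <stdio.h>

struct cfs_queue { double min_vruntime; };
struct entity    { double vruntime; };

/* task_waking_fair / dequeue path: make the vruntime queue-relative */
static void dequeue(struct cfs_queue *q, struct entity *e)
{
	e->vruntime -= q->min_vruntime;
}

/* enqueue path: ENQUEUE_MIGRATE re-bases onto the new queue's clock */
static void enqueue(struct cfs_queue *q, struct entity *e, int migrated)
{
	if (migrated)
		e->vruntime += q->min_vruntime;
}

int main(void)
{
	struct cfs_queue src = { 1000.0 }, dst = { 250.0 };
	struct entity e = { 1010.0 };   /* 10 units past src's min_vruntime */

	dequeue(&src, &e);
	enqueue(&dst, &e, 1);
	/* still 10 units past the *destination* queue's min: 260.0 */
	printf("vruntime on dst = %.1f\n", e.vruntime);
	return 0;
}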
@@ -1012,17 +1031,24 @@ static inline void hrtick_update(struct rq *rq)
* increased. Here we update the fair scheduling stats and
* then put the task into the rbtree:
-static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
+ flags |= ENQUEUE_WAKEUP;
+ if (p->state == TASK_WAKING)
+ flags |= ENQUEUE_MIGRATE;
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
- enqueue_entity(cfs_rq, se, wakeup);
+ enqueue_entity(cfs_rq, se, flags);
+ flags = ENQUEUE_WAKEUP;
@@ -1098,6 +1124,14 @@ static void yield_task_fair(struct rq *rq)
+static void task_waking_fair(struct rq *rq, struct task_struct *p)
+ struct sched_entity *se = &p->se;
+ struct cfs_rq *cfs_rq = cfs_rq_of(se);
+ se->vruntime -= cfs_rq->min_vruntime;
#ifdef CONFIG_FAIR_GROUP_SCHED
* effective_load() calculates the load change as seen from the root_task_group
@@ -1216,6 +1250,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
* effect of the currently running task from the load
* of the current CPU:
tg = task_group(current);
weight = current->se.load.weight;
@@ -1241,6 +1276,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
balanced = !this_load ||
100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
+ rcu_read_unlock();
* If the currently running task will sleep within
@@ -1348,6 +1384,56 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+ * Try and locate an idle CPU in the sched_domain.
+static int select_idle_sibling(struct task_struct *p, int target)
+ int cpu = smp_processor_id();
+ int prev_cpu = task_cpu(p);
+ struct sched_domain *sd;
+ * If the task is going to be woken-up on this cpu and if it is
+ * already idle, then it is the right target.
+ if (target == cpu && idle_cpu(cpu))
+ * If the task is going to be woken-up on the cpu where it previously
+ * ran and if it is currently idle, then it the right target.
+ if (target == prev_cpu && idle_cpu(prev_cpu))
+ * Otherwise, iterate the domains and find an elegible idle cpu.
+ for_each_domain(target, sd) {
+ if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
+ for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
+ if (idle_cpu(i)) {
+ * Lets stop looking for an idle sibling when we reached
+ * the domain that spans the current cpu and prev_cpu.
+ if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
+ cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
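select_idle_sibling() only scans domains flagged SD_SHARE_PKG_RESOURCES, which is why the topology.h hunk earlier in this series turns that flag on at the SMT level. A toy standalone model of the preference order, with an explicit package-id array standing in for the sched-domain hierarchy; all names and the data in main() are illustrative:

#include <stdio.h>

#define NR_CPUS 4

/* Prefer the wakeup target if idle, else the task's previous CPU if
 * idle, else scan CPUs sharing a package (cache) with the target. */
static int pick(int target, int prev, const int *idle, const int *package)
{
	if (idle[target])
		return target;
	if (idle[prev])
		return prev;
	for (int i = 0; i < NR_CPUS; i++)
		if (package[i] == package[target] && idle[i])
			return i;
	return target;  /* nothing idle nearby: keep the original choice */
}

int main(void)
{
	int idle[NR_CPUS]    = { 0, 0, 0, 1 };   /* only CPU 3 is idle */
	int package[NR_CPUS] = { 0, 0, 0, 0 };   /* one shared package */

	printf("picked cpu %d\n", pick(1, 2, idle, package));  /* -> 3 */
	return 0;
}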
* sched_balance_self: balance the current task (running on cpu) in domains
* that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
@@ -1358,7 +1444,8 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
* preempt must be disabled.
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
+select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
int cpu = smp_processor_id();
@@ -1375,7 +1462,6 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
for_each_domain(cpu, tmp) {
if (!(tmp->flags & SD_LOAD_BALANCE))
@@ -1404,38 +1490,14 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
- if (want_affine && (tmp->flags & SD_WAKE_AFFINE)) {
- int candidate = -1, i;
- if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
- * Check for an idle shared cache.
- if (tmp->flags & SD_PREFER_SIBLING) {
- if (candidate == cpu) {
- if (!cpu_rq(prev_cpu)->cfs.nr_running)
- candidate = prev_cpu;
- if (candidate == -1 || candidate == cpu) {
- for_each_cpu(i, sched_domain_span(tmp)) {
- if (!cpumask_test_cpu(i, &p->cpus_allowed))
- if (!cpu_rq(i)->cfs.nr_running) {
- if (candidate >= 0) {
+ * If both cpu and prev_cpu are part of this domain,
+ * cpu is a valid SD_WAKE_AFFINE target.
+ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
+ cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
if (!want_sd && !want_affine)
@@ -1448,23 +1510,28 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
+#ifdef CONFIG_FAIR_GROUP_SCHED
if (sched_feat(LB_SHARES_UPDATE)) {
* Pick the largest domain to update shares over
- if (affine_sd && (!tmp ||
- cpumask_weight(sched_domain_span(affine_sd)) >
- cpumask_weight(sched_domain_span(sd))))
+ if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
+ spin_unlock(&rq->lock);
+ spin_lock(&rq->lock);
- if (affine_sd && wake_affine(affine_sd, p, sync)) {
+ if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
+ return select_idle_sibling(p, cpu);
+ return select_idle_sibling(p, prev_cpu);
@@ -1495,10 +1562,10 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
/* Now try balancing at a lower domain level of new_cpu */
- weight = cpumask_weight(sched_domain_span(sd));
+ weight = sd->span_weight;
for_each_domain(cpu, tmp) {
- if (weight <= cpumask_weight(sched_domain_span(tmp)))
+ if (weight <= tmp->span_weight)
if (tmp->flags & sd_flag)
@@ -1506,8 +1573,6 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
/* while loop will break here if sd == NULL */
- rcu_read_unlock();
#endif /* CONFIG_SMP */
@@ -1911,28 +1976,32 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
- * Share the fairness runtime between parent and child, thus the
- * total amount of pressure for CPU stays equal - new tasks
- * get a chance to run but frequent forkers are not allowed to
- * monopolize the CPU. Note: the parent runqueue is locked,
- * the child is not running yet.
+ * called on fork with the child task as argument from the parent's context
+ * - child not yet on the tasklist
+ * - preemption disabled
-static void task_new_fair(struct rq *rq, struct task_struct *p)
+static void task_fork_fair(struct task_struct *p)
- struct cfs_rq *cfs_rq = task_cfs_rq(p);
+ struct cfs_rq *cfs_rq = task_cfs_rq(current);
struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
int this_cpu = smp_processor_id();
+ struct rq *rq = this_rq();
+ unsigned long flags;
+ spin_lock_irqsave(&rq->lock, flags);
+ update_rq_clock(rq);
- sched_info_queued(p);
+ if (unlikely(task_cpu(p) != this_cpu))
+ __set_task_cpu(p, this_cpu);
update_curr(cfs_rq);
se->vruntime = curr->vruntime;
place_entity(cfs_rq, se, 1);
- /* 'curr' will be NULL if the child belongs to a different group */
- if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
- curr && entity_before(curr, se)) {
+ if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
* Upon rescheduling, sched_class::put_prev_task() will place
* 'current' within the tree based on its new key value.
@@ -1941,7 +2010,9 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
resched_task(rq->curr);
- enqueue_task_fair(rq, p, 0);
+ se->vruntime -= cfs_rq->min_vruntime;
+ spin_unlock_irqrestore(&rq->lock, flags);
@@ -1994,30 +2065,27 @@ static void set_curr_task_fair(struct rq *rq)
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void moved_group_fair(struct task_struct *p)
+static void moved_group_fair(struct task_struct *p, int on_rq)
struct cfs_rq *cfs_rq = task_cfs_rq(p);
update_curr(cfs_rq);
- place_entity(cfs_rq, &p->se, 1);
+ place_entity(cfs_rq, &p->se, 1);
-unsigned int get_rr_interval_fair(struct task_struct *task)
+unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
struct sched_entity *se = &task->se;
- unsigned long flags;
unsigned int rr_interval = 0;
* Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
- rq = task_rq_lock(task, &flags);
if (rq->cfs.load.weight)
rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
- task_rq_unlock(rq, &flags);
@@ -2043,11 +2111,13 @@ static const struct sched_class fair_sched_class = {
.move_one_task = move_one_task_fair,
.rq_online = rq_online_fair,
.rq_offline = rq_offline_fair,
+ .task_waking = task_waking_fair,
.set_curr_task = set_curr_task_fair,
.task_tick = task_tick_fair,
- .task_new = task_new_fair,
+ .task_fork = task_fork_fair,
.prio_changed = prio_changed_fair,
.switched_to = switched_to_fair,
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index b133a28..93ad2e7 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
-static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
+select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
return task_cpu(p); /* IDLE tasks are never migrated */
@@ -97,7 +98,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
check_preempt_curr(rq, p, 0);
-unsigned int get_rr_interval_idle(struct task_struct *task)
+unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index a4d790c..af24fab 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -194,7 +194,7 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
@@ -204,7 +204,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
if (rt_rq->rt_nr_running) {
if (rt_se && !on_rt_rq(rt_se))
- enqueue_rt_entity(rt_se);
+ enqueue_rt_entity(rt_se, false);
if (rt_rq->highest_prio.curr < curr->prio)
@@ -803,7 +803,7 @@ void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
dec_rt_group(rt_se, rt_rq);
-static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
struct rt_prio_array *array = &rt_rq->active;
@@ -819,7 +819,10 @@ static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
- list_add_tail(&rt_se->run_list, queue);
+ list_add(&rt_se->run_list, queue);
+ list_add_tail(&rt_se->run_list, queue);
__set_bit(rt_se_prio(rt_se), array->bitmap);
inc_rt_tasks(rt_se, rt_rq);
@@ -856,11 +859,11 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
-static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
+static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
dequeue_rt_stack(rt_se);
for_each_sched_rt_entity(rt_se)
- __enqueue_rt_entity(rt_se);
+ __enqueue_rt_entity(rt_se, head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
@@ -871,21 +874,22 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
struct rt_rq *rt_rq = group_rt_rq(rt_se);
if (rt_rq && rt_rq->rt_nr_running)
- __enqueue_rt_entity(rt_se);
+ __enqueue_rt_entity(rt_se, false);
* Adding/removing a task to/from a priority array:
-static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
+enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
struct sched_rt_entity *rt_se = &p->rt;
- enqueue_rt_entity(rt_se);
+ enqueue_rt_entity(rt_se, head);
if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
@@ -938,10 +942,9 @@ static void yield_task_rt(struct rq *rq)
static int find_lowest_rq(struct task_struct *task);
-static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
+select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
- struct rq *rq = task_rq(p);
if (sd_flag != SD_BALANCE_WAKE)
return smp_processor_id();
@@ -1485,7 +1488,7 @@ static void post_schedule_rt(struct rq *rq)
* If we are not running and we are not going to reschedule soon, we should
* try to push tasks away now
-static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
+static void task_woken_rt(struct rq *rq, struct task_struct *p)
if (!task_running(rq, p) &&
!test_tsk_need_resched(rq->curr) &&
@@ -1734,7 +1737,7 @@ static void set_curr_task_rt(struct rq *rq)
dequeue_pushable_task(rq, p);
-unsigned int get_rr_interval_rt(struct task_struct *task)
+unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
* Time slice is 0 for SCHED_FIFO tasks
@@ -1766,7 +1769,7 @@ static const struct sched_class rt_sched_class = {
.rq_offline = rq_offline_rt,
.pre_schedule = pre_schedule_rt,
.post_schedule = post_schedule_rt,
- .task_wake_up = task_wake_up_rt,
+ .task_woken = task_woken_rt,
.switched_from = switched_from_rt,
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0cccb6c..22cf21e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -369,11 +369,18 @@ static int function_stat_show(struct seq_file *m, void *v)
struct ftrace_profile *rec = v;
char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- static DEFINE_MUTEX(mutex);
static struct trace_seq s;
unsigned long long avg;
+ mutex_lock(&ftrace_profile_lock);
+ /* we raced with function_profile_reset() */
+ if (unlikely(rec->counter == 0)) {
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -383,17 +390,17 @@ static int function_stat_show(struct seq_file *m, void *v)
do_div(avg, rec->counter);
- mutex_lock(&mutex);
trace_print_graph_duration(rec->time, &s);
trace_seq_puts(&s, " ");
trace_print_graph_duration(avg, &s);
trace_print_seq(m, &s);
- mutex_unlock(&mutex);
+ mutex_unlock(&ftrace_profile_lock);
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1473,6 +1480,8 @@ static void *t_start(struct seq_file *m, loff_t *pos)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
+ /* reset in case of seek/pread */
+ iter->flags &= ~FTRACE_ITER_HASH;
@@ -2393,7 +2402,7 @@ static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
.write = ftrace_filter_write,
- .llseek = ftrace_regex_lseek,
+ .llseek = no_llseek,
.release = ftrace_filter_release,
diff --git a/mm/bounce.c b/mm/bounce.c
index a2b76a5..1d5fa08 100644
@@ -115,8 +115,8 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
- flush_dcache_page(tovec->bv_page);
bounce_copy_vec(tovec, vfrom);
+ flush_dcache_page(tovec->bv_page);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2047465..6d27a5b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -551,19 +551,19 @@ static inline int pageblock_free(struct page *page)
/* Return the start of the next active pageblock after a given page */
static struct page *next_active_pageblock(struct page *page)
- int pageblocks_stride;
/* Ensure the starting page is pageblock-aligned */
BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
- /* Move forward by at least 1 * pageblock_nr_pages */
- pageblocks_stride = 1;
/* If the entire pageblock is free, move to the end of free page */
- if (pageblock_free(page))
- pageblocks_stride += page_order(page) - pageblock_order;
+ if (pageblock_free(page)) {
+ /* be careful. we don't have locks, page_order can be changed.*/
+ order = page_order(page);
+ if ((order < MAX_ORDER) && (order >= pageblock_order))
+ return page + (1 << order);
- return page + (pageblocks_stride * pageblock_nr_pages);
+ return page + pageblock_nr_pages;
/* Checks if this range of memory is likely to be hot-removable. */
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 315ead3..cfef331 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1101,7 +1101,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
le16_to_cpus(&val_len); n+=2;
- if (val_len > 1016) {
+ if (val_len >= 1016) {
IRDA_DEBUG(2, "%s(), parameter length too long\n", __func__ );
return -RSP_INVALID_COMMAND_FORMAT;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 2370ab4..4c32700 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -717,17 +717,18 @@ gss_pipe_release(struct inode *inode)
struct rpc_inode *rpci = RPC_I(inode);
struct gss_upcall_msg *gss_msg;
spin_lock(&inode->i_lock);
- while (!list_empty(&rpci->in_downcall)) {
+ list_for_each_entry(gss_msg, &rpci->in_downcall, list) {
- gss_msg = list_entry(rpci->in_downcall.next,
- struct gss_upcall_msg, list);
+ if (!list_empty(&gss_msg->msg.list))
gss_msg->msg.errno = -EPIPE;
atomic_inc(&gss_msg->count);
__gss_unhash_msg(gss_msg);
spin_unlock(&inode->i_lock);
gss_release_msg(gss_msg);
- spin_lock(&inode->i_lock);
spin_unlock(&inode->i_lock);
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 27a2378..ea1e6de 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -47,7 +47,7 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head,
msg = list_entry(head->next, struct rpc_pipe_msg, list);
- list_del(&msg->list);
+ list_del_init(&msg->list);
} while (!list_empty(head));
@@ -207,7 +207,7 @@ rpc_pipe_release(struct inode *inode, struct file *filp)
spin_lock(&inode->i_lock);
msg->errno = -EAGAIN;
- list_del(&msg->list);
+ list_del_init(&msg->list);
spin_unlock(&inode->i_lock);
rpci->ops->destroy_msg(msg);
@@ -267,7 +267,7 @@ rpc_pipe_read(struct file *filp, char __user *buf, size_t len, loff_t *offset)
if (res < 0 || msg->len == msg->copied) {
filp->private_data = NULL;
spin_lock(&inode->i_lock);
- list_del(&msg->list);
+ list_del_init(&msg->list);
spin_unlock(&inode->i_lock);
rpci->ops->destroy_msg(msg);
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 6a60c5a..62cfc0c 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1358,6 +1358,9 @@ int cfg80211_wext_giwessid(struct net_device *dev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 60fe577..fddcf9c 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -854,6 +854,22 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
+ if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
+ * If this is a GET, but not NOMAX, it means that the extra
+ * data is not bounded by userspace, but by max_tokens. Thus
+ * set the length to max_tokens. This matches the extra data
+ * The driver should fill it with the number of tokens it
+ * provided, and it may check iwp->length rather than having
+ * knowledge of max_tokens. If the driver doesn't change the
+ * iwp->length, this ioctl just copies back max_token tokens
+ * filled with zeroes. Hopefully the driver isn't claiming
+ * them to be valid data.
+ iwp->length = descr->max_tokens;
err = handler(dev, info, (union iwreq_data *) iwp, extra);
iwp->length += essid_compat;
diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
index d0d721c..1f133fe 100644
--- a/sound/core/seq/oss/seq_oss_init.c
+++ b/sound/core/seq/oss/seq_oss_init.c
@@ -280,13 +280,10 @@ snd_seq_oss_open(struct file *file, int level)
- snd_seq_oss_writeq_delete(dp->writeq);
- snd_seq_oss_readq_delete(dp->readq);
snd_seq_oss_synth_cleanup(dp);
snd_seq_oss_midi_cleanup(dp);
delete_seq_queue(dp->queue);
@@ -349,8 +346,10 @@ create_port(struct seq_oss_devinfo *dp)
delete_port(struct seq_oss_devinfo *dp)
+ if (dp->port < 0) {
debug_printk(("delete_port %i\n", dp->port));
return snd_seq_event_port_detach(dp->cseq, dp->port);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7b4e74d..06c118c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6589,7 +6589,7 @@ static struct hda_input_mux alc883_lenovo_nb0763_capture_source = {
+ { "Int Mic", 0x1 },
@@ -8038,8 +8038,8 @@ static struct snd_kcontrol_new alc883_lenovo_nb0763_mixer[] = {
HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("iMic Playback Volume", 0x0b, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("iMic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+ HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+ HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
@@ -12389,6 +12389,9 @@ static int alc268_new_analog_output(struct alc_spec *spec, hda_nid_t nid,
+ case 0x1a: /* ALC259/269 only */
+ case 0x1b: /* ALC259/269 only */
+ case 0x21: /* ALC269vb has this pin, too */
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index a31a8cd..3c6d141 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -49,6 +49,7 @@ static inline void callchain_init(struct callchain_node *node)
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->val);
+ node->children_hit = 0;
node->parent = NULL;