~ubuntu-branches/ubuntu/karmic/linux-ports/karmic

« back to all changes in this revision

Viewing changes to drivers/acpi/processor_perflib.c

  • Committer: Bazaar Package Importer
  • Author(s): Luke Yelavich, Luke Yelavich, Michael Casadevall, Tim Gardner, Upstream Kernel Changes
  • Date: 2009-05-06 18:18:55 UTC
  • Revision ID: james.westby@ubuntu.com-20090506181855-t00baeevpnvd9o7a
Tags: 2.6.30-1.1
[ Luke Yelavich ]
* initial release for karmic
* SAUCE: rebase-ports - adjust for the karmic ports kernel
* SAUCE: rebase-ports - also remove abi dirs/files on rebase
* Update configs after rebase against mainline Jaunty tree
* [Config] Disable CONFIG_BLK_DEV_UB and CONFIG_USB_LIBUSUAL as per
  mainline jaunty
* forward-port patch to drbd for powerpc compilation
* [Config] disable CONFIG_LENOVO_SL_LAPTOP for i386 due to FTBFS
* add .o files found in arch/powerpc/lib to all powerpc kernel header
  packages
* [Config] enable CONFIG_DRM_I915_KMS for i386 as per karmic mainline

[ Michael Casadevall ]

* Disable kgdb on sparc64
* [sparc] [Config] Disable GPIO LEDS
* [ia64] Rename -ia64-generic to -ia64 in line with other architectures
* Correct kernel image path for sparc builds
* [hppa] Fix HPPA config files to build modules for all udebs

Rebase on top of karmic mainline 2.6.30-1.1

[ Tim Gardner ]

* [Config] armel: disable staging drivers, fixes FTBFS
* [Config] armel imx51: Disable CONFIG_MTD_NAND_MXC, fixes FTBFS

[ Upstream Kernel Changes ]

* mpt2sas: Change reset_type enum to avoid namespace collision.
  Submitted upstream.

* Initial release after rebasing against v2.6.30-rc3

Show diffs side-by-side

added

removed

Lines of Context:
31
31
#include <linux/init.h>
32
32
#include <linux/cpufreq.h>
33
33
 
34
 
#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
35
 
#include <linux/proc_fs.h>
36
 
#include <linux/seq_file.h>
37
 
#include <linux/mutex.h>
38
 
 
39
 
#include <asm/uaccess.h>
40
 
#endif
41
 
 
42
34
#ifdef CONFIG_X86
43
35
#include <asm/cpufeature.h>
44
36
#endif
434
426
 
435
427
EXPORT_SYMBOL(acpi_processor_notify_smm);
436
428
 
437
 
#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
438
 
/* /proc/acpi/processor/../performance interface (DEPRECATED) */
439
 
 
440
 
static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
441
 
static struct file_operations acpi_processor_perf_fops = {
442
 
        .owner = THIS_MODULE,
443
 
        .open = acpi_processor_perf_open_fs,
444
 
        .read = seq_read,
445
 
        .llseek = seq_lseek,
446
 
        .release = single_release,
447
 
};
448
 
 
449
 
static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
450
 
{
451
 
        struct acpi_processor *pr = seq->private;
452
 
        int i;
453
 
 
454
 
 
455
 
        if (!pr)
456
 
                goto end;
457
 
 
458
 
        if (!pr->performance) {
459
 
                seq_puts(seq, "<not supported>\n");
460
 
                goto end;
461
 
        }
462
 
 
463
 
        seq_printf(seq, "state count:             %d\n"
464
 
                   "active state:            P%d\n",
465
 
                   pr->performance->state_count, pr->performance->state);
466
 
 
467
 
        seq_puts(seq, "states:\n");
468
 
        for (i = 0; i < pr->performance->state_count; i++)
469
 
                seq_printf(seq,
470
 
                           "   %cP%d:                  %d MHz, %d mW, %d uS\n",
471
 
                           (i == pr->performance->state ? '*' : ' '), i,
472
 
                           (u32) pr->performance->states[i].core_frequency,
473
 
                           (u32) pr->performance->states[i].power,
474
 
                           (u32) pr->performance->states[i].transition_latency);
475
 
 
476
 
      end:
477
 
        return 0;
478
 
}
479
 
 
480
 
static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
481
 
{
482
 
        return single_open(file, acpi_processor_perf_seq_show,
483
 
                           PDE(inode)->data);
484
 
}
485
 
 
486
 
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
487
 
{
488
 
        struct acpi_device *device = NULL;
489
 
 
490
 
 
491
 
        if (acpi_bus_get_device(pr->handle, &device))
492
 
                return;
493
 
 
494
 
        /* add file 'performance' [R/W] */
495
 
        proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO,
496
 
                         acpi_device_dir(device),
497
 
                         &acpi_processor_perf_fops, acpi_driver_data(device));
498
 
        return;
499
 
}
500
 
 
501
 
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
502
 
{
503
 
        struct acpi_device *device = NULL;
504
 
 
505
 
 
506
 
        if (acpi_bus_get_device(pr->handle, &device))
507
 
                return;
508
 
 
509
 
        /* remove file 'performance' */
510
 
        remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
511
 
                          acpi_device_dir(device));
512
 
 
513
 
        return;
514
 
}
515
 
 
516
 
#else
517
 
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
518
 
{
519
 
        return;
520
 
}
521
 
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
522
 
{
523
 
        return;
524
 
}
525
 
#endif                          /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
526
 
 
527
429
static int acpi_processor_get_psd(struct acpi_processor *pr)
528
430
{
529
431
        int result = 0;
577
479
                goto end;
578
480
        }
579
481
 
 
482
        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
 
483
            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
 
484
            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
 
485
                printk(KERN_ERR PREFIX "Invalid _PSD:coord_type\n");
 
486
                result = -EFAULT;
 
487
                goto end;
 
488
        }
580
489
end:
581
490
        kfree(buffer.pointer);
582
491
        return result;
588
497
        int count, count_target;
589
498
        int retval = 0;
590
499
        unsigned int i, j;
591
 
        cpumask_t covered_cpus;
 
500
        cpumask_var_t covered_cpus;
592
501
        struct acpi_processor *pr;
593
502
        struct acpi_psd_package *pdomain;
594
503
        struct acpi_processor *match_pr;
595
504
        struct acpi_psd_package *match_pdomain;
596
505
 
 
506
        if (!alloc_cpumask_var(&covered_cpus, GFP_KERNEL))
 
507
                return -ENOMEM;
 
508
 
597
509
        mutex_lock(&performance_mutex);
598
510
 
599
 
        retval = 0;
600
 
 
601
 
        /* Call _PSD for all CPUs */
 
511
        /*
 
512
         * Check if another driver has already registered, and abort before
 
513
         * changing pr->performance if it has. Check input data as well.
 
514
         */
602
515
        for_each_possible_cpu(i) {
603
516
                pr = per_cpu(processors, i);
604
517
                if (!pr) {
608
521
 
609
522
                if (pr->performance) {
610
523
                        retval = -EBUSY;
611
 
                        continue;
 
524
                        goto err_out;
612
525
                }
613
526
 
614
 
                if (!performance || !percpu_ptr(performance, i)) {
 
527
                if (!performance || !per_cpu_ptr(performance, i)) {
615
528
                        retval = -EINVAL;
 
529
                        goto err_out;
 
530
                }
 
531
        }
 
532
 
 
533
        /* Call _PSD for all CPUs */
 
534
        for_each_possible_cpu(i) {
 
535
                pr = per_cpu(processors, i);
 
536
                if (!pr)
616
537
                        continue;
617
 
                }
618
538
 
619
 
                pr->performance = percpu_ptr(performance, i);
620
 
                cpu_set(i, pr->performance->shared_cpu_map);
 
539
                pr->performance = per_cpu_ptr(performance, i);
 
540
                cpumask_set_cpu(i, pr->performance->shared_cpu_map);
621
541
                if (acpi_processor_get_psd(pr)) {
622
542
                        retval = -EINVAL;
623
543
                        continue;
630
550
         * Now that we have _PSD data from all CPUs, lets setup P-state 
631
551
         * domain info.
632
552
         */
633
 
        for_each_possible_cpu(i) {
634
 
                pr = per_cpu(processors, i);
635
 
                if (!pr)
636
 
                        continue;
637
 
 
638
 
                /* Basic validity check for domain info */
639
 
                pdomain = &(pr->performance->domain_info);
640
 
                if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
641
 
                    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
642
 
                        retval = -EINVAL;
643
 
                        goto err_ret;
644
 
                }
645
 
                if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
646
 
                    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
647
 
                    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
648
 
                        retval = -EINVAL;
649
 
                        goto err_ret;
650
 
                }
651
 
        }
652
 
 
653
 
        cpus_clear(covered_cpus);
654
 
        for_each_possible_cpu(i) {
655
 
                pr = per_cpu(processors, i);
656
 
                if (!pr)
657
 
                        continue;
658
 
 
659
 
                if (cpu_isset(i, covered_cpus))
660
 
                        continue;
661
 
 
662
 
                pdomain = &(pr->performance->domain_info);
663
 
                cpu_set(i, pr->performance->shared_cpu_map);
664
 
                cpu_set(i, covered_cpus);
 
553
        cpumask_clear(covered_cpus);
 
554
        for_each_possible_cpu(i) {
 
555
                pr = per_cpu(processors, i);
 
556
                if (!pr)
 
557
                        continue;
 
558
 
 
559
                if (cpumask_test_cpu(i, covered_cpus))
 
560
                        continue;
 
561
 
 
562
                pdomain = &(pr->performance->domain_info);
 
563
                cpumask_set_cpu(i, pr->performance->shared_cpu_map);
 
564
                cpumask_set_cpu(i, covered_cpus);
665
565
                if (pdomain->num_processors <= 1)
666
566
                        continue;
667
567
 
699
599
                                goto err_ret;
700
600
                        }
701
601
 
702
 
                        cpu_set(j, covered_cpus);
703
 
                        cpu_set(j, pr->performance->shared_cpu_map);
 
602
                        cpumask_set_cpu(j, covered_cpus);
 
603
                        cpumask_set_cpu(j, pr->performance->shared_cpu_map);
704
604
                        count++;
705
605
                }
706
606
 
718
618
 
719
619
                        match_pr->performance->shared_type = 
720
620
                                        pr->performance->shared_type;
721
 
                        match_pr->performance->shared_cpu_map =
722
 
                                pr->performance->shared_cpu_map;
 
621
                        cpumask_copy(match_pr->performance->shared_cpu_map,
 
622
                                     pr->performance->shared_cpu_map);
723
623
                }
724
624
        }
725
625
 
731
631
 
732
632
                /* Assume no coordination on any error parsing domain info */
733
633
                if (retval) {
734
 
                        cpus_clear(pr->performance->shared_cpu_map);
735
 
                        cpu_set(i, pr->performance->shared_cpu_map);
 
634
                        cpumask_clear(pr->performance->shared_cpu_map);
 
635
                        cpumask_set_cpu(i, pr->performance->shared_cpu_map);
736
636
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
737
637
                }
738
638
                pr->performance = NULL; /* Will be set for real in register */
739
639
        }
740
640
 
 
641
err_out:
741
642
        mutex_unlock(&performance_mutex);
 
643
        free_cpumask_var(covered_cpus);
742
644
        return retval;
743
645
}
744
646
EXPORT_SYMBOL(acpi_processor_preregister_performance);
745
647
 
746
 
 
747
648
int
748
649
acpi_processor_register_performance(struct acpi_processor_performance
749
650
                                    *performance, unsigned int cpu)
750
651
{
751
652
        struct acpi_processor *pr;
752
653
 
753
 
 
754
654
        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
755
655
                return -EINVAL;
756
656
 
777
677
                return -EIO;
778
678
        }
779
679
 
780
 
        acpi_cpufreq_add_file(pr);
781
 
 
782
680
        mutex_unlock(&performance_mutex);
783
681
        return 0;
784
682
}
791
689
{
792
690
        struct acpi_processor *pr;
793
691
 
794
 
 
795
692
        mutex_lock(&performance_mutex);
796
693
 
797
694
        pr = per_cpu(processors, cpu);
804
701
                kfree(pr->performance->states);
805
702
        pr->performance = NULL;
806
703
 
807
 
        acpi_cpufreq_remove_file(pr);
808
 
 
809
704
        mutex_unlock(&performance_mutex);
810
705
 
811
706
        return;