~ubuntu-branches/ubuntu/lucid/boinc/lucid

Viewing changes to client/cpu_sched.C

  • Committer: Bazaar Package Importer
  • Author(s): Frank S. Thomas
  • Date: 2008-05-31 08:02:47 UTC
  • mfrom: (1.1.8 upstream)
  • Revision ID: james.westby@ubuntu.com-20080531080247-4ce890lp2rc768cr
Tags: 6.2.7-1
[ Frank S. Thomas ]
* New upstream release.
  - BOINC Manager: Redraw disk usage charts immediately after connecting to
    a (different) client. (closes: #463823)
* debian/copyright:
  - Added the instructions from debian/README.Debian-source about how
    repackaged BOINC tarballs can be reproduced, because DevRef now
    recommends putting this here instead of in the aforementioned file.
  - Updated for the new release.
* Removed the obsolete debian/README.Debian-source.
* For consistency, upstream renamed the core client and the command tool
  ("boinc_client" to "boinc" and "boinc_cmd" to "boinccmd"). Did the same
  in all packages and created symlinks with the old names for the binaries
  and man pages (a rough sketch follows this list). Also added an entry in
  debian/boinc-client.NEWS explaining this change.
* debian/rules: Do not list Makefile.in files individually in the clean
  target; just remove all that can be found.
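
As a rough illustration of the last two items, the compatibility links and the
clean-up could look like this (the exact paths and the use of plain "ln -s"
rather than dh_link are assumptions, not taken from the actual packaging):

    # Hypothetical sketch: keep the old command names working by pointing
    # them at the renamed binaries and man pages.
    ln -s boinc         debian/boinc-client/usr/bin/boinc_client
    ln -s boinccmd      debian/boinc-client/usr/bin/boinc_cmd
    ln -s boinc.1.gz    debian/boinc-client/usr/share/man/man1/boinc_client.1.gz
    ln -s boinccmd.1.gz debian/boinc-client/usr/share/man/man1/boinc_cmd.1.gz

    # Hypothetical clean target fragment: remove every generated Makefile.in
    # instead of listing each one individually.
    find . -name Makefile.in -delete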

--- client/cpu_sched.C (old)
+++ client/cpu_sched.C (new)
@@ -15 +15 @@
 // To view the GNU Lesser General Public License visit
 // http://www.gnu.org/copyleft/lesser.html
 // or write to the Free Software Foundation, Inc.,
-// 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+// 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA

 // CPU scheduling logic.
 //
@@ -41 +41 @@
 #include "boinc_win.h"
 #endif

-#ifdef SIM
-#include "sim.h"
-#else
-#include "client_state.h"
-#endif
-#include "client_msgs.h"
+#include <string>
+#include <cstring>
+
 #include "str_util.h"
 #include "util.h"
 #include "error_numbers.h"
+#include "coproc.h"
+
+#include "client_msgs.h"
 #include "log_flags.h"

+#ifdef SIM
+#include "sim.h"
+#else
+#include "client_state.h"
+#endif
+
 using std::vector;

 #define MAX_STD   (86400)
@@ -64 +70 @@
 #define DEADLINE_CUSHION    0
     // try to finish jobs this much in advance of their deadline

+bool CLIENT_STATE::sufficient_coprocs(APP_VERSION& av) {
+    for (unsigned int i=0; i<av.coprocs.coprocs.size(); i++) {
+        COPROC* cp = av.coprocs.coprocs[i];
+        COPROC* cp2 = coprocs.lookup(cp->type);
+        if (!cp2) {
+            msg_printf(av.project, MSG_INFO,
+                "Missing a %s coprocessor", cp->type
+            );
+            return false;
+        }
+        if (cp2->used + cp->count > cp2->count) {
+            if (log_flags.cpu_sched_debug) {
+                msg_printf(NULL, MSG_INFO,
+                    "[cpu_sched_debug] insufficient coproc %s (%d + %d > %d)",
+                    cp2->type, cp2->used, cp->count, cp2->count
+                );
+            }
+            return false;
+        }
+    }
+    return true;
+}
+
+void CLIENT_STATE::reserve_coprocs(APP_VERSION& av) {
+    for (unsigned int i=0; i<av.coprocs.coprocs.size(); i++) {
+        COPROC* cp = av.coprocs.coprocs[i];
+        COPROC* cp2 = coprocs.lookup(cp->type);
+        if (!cp2) continue;
+        if (log_flags.cpu_sched_debug) {
+            msg_printf(NULL, MSG_INFO,
+                "[cpu_sched_debug] reserving %d of coproc %s",
+                cp->count, cp2->type
+            );
+        }
+        cp2->used += cp->count;
+    }
+}
+
+void CLIENT_STATE::free_coprocs(APP_VERSION& av) {
+    for (unsigned int i=0; i<av.coprocs.coprocs.size(); i++) {
+        COPROC* cp = av.coprocs.coprocs[i];
+        COPROC* cp2 = coprocs.lookup(cp->type);
+        if (!cp2) continue;
+        if (log_flags.cpu_sched_debug) {
+            msg_printf(NULL, MSG_INFO,
+                "[cpu_sched_debug] freeing %d of coproc %s",
+                cp->count, cp2->type
+            );
+        }
+        cp2->used -= cp->count;
+    }
+}
+
 static bool more_preemptable(ACTIVE_TASK* t0, ACTIVE_TASK* t1) {
     // returning true means t1 is more preemptable than t0,
     // the "largest" result is at the front of a heap, 
@@ -205 +264 @@
         if (!rp->runnable()) continue;
         if (rp->project->non_cpu_intensive) continue;
         if (rp->already_selected) continue;
-        if (!rp->project->deadlines_missed) continue;
+        if (!rp->project->deadlines_missed && rp->project->duration_correction_factor < 90.0) continue;
+            // treat projects with DCF>90 as if they had deadline misses

-        if (!best_result || rp->report_deadline<best_result->report_deadline) {
+        bool new_best = false;
+        if (best_result) {
+            if (rp->report_deadline < best_result->report_deadline) {
+                new_best = true;
+            }
+        } else {
+            new_best = true;
+        }
+        if (new_best) {
             best_result = rp;
             best_atp = lookup_active_task_by_result(rp);
             continue;
@@ -223 +291 @@
         if (best_atp && !atp) continue;
         if (rp->estimated_cpu_time_remaining(false)
             < best_result->estimated_cpu_time_remaining(false)
+            || (!best_atp && atp)
         ) {
             best_result = rp;
             best_atp = atp;
@@ -396 +465 @@

 // Decide whether to run the CPU scheduler.
 // This is called periodically.
-// Scheduled tasks are placed in order of urgency for scheduling in the ordered_scheduled_results vector
+// Scheduled tasks are placed in order of urgency for scheduling
+// in the ordered_scheduled_results vector
 //
 bool CLIENT_STATE::possibly_schedule_cpus() {
     double elapsed_time;
@@ -449 +519 @@
     }
 }

+static bool schedule_if_possible(
+    RESULT* rp, double& ncpus_used, double& ram_left, double rrs, double expected_payoff
+) {
+    ACTIVE_TASK* atp;
+
+    atp = gstate.lookup_active_task_by_result(rp);
+    if (!atp || atp->task_state() == PROCESS_UNINITIALIZED) {
+        if (!gstate.sufficient_coprocs(*rp->avp)) {
+            if (log_flags.cpu_sched_debug) {
+                msg_printf(rp->project, MSG_INFO,
+                    "[cpu_sched_debug] insufficient coprocessors for %s", rp->name
+                );
+            }
+            return false;
+        }
+    }
+    if (atp) {
+        // see if it fits in available RAM
+        //
+        if (atp->procinfo.working_set_size_smoothed > ram_left) {
+            if (log_flags.cpu_sched_debug) {
+                msg_printf(rp->project, MSG_INFO,
+                    "[cpu_sched_debug]  %s misses deadline but too large: %.2fMB",
+                    rp->name, atp->procinfo.working_set_size_smoothed/MEGA
+                );
+            }
+            atp->too_large = true;
+            return false;
+        }
+        atp->too_large = false;
+
+        if (gstate.retry_shmem_time > gstate.now) {
+            if (atp->app_client_shm.shm == NULL) {
+                atp->needs_shmem = true;
+                return false;
+            }
+            atp->needs_shmem = false;
+        }
+        ram_left -= atp->procinfo.working_set_size_smoothed;
+    }
+    if (log_flags.cpu_sched_debug) {
+        msg_printf(rp->project, MSG_INFO,
+            "[cpu_sched_debug] scheduling %s",
+            rp->name
+        );
+    }
+    ncpus_used += rp->avp->avg_ncpus;
+    rp->project->anticipated_debt -= (rp->project->resource_share / rrs) * expected_payoff;
+    return true;
+}
+
 // CPU scheduler - decide which results to run.
 // output: sets ordered_scheduled_result.
 //
 void CLIENT_STATE::schedule_cpus() {
     RESULT* rp;
     PROJECT* p;
-    ACTIVE_TASK* atp;
-    double expected_pay_off;
+    double expected_payoff;
     unsigned int i;
     double rrs = runnable_resource_share();
+    double ncpus_used;

     if (log_flags.cpu_sched_debug) {
         msg_printf(0, MSG_INFO, "[cpu_sched_debug] schedule_cpus(): start");
     }

-    // do round-robin simulation to find what results miss deadline,
+    // do round-robin simulation to find what results miss deadline
     //
     rr_simulation();
     if (log_flags.cpu_sched_debug) {
@@ -490 +611 @@
         active_tasks.active_tasks[i]->too_large = false;
     }

-    expected_pay_off = global_prefs.cpu_scheduling_period();
+    expected_payoff = global_prefs.cpu_scheduling_period();
     ordered_scheduled_results.clear();
     double ram_left = available_ram();

@@ -499 +620 @@
 #ifdef SIM
     if (!cpu_sched_rr_only) {
 #endif
-    while ((int)ordered_scheduled_results.size() < ncpus) {
+    ncpus_used = 0;
+    while (ncpus_used < ncpus) {
         rp = earliest_deadline_result();
         if (!rp) break;
         rp->already_selected = true;

-        // see if it fits in available RAM
-        //
-        atp = lookup_active_task_by_result(rp);
-        if (atp) {
-            if (atp->procinfo.working_set_size_smoothed > ram_left) {
-                if (log_flags.cpu_sched_debug) {
-                    msg_printf(rp->project, MSG_INFO,
-                        "[cpu_sched_debug]  %s misses deadline but too large: %.2fMB",
-                        rp->name, atp->procinfo.working_set_size_smoothed/MEGA
-                    );
-                }
-                atp->too_large = true;
-                continue;
-            }
-            atp->too_large = false;
-
-            // TODO: merge this chunk of code with its clone
-            if (gstate.retry_shmem_time > gstate.now) {
-                if (atp->app_client_shm.shm == NULL) {
-                    atp->needs_shmem = true;
-                    continue;
-                }
-                atp->needs_shmem = false;
-            }
-            ram_left -= atp->procinfo.working_set_size_smoothed;
-        }
+        if (!schedule_if_possible(rp, ncpus_used, ram_left, rrs, expected_payoff)) continue;

-        rp->project->anticipated_debt -= (rp->project->resource_share / rrs) * expected_pay_off;
         rp->project->deadlines_missed--;
         rp->edf_scheduled = true;
-        if (log_flags.cpu_sched_debug) {
-            msg_printf(rp->project, MSG_INFO,
-                "[cpu_sched_debug] scheduling (deadline) %s",
-                rp->name
-            );
-        }
         ordered_scheduled_results.push_back(rp);
     }
 #ifdef SIM
@@ -548 +638 @@

     // Next, choose results from projects with large debt
     //
-    while ((int)ordered_scheduled_results.size() < ncpus) {
+    while (ncpus_used < ncpus) {
         assign_results_to_projects();
         rp = largest_debt_project_best_result();
         if (!rp) break;
-        atp = lookup_active_task_by_result(rp);
-        if (atp) {
-            if (atp->procinfo.working_set_size_smoothed > ram_left) {
-                if (log_flags.cpu_sched_debug) {
-                    msg_printf(NULL, MSG_INFO,
-                        "[cpu_sched_debug]  %s too large: %.2fMB",
-                        rp->name, atp->procinfo.working_set_size_smoothed/MEGA
-                    );
-                }
-                atp->too_large = true;
-                continue;
-            }
-            atp->too_large = false;
-
-            // don't select if it would need a new shared-mem seg
-            // and we're out of them
-            //
-            if (gstate.retry_shmem_time > gstate.now) {
-                if (atp->app_client_shm.shm == NULL) {
-                    atp->needs_shmem = true;
-                    continue;
-                }
-                atp->needs_shmem = false;
-            }
-            ram_left -= atp->procinfo.working_set_size_smoothed;
-        }
-        double xx = (rp->project->resource_share / rrs) * expected_pay_off;
-        rp->project->anticipated_debt -= xx;
-        if (log_flags.cpu_sched_debug) {
-            msg_printf(NULL, MSG_INFO, "[cpu_sched_debug] scheduling (regular) %s", rp->name);
-        }
+        if (!schedule_if_possible(rp, ncpus_used, ram_left, rrs, expected_payoff)) continue;
         ordered_scheduled_results.push_back(rp);
     }

@@ -590 +650 @@
     set_client_state_dirty("schedule_cpus");
 }

-// make a list of preemptable tasks, ordered by their preemptability.
+// make a list of running tasks, ordered by their preemptability.
 //
 void CLIENT_STATE::make_running_task_heap(
-    vector<ACTIVE_TASK*> &running_tasks
+    vector<ACTIVE_TASK*> &running_tasks, double& ncpus_used
 ) {
     unsigned int i;
     ACTIVE_TASK* atp;

+    ncpus_used = 0;
     for (i=0; i<active_tasks.active_tasks.size(); i++) {
         atp = active_tasks.active_tasks[i];
         if (atp->result->project->non_cpu_intensive) continue;
         if (!atp->result->runnable()) continue;
         if (atp->scheduler_state != CPU_SCHED_SCHEDULED) continue;
         running_tasks.push_back(atp);
+        ncpus_used += atp->app_version->avg_ncpus;
     }

     std::make_heap(
@@ -634 +696 @@
     vector<ACTIVE_TASK*> running_tasks;
     static double last_time = 0;
     int retval;
+    double ncpus_used;

     // Do this when requested, and once a minute as a safety net
     //
@@ -671 +734 @@
         }
     }

-    // make list of currently running tasks
+    // make heap of currently running tasks, ordered by preemptibility
     //
-    make_running_task_heap(running_tasks);
+    make_running_task_heap(running_tasks, ncpus_used);

     // if there are more running tasks than ncpus,
     // then mark the extras for preemption 
     //
-    while (running_tasks.size() > (unsigned int)ncpus) {
-        running_tasks[0]->next_scheduler_state = CPU_SCHED_PREEMPTED;
+    while (ncpus_used > ncpus) {
+        atp = running_tasks[0];
+        atp->next_scheduler_state = CPU_SCHED_PREEMPTED;
+        ncpus_used -= atp->app_version->avg_ncpus;
         std::pop_heap(
             running_tasks.begin(),
             running_tasks.end(),
@@ -697 +762 @@
         );
     }

-    // keep track of how many tasks we plan on running
-    // (i.e. have next_scheduler_state = SCHEDULED)
-    //
-    int nrunning = (int)running_tasks.size();
-
     // Loop through the scheduled results
-    // to see if they should preempt a running task
     //
     for (i=0; i<ordered_scheduled_results.size(); i++) {
         RESULT* rp = ordered_scheduled_results[i];
@@ -713 +772 @@
             );
         }

-        // See if the result is already running.
+        // See if it's already running.
         //
         atp = NULL;
         for (vector<ACTIVE_TASK*>::iterator it = running_tasks.begin(); it != running_tasks.end(); it++) {
@@ -731 +790 @@
                 break;
             }
         }
+
+        // if it's already running, see if it fits in mem;
+        // If not, flag for preemption
+        //
         if (atp) {
             if (log_flags.cpu_sched_debug) {
                 msg_printf(rp->project, MSG_INFO,
@@ -738 +801 @@
                 );
             }

-            // the scheduled result is already running.
-            // see if it fits in mem
-            //
             if (atp->procinfo.working_set_size_smoothed > ram_left) {
                 atp->next_scheduler_state = CPU_SCHED_PREEMPTED;
                 atp->too_large = true;
-                nrunning--;
+                ncpus_used -= atp->app_version->avg_ncpus;
                 if (log_flags.mem_usage_debug) {
                     msg_printf(rp->project, MSG_INFO,
                         "[mem_usage_debug] enforce: result %s can't continue, too big %.2fMB > %.2fMB",
@@ -759 +819 @@
         }

         // Here if the result is not already running.
-        // If it already has a (non-running) active task,
-        // see if it fits in mem
+        // If it already has an active task and won't fit in mem, skip it
         //
         atp = lookup_active_task_by_result(rp);
         if (atp) {
@@ -778 +837 @@
             }
         }

-        // The scheduled result is not already running.  
-        // Preempt something if needed and possible.
+        // Preempt something if needed (and possible).
         //
         bool run_task = false;
-        bool need_to_preempt = (nrunning==ncpus) && running_tasks.size();
+        bool need_to_preempt = (ncpus_used >= ncpus) && running_tasks.size();
             // the 2nd half of the above is redundant
         if (need_to_preempt) {
             // examine the most preemptable task.
@@ -802 +860 @@
                     rp->project->deadlines_missed--;
                 }
                 atp->next_scheduler_state = CPU_SCHED_PREEMPTED;
-                nrunning--;
+                ncpus_used -= atp->app_version->avg_ncpus;
                 std::pop_heap(
                     running_tasks.begin(),
                     running_tasks.end(),
@@ -830 +888 @@
         if (run_task) {
             atp = get_task(rp);
             atp->next_scheduler_state = CPU_SCHED_SCHEDULED;
-            nrunning++;
+            ncpus_used += atp->app_version->avg_ncpus;
             ram_left -= atp->procinfo.working_set_size_smoothed;
         }
     }
     if (log_flags.cpu_sched_debug) {
         msg_printf(0, MSG_INFO,
-            "[cpu_sched_debug] finished preempt loop, nrunning %d",
-            nrunning
+            "[cpu_sched_debug] finished preempt loop, ncpus_used %f",
+            ncpus_used
         );
     }

@@ -860 +918 @@
         }
     }

-    if (log_flags.cpu_sched_debug && nrunning < ncpus) {
-        msg_printf(0, MSG_INFO, "[cpu_sched_debug] Some CPUs idle (%d<%d)",
-            nrunning, ncpus
-        );
-        request_work_fetch("CPUs idle");
-    }
-    if (log_flags.cpu_sched_debug && nrunning > ncpus) {
-        msg_printf(0, MSG_INFO, "[cpu_sched_debug] Too many tasks started (%d>%d)",
-            nrunning, ncpus
-        );
+    if (log_flags.cpu_sched_debug && ncpus_used < ncpus) {
+        msg_printf(0, MSG_INFO, "[cpu_sched_debug] using %f out of %d CPUs",
+            ncpus_used, ncpus
+        );
+        if (ncpus_used < ncpus) {
+            request_work_fetch("CPUs idle");
+        }
     }

     // schedule new non CPU intensive tasks
@@ -927 +982 @@
         case CPU_SCHED_SCHEDULED:
             switch (atp->task_state()) {
             case PROCESS_UNINITIALIZED:
+                if (!sufficient_coprocs(*atp->app_version)) continue;
             case PROCESS_SUSPENDED:
                 action = true;
                 retval = atp->resume_or_start(
@@ -1463 +1519 @@
     if (config.ncpus>0) {
         ncpus = config.ncpus;
     } else if (host_info.p_ncpus>0) {
-        ncpus = host_info.p_ncpus;
+        ncpus = (int)((host_info.p_ncpus * global_prefs.max_ncpus_pct)/100);
+        if (ncpus == 0) ncpus = 1;
     } else {
         ncpus = 1;
     }
-    if (ncpus > global_prefs.max_cpus) ncpus = global_prefs.max_cpus;

     if (initialized && ncpus != ncpus_old) {
         msg_printf(0, MSG_INFO,
@@ -1567 +1623 @@
         }
 }

-const char *BOINC_RCSID_e830ee1 = "$Id: cpu_sched.C 13888 2007-10-17 23:31:24Z romw $";
+const char *BOINC_RCSID_e830ee1 = "$Id: cpu_sched.C 15177 2008-05-12 20:27:55Z romw $";