@@ -3 +3 @@
  *  Note: there is a global node table (node_record_table_ptr)
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
[...]
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -103 +107 @@
 			     struct part_record *part_ptr,
 			     uint32_t min_nodes, uint32_t max_nodes,
 			     uint32_t req_nodes, bool test_only);
+static void _reset_feature_counts(struct job_details *details_ptr);
+static bool _valid_feature_counts(struct job_details *details_ptr);
 static bitstr_t *_valid_features(struct job_details *detail_ptr,
-				 struct config_record *config_ptr);
+				 struct config_record *config_ptr,
@@ -323 +344 @@
 	uint32_t saved_min_nodes, saved_job_min_nodes;
 	bitstr_t *saved_req_node_bitmap = NULL;
 	uint32_t saved_num_procs, saved_req_nodes;
-	int tmp_node_set_size;
+	int rc, tmp_node_set_size;
 	struct node_set *tmp_node_set_ptr;
 	int error_code = SLURM_SUCCESS, i;
 	bitstr_t *feature_bitmap, *accumulate_bitmap = NULL;
+	bitstr_t *save_avail_node_bitmap = NULL, *resv_bitmap;
+	time_t start_res = time(NULL);
[...]
+	/* Mark nodes reserved for other jobs as off limit for this job */
+	rc = job_test_resv(job_ptr, &start_res, false, &resv_bitmap);
+	if ((rc != SLURM_SUCCESS) ||
+	    (bit_set_count(resv_bitmap) < min_nodes) ||
+	    (job_ptr->details->req_node_bitmap &&
+	     (!bit_super_set(job_ptr->details->req_node_bitmap,
[...]
+		FREE_NULL_BITMAP(resv_bitmap);
+		return ESLURM_NODES_BUSY;	/* reserved */
[...]
+	    (!bit_equal(resv_bitmap, avail_node_bitmap))) {
+		bit_and(resv_bitmap, avail_node_bitmap);
+		save_avail_node_bitmap = avail_node_bitmap;
+		avail_node_bitmap = resv_bitmap;
[...]
+		FREE_NULL_BITMAP(resv_bitmap);
[...]
 	/* save job and request state */
 	saved_min_nodes = min_nodes;
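The new block above narrows avail_node_bitmap to the reservation before any scheduling work: the usable set is the intersection of the partition's available nodes and the nodes the reservation grants, and the job is bounced early if that intersection is smaller than min_nodes. A minimal sketch of the same pattern on plain word-sized masks (the names here are illustrative stand-ins, not SLURM's bitstring API):

```c
#include <stdint.h>
#include <stdio.h>

/* Count set bits, i.e. usable nodes in a mask (cf. bit_set_count()). */
static int mask_count(uint64_t m)
{
	int n = 0;
	for (; m; m &= m - 1)
		n++;
	return n;
}

int main(void)
{
	uint64_t avail = 0xFF;	/* nodes 0-7 available in the partition */
	uint64_t resv  = 0x0F;	/* the reservation grants only nodes 0-3 */
	uint32_t min_nodes = 5;

	/* cf. bit_and(resv_bitmap, avail_node_bitmap) */
	uint64_t usable = avail & resv;
	if ((uint32_t) mask_count(usable) < min_nodes)
		printf("ESLURM_NODES_BUSY: only %d of %u nodes usable\n",
		       mask_count(usable), min_nodes);
	return 0;
}
```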
@@ -371 +415 @@
 			node_set_ptr[i].weight;
 		tmp_node_set_ptr[tmp_node_set_size].features =
 			xstrdup(node_set_ptr[i].features);
-		tmp_node_set_ptr[tmp_node_set_size].feature_array =
+		tmp_node_set_ptr[tmp_node_set_size].
[...]
 			node_set_ptr[i].feature_array;
-		tmp_node_set_ptr[tmp_node_set_size].feature_bits =
+		tmp_node_set_ptr[tmp_node_set_size].
[...]
 			bit_copy(node_set_ptr[i].feature_bits);
-		tmp_node_set_ptr[tmp_node_set_size].my_bitmap =
+		tmp_node_set_ptr[tmp_node_set_size].my_bitmap =
 			bit_copy(node_set_ptr[i].my_bitmap);
 		tmp_node_set_size++;
@@ -393 +439 @@
 		char *tmp_str = bitmap2node_name(feature_bitmap);
-		info("job %u needs %u nodes with feature %s, using %s",
-		     job_ptr->job_id, feat_ptr->count,
-		     feat_ptr->name, tmp_str);
+		info("job %u needs %u nodes with feature %s, "
+		     "using %s, error_code=%d",
+		     job_ptr->job_id, feat_ptr->count,
+		     feat_ptr->name, tmp_str, error_code);
@@ -400 +447 @@
 	for (i=0; i<tmp_node_set_size; i++) {
 		xfree(tmp_node_set_ptr[i].features);
-		FREE_NULL_BITMAP(tmp_node_set_ptr[i].feature_bits);
-		FREE_NULL_BITMAP(tmp_node_set_ptr[i].my_bitmap);
+		FREE_NULL_BITMAP(tmp_node_set_ptr[i].
[...]
+		FREE_NULL_BITMAP(tmp_node_set_ptr[i].
[...]
 	if (error_code != SLURM_SUCCESS)
[...]
 	if (feature_bitmap) {
 		if (job_ptr->details->req_node_bitmap) {
-			bit_or(job_ptr->details->req_node_bitmap,
+			bit_or(job_ptr->details->
[...]
 			job_ptr->details->req_node_bitmap =
 				bit_copy(feature_bitmap);
[...]
 	if (accumulate_bitmap) {
-		bit_or(accumulate_bitmap, feature_bitmap);
+		bit_or(accumulate_bitmap,
[...]
 		bit_free(feature_bitmap);
[...]
 		accumulate_bitmap = feature_bitmap;
@@ -570 +629 @@
 		cr_type = (select_type_plugin_info_t) slurmctld_conf.
 						select_type_param;
+		/* Set the partially_idle_node_bitmap to reflect the
+		 * idle and partially idle nodes */
+		error_code = select_g_get_info_from_plugin (SELECT_BITMAP,
+				job_ptr, &partially_idle_node_bitmap);
+		if (error_code != SLURM_SUCCESS) {
+			FREE_NULL_BITMAP(partially_idle_node_bitmap);
[...]
-		debug3("Job %u shared %d cr_enabled %d CR type %d num_procs %d",
+		debug3("Job %u shared %d CR type %d num_procs %d nbits %d",
 		       job_ptr->job_id, shared, cr_enabled, cr_type,
[...]
-		partially_idle_node_bitmap = bit_copy(idle_node_bitmap);
[...]
-		/* Update partially_idle_node_bitmap to reflect the
-		 * idle and partially idle nodes */
-		error_code = select_g_get_info_from_plugin (
[...]
-				&partially_idle_node_bitmap);
-		if (error_code != SLURM_SUCCESS) {
-			FREE_NULL_BITMAP(partially_idle_node_bitmap);
[...]
+		       bit_set_count(partially_idle_node_bitmap));
@@ -591 +646 @@
 	if (job_ptr->details->req_node_bitmap) {  /* specific nodes required */
[...]
 			return ESLURM_NODES_BUSY;
-		if (!bit_super_set(job_ptr->details->req_node_bitmap,
-				   share_node_bitmap)) {
-			FREE_NULL_BITMAP(partially_idle_node_bitmap);
-			return ESLURM_NODES_BUSY;
[...]
-		if (!bit_super_set(job_ptr->details->req_node_bitmap,
[...]
-			FREE_NULL_BITMAP(partially_idle_node_bitmap);
-			return ESLURM_NODES_BUSY;
+		/* If preemption is available via sched/gang, then
+		 * do NOT limit the set of available nodes by their
+		 * current 'sharable' or 'idle' setting */
+		if (!sched_gang_test) {
+			char *sched_type = slurm_get_sched_type();
+			if (strcmp(sched_type, "sched/gang") == 0)
[...]
+			sched_gang_test = true;
[...]
+			if (!bit_super_set(job_ptr->details->
[...]
+					   share_node_bitmap)) {
[...]
+					partially_idle_node_bitmap);
+				return ESLURM_NODES_BUSY;
[...]
+			if (!bit_super_set(job_ptr->details->
[...]
+					partially_idle_node_bitmap);
+				return ESLURM_NODES_BUSY;
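Both copies of this sched/gang test (here and in the loop further down) probe the configured scheduler type once and cache the answer in a static flag, so later scheduling passes skip the string lookup entirely. A reduced, self-contained sketch of that probe-once idiom; get_sched_type() below is a stand-in for slurm_get_sched_type(), which likewise returns an allocated string the caller must free:

```c
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for slurm_get_sched_type(). */
static char *get_sched_type(void)
{
	return strdup("sched/gang");
}

/* Probe the scheduler once; cache the result in static flags
 * (cf. sched_gang_test in the hunk above). */
static bool gang_preemption_enabled(void)
{
	static bool tested  = false;
	static bool enabled = false;

	if (!tested) {
		char *sched_type = get_sched_type();
		if (strcmp(sched_type, "sched/gang") == 0)
			enabled = true;
		free(sched_type);
		tested = true;
	}
	return enabled;
}

int main(void)
{
	return gang_preemption_enabled() ? 0 : 1;
}
```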
@@ -669 +744 @@
 			bit_and(node_set_ptr[i].my_bitmap,
 				partially_idle_node_bitmap);
[...]
-			bit_and(node_set_ptr[i].my_bitmap,
+		/* If preemption is available via sched/gang, then
+		 * do NOT limit the set of available nodes by their
+		 * current 'sharable' or 'idle' setting */
+		if (!sched_gang_test) {
+			char *sched_type = slurm_get_sched_type();
+			if (strcmp(sched_type, "sched/gang") == 0)
[...]
+			sched_gang_test = true;
[...]
+				bit_and(node_set_ptr[i].my_bitmap,
[...]
+				bit_and(node_set_ptr[i].my_bitmap,
[...]
+					node_set_ptr[i].my_bitmap);
[...]
-			bit_and(node_set_ptr[i].my_bitmap,
[...]
-		bit_or(avail_bitmap, node_set_ptr[i].my_bitmap);
[...]
 			avail_bitmap = bit_copy(
 					node_set_ptr[i].my_bitmap);
 			if (avail_bitmap == NULL)
@@ -955 +1046 @@
 		debug3("JobId=%u not runnable with present config",
 		       job_ptr->job_id);
 		job_ptr->state_reason = WAIT_PART_NODE_LIMIT;
+		xfree(job_ptr->state_desc);
 		if (job_ptr->priority != 0)	/* Move to end of queue */
 			job_ptr->priority = 1;
 		last_job_update = now;
+	} else if (error_code == ESLURM_RESERVATION_NOT_USABLE) {
+		job_ptr->state_reason = WAIT_RESERVATION;
+		xfree(job_ptr->state_desc);
[...]
 		job_ptr->state_reason = WAIT_RESOURCES;
+		xfree(job_ptr->state_desc);
 		if (error_code == ESLURM_NODES_BUSY)
 			slurm_sched_job_is_pending();

@@ -1039 +1146 @@
 	return error_code;
[...]
+/* Clear tmp_cnt for all features of given job */
+static void _reset_feature_counts(struct job_details *details_ptr)
[...]
+	ListIterator feat_iter;
+	struct feature_record *feat_ptr;
[...]
+	if (details_ptr->feature_list == NULL)	/* no constraints */
[...]
+	feat_iter = list_iterator_create(details_ptr->feature_list);
+	while ((feat_ptr = (struct feature_record *) list_next(feat_iter))) {
+		feat_ptr->tmp_cnt = 0;
[...]
+	list_iterator_destroy(feat_iter);
[...]
+/* Verify that tmp_cnt >= count for all features of given job */
+static bool _valid_feature_counts(struct job_details *details_ptr)
[...]
+	ListIterator feat_iter;
+	struct feature_record *feat_ptr;
[...]
+	if (details_ptr->feature_list == NULL)	/* no constraints */
[...]
+	feat_iter = list_iterator_create(details_ptr->feature_list);
+	while ((feat_ptr = (struct feature_record *) list_next(feat_iter))) {
+		if (feat_ptr->tmp_cnt >= feat_ptr->count)
[...]
+	list_iterator_destroy(feat_iter);
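These two new helpers bracket the node-filtering pass: tmp_cnt is zeroed for every feature, incremented as matching nodes are found (see the update_count handling in _valid_features at the end of this diff), and finally compared against the requested count. A reduced sketch of that accounting, with a plain array standing in for SLURM's List:

```c
#include <stdbool.h>
#include <stdint.h>

struct feature {		/* reduced stand-in for struct feature_record */
	uint16_t count;		/* nodes requested with this feature */
	uint16_t tmp_cnt;	/* matching nodes found so far */
};

/* cf. _reset_feature_counts(): zero every counter before filtering */
static void reset_feature_counts(struct feature *f, int nf)
{
	for (int i = 0; i < nf; i++)
		f[i].tmp_cnt = 0;
}

/* cf. _valid_feature_counts(): every counted request must be satisfied */
static bool valid_feature_counts(const struct feature *f, int nf)
{
	for (int i = 0; i < nf; i++) {
		if (f[i].tmp_cnt < f[i].count)
			return false;
	}
	return true;
}
```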
@@ -1043 +1187 @@
  * job_req_node_filter - job request node filter.
  *	clear from a bitmap the nodes which cannot be used for a job
  *	test memory size, required features, processor count, etc.
- * NOTE: Does not support exclusive OR of features or feature counts.
+ * NOTE: Does not support exclusive OR of features.
  *	It just matches first element of XOR and ignores count.
  * IN job_ptr - pointer to job to be scheduled
  * IN/OUT bitmap - set of nodes being considered for use
@@ -1068 +1212 @@
+	_reset_feature_counts(detail_ptr);
 	mc_ptr = detail_ptr->mc_ptr;
 	for (i=0; i< node_record_count; i++) {
 		if (!bit_test(avail_bitmap, i))
[...]
 		node_ptr = node_record_table_ptr + i;
 		config_ptr = node_ptr->config_ptr;
-		feature_bitmap = _valid_features(detail_ptr, config_ptr);
-		if ((feature_bitmap == NULL) || (!bit_test(feature_bitmap, 0))) {
+		feature_bitmap = _valid_features(detail_ptr, config_ptr, true);
+		if ((feature_bitmap == NULL) ||
+		    (!bit_test(feature_bitmap, 0))) {
 			bit_clear(avail_bitmap, i);
[...]
 		FREE_NULL_BITMAP(feature_bitmap);
 		if (slurmctld_conf.fast_schedule) {
-			if ((detail_ptr->job_min_procs > config_ptr->cpus )
+			if ((detail_ptr->job_min_procs >
[...]
 			    || ((detail_ptr->job_min_memory & (~MEM_PER_CPU)) >
 				config_ptr->real_memory)
-			    || (detail_ptr->job_min_tmp_disk > config_ptr->tmp_disk)) {
+			    || (detail_ptr->job_min_tmp_disk >
+				config_ptr->tmp_disk)) {
 				bit_clear(avail_bitmap, i);
[...]
 			    || (mc_ptr->min_threads > config_ptr->threads )
 			    || (mc_ptr->job_min_sockets > config_ptr->sockets )
 			    || (mc_ptr->job_min_cores > config_ptr->cores )
-			    || (mc_ptr->job_min_threads > config_ptr->threads ))) {
+			    || (mc_ptr->job_min_threads >
+				config_ptr->threads ))) {
 				bit_clear(avail_bitmap, i);
[...]
-			if ((detail_ptr->job_min_procs > node_ptr->cpus )
+			if ((detail_ptr->job_min_procs >
[...]
 			    || ((detail_ptr->job_min_memory & (~MEM_PER_CPU)) >
 				node_ptr->real_memory)
-			    || (detail_ptr->job_min_tmp_disk > node_ptr->tmp_disk)) {
+			    || (detail_ptr->job_min_tmp_disk >
+				node_ptr->tmp_disk)) {
 				bit_clear(avail_bitmap, i);
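The filter body reduces to a disjunction of threshold tests: a node's bit is cleared as soon as any one job minimum exceeds what the node (or, under fast_schedule, its config record) offers; the diff additionally masks the MEM_PER_CPU flag bit out of job_min_memory before comparing. A sketch of that predicate under an illustrative struct (not SLURM's actual record layout):

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative subset of the per-node limits tested above */
struct node_res {
	uint32_t cpus;
	uint32_t real_memory;
	uint32_t tmp_disk;
};

/* Return true if the node must be cleared from avail_bitmap: any one
 * unmet minimum disqualifies it (cf. the || chains in the hunk). */
static bool node_too_small(const struct node_res *n, uint32_t min_procs,
			   uint32_t min_memory, uint32_t min_tmp_disk)
{
	return (min_procs    > n->cpus)        ||
	       (min_memory   > n->real_memory) ||
	       (min_tmp_disk > n->tmp_disk);
}
```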
@@ -1133 +1288 @@
 			    struct node_set **node_set_pptr,
 			    int *node_set_size)
[...]
+	int i, node_set_inx, power_cnt, rc;
 	struct node_set *node_set_ptr;
 	struct config_record *config_ptr;
 	struct part_record *part_ptr = job_ptr->part_ptr;
 	ListIterator config_iterator;
 	int check_node_config, config_filter = 0;
 	struct job_details *detail_ptr = job_ptr->details;
-	bitstr_t *exc_node_mask = NULL;
+	bitstr_t *power_up_bitmap = NULL, *usable_node_mask = NULL;
 	multi_core_data_t *mc_ptr = detail_ptr->mc_ptr;
 	bitstr_t *tmp_feature;
+	uint32_t max_weight = 0;
@@ +1303 @@
+	if (job_ptr->resv_name) {
+		/* Limit node selection to those in selected reservation */
+		time_t start_res = time(NULL);
+		rc = job_test_resv(job_ptr, &start_res, false,
[...]
+		if (rc != SLURM_SUCCESS) {
+			job_ptr->state_reason = WAIT_RESERVATION;
+			xfree(job_ptr->state_desc);
+			if (rc == ESLURM_INVALID_TIME_VALUE)
+				return ESLURM_RESERVATION_NOT_USABLE;
+			/* Defunct reservation or access denied */
+			return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
[...]
+		if ((detail_ptr->req_node_bitmap) &&
+		    (!bit_super_set(detail_ptr->req_node_bitmap,
+				    usable_node_mask))) {
+			job_ptr->state_reason = WAIT_RESERVATION;
+			xfree(job_ptr->state_desc);
+			FREE_NULL_BITMAP(usable_node_mask);
+			/* Required nodes outside of the reservation */
+			return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
@@ -1147 +1327 @@
 	node_set_inx = 0;
 	node_set_ptr = (struct node_set *)
 		xmalloc(sizeof(struct node_set) * 2);
 	node_set_ptr[node_set_inx+1].my_bitmap = NULL;
 	if (detail_ptr->exc_node_bitmap) {
-		exc_node_mask = bit_copy(detail_ptr->exc_node_bitmap);
-		if (exc_node_mask == NULL)
-			fatal("bit_copy malloc failure");
-		bit_not(exc_node_mask);
+		if (usable_node_mask) {
+			bit_not(detail_ptr->exc_node_bitmap);
+			bit_and(usable_node_mask, detail_ptr->exc_node_bitmap);
+			bit_not(detail_ptr->exc_node_bitmap);
[...]
+				bit_copy(detail_ptr->exc_node_bitmap);
+			if (usable_node_mask == NULL)
+				fatal("bit_copy malloc failure");
+			bit_not(usable_node_mask);
[...]
 	config_iterator = list_iterator_create(config_list);
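Both branches above compute "usable nodes minus excluded nodes"; the bit_not()/bit_and()/bit_not() sequence ANDs with the complement while leaving exc_node_bitmap unchanged afterwards. On plain masks the effect is just:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t usable = 0xFFu;	/* candidate nodes */
	uint64_t exc    = 0x0Au;	/* excluded nodes */

	/* bit_not(exc); bit_and(usable, exc); bit_not(exc); amounts to
	 * usable &= ~exc, with exc restored to its original value. */
	usable &= ~exc;
	assert(usable == 0xF5u);
	return 0;
}
```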
@@ -1251 +1440 @@
 		return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
[...]
+	/* If any nodes are powered down, put them into a new node_set
+	 * record with a higher scheduling weight. This means we avoid
+	 * scheduling jobs on powered down nodes where possible. */
+	for (i = (node_set_inx-1); i >= 0; i--) {
+		power_cnt = bit_overlap(node_set_ptr[i].my_bitmap,
[...]
+			continue;	/* no nodes powered down */
+		if (power_cnt == node_set_ptr[i].nodes) {
+			node_set_ptr[i].weight += max_weight;	/* avoid all */
+			continue;	/* all nodes powered down */
[...]
+		/* Some nodes powered down, others up, split record */
+		node_set_ptr[node_set_inx].cpus_per_node =
+			node_set_ptr[i].cpus_per_node;
+		node_set_ptr[node_set_inx].real_memory =
+			node_set_ptr[i].real_memory;
+		node_set_ptr[node_set_inx].nodes = power_cnt;
+		node_set_ptr[i].nodes -= power_cnt;
+		node_set_ptr[node_set_inx].weight =
+			node_set_ptr[i].weight + max_weight;
+		node_set_ptr[node_set_inx].features =
+			xstrdup(node_set_ptr[i].features);
+		node_set_ptr[node_set_inx].feature_array =
+			node_set_ptr[i].feature_array;
+		node_set_ptr[node_set_inx].feature_bits =
+			bit_copy(node_set_ptr[i].feature_bits);
+		node_set_ptr[node_set_inx].my_bitmap =
+			bit_copy(node_set_ptr[i].my_bitmap);
+		bit_and(node_set_ptr[node_set_inx].my_bitmap,
[...]
+		if (power_up_bitmap == NULL) {
+			power_up_bitmap = bit_copy(power_node_bitmap);
+			bit_not(power_up_bitmap);
[...]
+		bit_and(node_set_ptr[i].my_bitmap, power_up_bitmap);
[...]
+		xrealloc(node_set_ptr,
+			 sizeof(struct node_set) * (node_set_inx + 2));
+		node_set_ptr[node_set_inx + 1].my_bitmap = NULL;
[...]
+	FREE_NULL_BITMAP(power_up_bitmap);

 	*node_set_size = node_set_inx;
 	*node_set_pptr = node_set_ptr;
 	return SLURM_SUCCESS;
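The new power-save pass splits any node set that mixes powered-up and powered-down nodes, then pushes the powered-down half to the back of the scheduling order by adding max_weight (node sets are considered in ascending weight order, so a higher weight means "use only if nothing else fits"). A compact sketch of the split, with masks standing in for SLURM's bitmaps and only the fields the hunk touches:

```c
#include <stdint.h>

struct nset {			/* illustrative subset of struct node_set */
	uint64_t mask;		/* member nodes */
	int      nodes;		/* member node count */
	uint32_t weight;	/* scheduling weight, lower = preferred */
};

static int popcount64(uint64_t m)
{
	int n = 0;
	for (; m; m &= m - 1)
		n++;
	return n;
}

/* Split 's' around 'powered_down'; the powered-down part inherits the
 * set's attributes but is penalized by max_weight (cf. the hunk). */
static struct nset split_powered_down(struct nset *s, uint64_t powered_down,
				      uint32_t max_weight)
{
	struct nset down = *s;
	down.mask   = s->mask & powered_down;	/* powered-down members */
	down.nodes  = popcount64(down.mask);
	down.weight = s->weight + max_weight;	/* schedule these last */
	s->mask    &= ~powered_down;		/* keep the powered-up part */
	s->nodes   -= down.nodes;
	return down;
}
```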
@@ -1358 +1592 @@
 	return error_code;
[...]
-/* Update record of a job's allocated processors for each step */
-static void _alloc_step_cpus(struct job_record *job_ptr)
[...]
-	ListIterator step_iterator;
-	struct step_record *step_ptr;
[...]
-	if (job_ptr->step_list == NULL)
[...]
-	step_iterator = list_iterator_create(job_ptr->step_list);
-	while ((step_ptr = (struct step_record *) list_next(step_iterator))) {
-		step_alloc_lps(step_ptr);
[...]
-	list_iterator_destroy(step_iterator);
@@ -1378 +1596 @@
- * build_node_details - set cpu counts and addresses for allocated nodes:
- *	cpu_count_reps, cpus_per_node, node_addr, node_cnt, num_cpu_groups
+ * build_node_details - sets addresses for allocated nodes
  * IN job_ptr - pointer to a job record
[...]
 extern void build_node_details(struct job_record *job_ptr)
[...]
 	hostlist_t host_list = NULL;
 	struct node_record *node_ptr;
 	char *this_node_name;
-	int error_code = SLURM_SUCCESS;
-	int node_inx = 0, cpu_inx = -1;
[...]
-	uint32_t total_procs = 0;
[...]
 	if ((job_ptr->node_bitmap == NULL) || (job_ptr->nodes == NULL)) {
 		/* No nodes allocated, we're done... */
-		job_ptr->num_cpu_groups = 0;
 		job_ptr->node_cnt = 0;
-		job_ptr->cpus_per_node = NULL;
-		job_ptr->cpu_count_reps = NULL;
 		job_ptr->node_addr = NULL;
-		job_ptr->alloc_lps_cnt = 0;
-		xfree(job_ptr->alloc_lps);
-		xfree(job_ptr->used_lps);
[...]
-	job_ptr->num_cpu_groups = 0;
[...]
 	/* Use hostlist here to insure ordering of info matches that of srun */
 	if ((host_list = hostlist_create(job_ptr->nodes)) == NULL)
 		fatal("hostlist_create error for %s: %m", job_ptr->nodes);
[...]
 	job_ptr->node_cnt = hostlist_count(host_list);
-	xrealloc(job_ptr->cpus_per_node,
-		 (sizeof(uint32_t) * job_ptr->node_cnt));
-	xrealloc(job_ptr->cpu_count_reps,
-		 (sizeof(uint32_t) * job_ptr->node_cnt));
 	xrealloc(job_ptr->node_addr,
 		 (sizeof(slurm_addr) * job_ptr->node_cnt));
[...]
-	job_ptr->alloc_lps_cnt = job_ptr->node_cnt;
-	xrealloc(job_ptr->alloc_lps,
-		 (sizeof(uint32_t) * job_ptr->node_cnt));
-	xrealloc(job_ptr->used_lps,
-		 (sizeof(uint32_t) * job_ptr->node_cnt));
[...]
 	while ((this_node_name = hostlist_shift(host_list))) {
-		node_ptr = find_node_record(this_node_name);
[...]
-			uint16_t usable_lps = 0;
[...]
-			if(job_ptr->node_cnt == 1) {
-				memcpy(&job_ptr->node_addr[node_inx++],
-				       &node_ptr->slurm_addr,
-				       sizeof(slurm_addr));
[...]
-				job_ptr->cpus_per_node[cpu_inx] =
[...]
-				total_procs += job_ptr->num_procs;
-				job_ptr->cpu_count_reps[cpu_inx] = 1;
-				job_ptr->alloc_lps[0] = job_ptr->num_procs;
-				job_ptr->used_lps[0] = 0;
[...]
-			error_code = select_g_get_extra_jobinfo(
-				node_ptr, job_ptr, SELECT_AVAIL_CPUS,
[...]
-			if (error_code == SLURM_SUCCESS) {
-				if (job_ptr->alloc_lps) {
-					job_ptr->used_lps[cr_count] = 0;
-					job_ptr->alloc_lps[cr_count++] =
[...]
-				error("Unable to get extra jobinfo "
-				      "from JobId=%u", job_ptr->job_id);
-				/* Job is likely completed according to
[...]
-				if (job_ptr->alloc_lps) {
-					job_ptr->used_lps[cr_count] = 0;
-					job_ptr->alloc_lps[cr_count++] = 0;
+		if ((node_ptr = find_node_record(this_node_name))) {
 			memcpy(&job_ptr->node_addr[node_inx++],
 			       &node_ptr->slurm_addr, sizeof(slurm_addr));
[...]
-			if ((cpu_inx == -1) ||
-			    (job_ptr->cpus_per_node[cpu_inx] !=
[...]
-				job_ptr->cpus_per_node[cpu_inx] =
[...]
-				job_ptr->cpu_count_reps[cpu_inx] = 1;
[...]
-				job_ptr->cpu_count_reps[cpu_inx]++;
-			total_procs += usable_lps;
[...]
 			error("Invalid node %s in JobId=%u",
 			      this_node_name, job_ptr->job_id);
[...]
 		free(this_node_name);
[...]
 	hostlist_destroy(host_list);
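After this change build_node_details() only resolves each allocated node's address; the per-node CPU bookkeeping (num_cpu_groups, cpus_per_node, cpu_count_reps, alloc_lps, used_lps) disappears from the function, along with the removed _alloc_step_cpus() helper above. What survives is the hostlist walk: parse job_ptr->nodes, consume one allocated name at a time, free each name, destroy the list. A self-contained stand-in for that consume-and-free ownership pattern (plain comma splitting; the real hostlist API also expands bracketed ranges like "tux[1-3]"):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char nodes[] = "tux1,tux2,tux3";	/* cf. job_ptr->nodes */
	int node_inx = 0;

	/* cf. while ((this_node_name = hostlist_shift(host_list))) */
	for (char *save = NULL, *name = strtok_r(nodes, ",", &save);
	     name; name = strtok_r(NULL, ",", &save)) {
		char *this_node_name = strdup(name);
		/* ... find_node_record() + memcpy of slurm_addr here ... */
		printf("node %d: %s\n", node_inx++, this_node_name);
		free(this_node_name);	/* shifted names are caller-owned */
	}
	return 0;
}
```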
@@ -1512 +1651 @@
  *	mutually exclusive feature list.
[...]
 static bitstr_t *_valid_features(struct job_details *details_ptr,
-				 struct config_record *config_ptr)
+				 struct config_record *config_ptr,
[...]
 	bitstr_t *result_bits = (bitstr_t *) NULL;
 	ListIterator feat_iter;
 	struct feature_record *feat_ptr;
-	int found, last_op, position = 0, result;
-	int save_op = FEATURE_OP_AND, save_result=1;
+	bool found, test_names, result;
+	int last_op, position = 0;
+	int save_op = FEATURE_OP_AND, save_result = 1;
[...]
-	if (details_ptr->feature_list == NULL) {/* no constraints */
+	if (details_ptr->feature_list == NULL) { /* no constraints */
 		result_bits = bit_alloc(MAX_FEATURES);
 		bit_set(result_bits, 0);
 		return result_bits;
[...]
-	result = 1;	/* assume good for now */
+	result = true;	/* assume good for now */
 	last_op = FEATURE_OP_AND;
 	feat_iter = list_iterator_create(details_ptr->feature_list);
 	while ((feat_ptr = (struct feature_record *) list_next(feat_iter))) {
-		if (feat_ptr->count)
[...]
-		else if (config_ptr->feature_array) {
+		if (feat_ptr->count) {
[...]
+		if (test_names && config_ptr->feature_array) {
[...]
 			for (i=0; config_ptr->feature_array[i]; i++) {
 				if (strcmp(feat_ptr->name,
 					   config_ptr->feature_array[i]))
[...]
+		if (update_count && feat_ptr->count)
+			feat_ptr->tmp_cnt++;
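The reworked _valid_features() separates counted features from plain name tests (test_names) and, when its new update_count argument is set, bumps feat_ptr->tmp_cnt on each matching config so _valid_feature_counts() can verify the totals afterwards. The name test itself is a linear scan of the config's NULL-terminated feature array; a minimal sketch (the real function additionally evaluates AND/OR/XOR operators across MAX_FEATURES positions, which is omitted here):

```c
#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Return true if 'name' appears in the NULL-terminated feature array
 * (cf. the strcmp() loop over config_ptr->feature_array above). */
static bool feature_in_config(const char *name, char *const *feature_array)
{
	for (size_t i = 0; feature_array && feature_array[i]; i++) {
		if (strcmp(name, feature_array[i]) == 0)
			return true;
	}
	return false;
}
```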