/* Copyright 2013-2016 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * NOTE(review): only these two includes survived in this chunk; the code
 * below also relies on declarations (xscom, fsp, chip, dt, prlog, timebase,
 * errorlog, psi) provided by other project headers in the original file --
 * confirm the full include list against upstream before building.
 */
#include <mem_region.h>
#include <hostservices.h>
/* OCC Communication Area for PStates */
33
#define P8_HOMER_SAPPHIRE_DATA_OFFSET 0x1F8000
35
#define MAX_PSTATES 256
37
#define chip_occ_data(chip) \
38
((struct occ_pstate_table *)(chip->homer_base + \
39
P8_HOMER_SAPPHIRE_DATA_OFFSET))
41
static bool occ_reset;
42
static struct lock occ_lock = LOCK_UNLOCKED;
44
struct occ_pstate_entry {
53
* OCC-OPAL Shared Memory Region Version 2
54
* https://github.com/open-power/occ/blob/master/src/occ/proc/proc_pstate.h
55
* Interface defined in 'sapphire_table_t'
57
struct occ_pstate_table {
64
s8 pstate_ultra_turbo;
67
struct occ_pstate_entry pstates[MAX_PSTATES];
71
/*
 * Error-log templates for the OCC failure paths below.
 * NOTE(review): the trailing argument(s) after the severity were dropped in
 * this chunk; OPAL_NA is the conventional value -- confirm against the
 * DEFINE_LOG_ENTRY macro definition.
 */
DEFINE_LOG_ENTRY(OPAL_RC_OCC_LOAD, OPAL_PLATFORM_ERR_EVT, OPAL_OCC,
		OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL,
		OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_OCC_RESET, OPAL_PLATFORM_ERR_EVT, OPAL_OCC,
		OPAL_CEC_HARDWARE, OPAL_PREDICTIVE_ERR_GENERAL,
		OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_OCC_PSTATE_INIT, OPAL_PLATFORM_ERR_EVT, OPAL_OCC,
		OPAL_CEC_HARDWARE, OPAL_INFO,
		OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_OCC_TIMEOUT, OPAL_PLATFORM_ERR_EVT, OPAL_OCC,
		OPAL_CEC_HARDWARE, OPAL_UNRECOVERABLE_ERR_GENERAL,
		OPAL_NA);
/* Check each chip's HOMER/Sapphire area for PState valid bit */
88
static bool wait_for_all_occ_init(void)
90
struct proc_chip *chip;
91
uint64_t occ_data_area;
92
struct occ_pstate_table *occ_data;
94
uint64_t start_time, end_time;
97
if (platform.occ_timeout)
98
timeout = platform.occ_timeout();
101
for_each_chip(chip) {
102
/* Check for valid homer address */
103
if (!chip->homer_base) {
105
* @fwts-label OCCInvalidHomerBase
106
* @fwts-advice The HOMER base address for a chip
107
* was not valid. This means that OCC (On Chip
108
* Controller) will be non-functional and CPU
109
* frequency scaling will not be functional. CPU may
110
* be set to a safe, low frequency. Power savings in
111
* CPU idle or CPU hotplug may be impacted.
113
prlog(PR_ERR,"OCC: Chip: %x homer_base is not valid\n",
118
if (!chip->occ_functional) {
119
prlog(PR_WARNING, "OCC: Chip: %x occ not functional\n",
124
/* Get PState table address */
125
occ_data_area = chip->homer_base + P8_HOMER_SAPPHIRE_DATA_OFFSET;
126
occ_data = (struct occ_pstate_table *)occ_data_area;
129
* Checking for occ_data->valid == 1 is ok because we clear all
130
* homer_base+size before passing memory to host services.
131
* This ensures occ_data->valid == 0 before OCC load
133
tries = timeout * 10;
134
while((occ_data->valid != 1) && tries--) {
137
if (occ_data->valid != 1) {
139
* @fwts-label OCCInvalidPStateTable
140
* @fwts-advice The pstate table for a chip
141
* was not valid. This means that OCC (On Chip
142
* Controller) will be non-functional and CPU
143
* frequency scaling will not be functional. CPU may
144
* be set to a low, safe frequency. This means
145
* that CPU idle states and CPU frequency scaling
146
* may not be functional.
148
prlog(PR_ERR, "OCC: Chip: %x PState table is not valid\n",
152
prlog(PR_DEBUG, "OCC: Chip %02x Data (%016llx) = %016llx\n",
153
chip->id, occ_data_area,
154
*(uint64_t *)occ_data_area);
157
prlog(PR_NOTICE, "OCC: All Chip Rdy after %lld ms\n",
158
(end_time - start_time) / 512 / 1000);
162
/* Add device tree properties to describe pstates states */
163
/* Retrun nominal pstate to set in each core */
164
static bool add_cpu_pstate_properties(s8 *pstate_nom)
166
struct proc_chip *chip;
167
uint64_t occ_data_area;
168
struct occ_pstate_table *occ_data;
169
struct dt_node *power_mgt;
170
u8 nr_pstates, nr_cores = 0;
172
/* Arrays for device tree */
173
u32 *dt_id, *dt_freq;
175
s8 *dt_core_max = NULL;
176
bool rc, ultra_turbo_en;
179
prlog(PR_DEBUG, "OCC: CPU pstate state device tree init\n");
181
/* Find first chip and core */
182
chip = next_chip(NULL);
184
/* Extract PState information from OCC */
186
/* Dump state table */
187
occ_data_area = chip->homer_base + P8_HOMER_SAPPHIRE_DATA_OFFSET;
189
prlog(PR_DEBUG, "OCC: Data (%16llx) = %16llx %16llx\n",
191
*(uint64_t *)occ_data_area,
192
*(uint64_t *)(occ_data_area+8));
194
occ_data = (struct occ_pstate_table *)occ_data_area;
196
if (!occ_data->valid) {
198
* @fwts-label OCCInvalidPStateTableDT
199
* @fwts-advice The pstate table for the first chip
200
* was not valid. This means that OCC (On Chip
201
* Controller) will be non-functional. This means
202
* that CPU idle states and CPU frequency scaling
203
* will not be functional as OPAL doesn't populate
204
* the device tree with pstates in this case.
206
prlog(PR_ERR, "OCC: PState table is not valid\n");
210
if (occ_data->version > 1 &&
211
occ_data->pstate_ultra_turbo > occ_data->pstate_turbo)
212
ultra_turbo_en = true;
214
ultra_turbo_en = false;
216
pmax = ultra_turbo_en ? occ_data->pstate_ultra_turbo :
217
occ_data->pstate_turbo;
218
nr_pstates = pmax - occ_data->pstate_min + 1;
219
prlog(PR_DEBUG, "OCC: Min %d Nom %d Max %d Nr States %d\n",
220
occ_data->pstate_min, occ_data->pstate_nom,
223
if (nr_pstates <= 1 || nr_pstates > 128) {
225
* @fwts-label OCCInvalidPStateRange
226
* @fwts-advice The number of pstates is outside the valid
227
* range (currently <=1 or > 128), so OPAL has not added
228
* pstates to the device tree. This means that OCC (On Chip
229
* Controller) will be non-functional. This means
230
* that CPU idle states and CPU frequency scaling
231
* will not be functional.
233
prlog(PR_ERR, "OCC: OCC range is not valid\n");
237
power_mgt = dt_find_by_path(dt_root, "/ibm,opal/power-mgt");
240
* @fwts-label OCCDTNodeNotFound
241
* @fwts-advice Device tree node /ibm,opal/power-mgt not
242
* found. OPAL didn't add pstate information to device tree.
243
* Probably a firmware bug.
245
prlog(PR_ERR, "OCC: dt node /ibm,opal/power-mgt not found\n");
251
/* Setup arrays for device-tree */
252
/* Allocate memory */
253
dt_id = malloc(nr_pstates * sizeof(u32));
256
* @fwts-label OCCdt_idENOMEM
257
* @fwts-advice Out of memory when allocating pstates array.
258
* No Pstates added to device tree, pstates not functional.
260
prlog(PR_ERR, "OCC: dt_id array alloc failure\n");
264
dt_freq = malloc(nr_pstates * sizeof(u32));
267
* @fwts-label OCCdt_freqENOMEM
268
* @fwts-advice Out of memory when allocating pstates array.
269
* No Pstates added to device tree, pstates not functional.
271
prlog(PR_ERR, "OCC: dt_freq array alloc failure\n");
275
dt_vdd = malloc(nr_pstates * sizeof(u8));
278
* @fwts-label OCCdt_vddENOMEM
279
* @fwts-advice Out of memory when allocating pstates array.
280
* No Pstates added to device tree, pstates not functional.
282
prlog(PR_ERR, "OCC: dt_vdd array alloc failure\n");
286
dt_vcs = malloc(nr_pstates * sizeof(u8));
289
* @fwts-label OCCdt_vcsENOMEM
290
* @fwts-advice Out of memory when allocating pstates array.
291
* No Pstates added to device tree, pstates not functional.
293
prlog(PR_ERR, "OCC: dt_vcs array alloc failure\n");
297
if (ultra_turbo_en) {
298
nr_cores = get_available_nr_cores_in_chip(chip->id);
299
dt_core_max = malloc(nr_cores * sizeof(s8));
302
* @fwts-label OCCdt_core_maxENOMEM
303
* @fwts-advice Out of memory allocating dt_core_max
304
* array. No PStates in Device Tree: non-functional
305
* power/frequency management.
307
prlog(PR_ERR, "OCC: dt_core_max alloc failure\n");
311
for (i = 0; i < nr_cores; i++)
312
dt_core_max[i] = occ_data->core_max[i];
315
for (i = 0, j = 0; i < MAX_PSTATES && j < nr_pstates; i++) {
316
if (occ_data->pstates[i].id > pmax ||
317
occ_data->pstates[i].id < occ_data->pstate_min)
319
dt_id[j] = occ_data->pstates[i].id;
320
dt_freq[j] = occ_data->pstates[i].freq_khz / 1000;
321
dt_vdd[j] = occ_data->pstates[i].vdd;
322
dt_vcs[j] = occ_data->pstates[i].vcs;
326
/* Add the device-tree entries */
327
dt_add_property(power_mgt, "ibm,pstate-ids", dt_id,
328
nr_pstates * sizeof(u32));
329
dt_add_property(power_mgt, "ibm,pstate-frequencies-mhz", dt_freq,
330
nr_pstates * sizeof(u32));
331
dt_add_property(power_mgt, "ibm,pstate-vdds", dt_vdd, nr_pstates);
332
dt_add_property(power_mgt, "ibm,pstate-vcss", dt_vcs, nr_pstates);
333
dt_add_property_cells(power_mgt, "ibm,pstate-min", occ_data->pstate_min);
334
dt_add_property_cells(power_mgt, "ibm,pstate-nominal", occ_data->pstate_nom);
335
dt_add_property_cells(power_mgt, "ibm,pstate-max", pmax);
337
if (ultra_turbo_en) {
338
dt_add_property_cells(power_mgt, "ibm,pstate-turbo",
339
occ_data->pstate_turbo);
340
dt_add_property_cells(power_mgt, "ibm,pstate-ultra-turbo",
341
occ_data->pstate_ultra_turbo);
342
dt_add_property(power_mgt, "ibm,pstate-core-max", dt_core_max,
347
/* Return pstate to set for each core */
348
*pstate_nom = occ_data->pstate_nom;
364
* Prepare chip for pstate transitions
367
static bool cpu_pstates_prepare_core(struct proc_chip *chip, struct cpu_thread *c, s8 pstate_nom)
369
uint32_t core = pir_to_core_id(c->pir);
370
uint64_t tmp, pstate;
374
* Currently Fastsleep init clears EX_PM_SPR_OVERRIDE_EN.
375
* Need to ensure only relevant bits are inited
378
/* Init PM GP1 for SCOM based PSTATE control to set nominal freq
380
* Use the OR SCOM to set the required bits in PM_GP1 register
381
* since the OCC might be mainpulating the PM_GP1 register as well.
383
rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_SET_GP1),
384
EX_PM_SETUP_GP1_PM_SPR_OVERRIDE_EN);
386
log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
387
"OCC: Failed to write PM_GP1 in pstates init\n");
391
/* Set new pstate to core */
392
rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_PPMCR), &tmp);
394
log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
395
"OCC: Failed to read from OCC in pstates init\n");
398
tmp = tmp & ~0xFFFF000000000000ULL;
399
pstate = ((uint64_t) pstate_nom) & 0xFF;
400
tmp = tmp | (pstate << 56) | (pstate << 48);
401
rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_PPMCR), tmp);
403
log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
404
"OCC: Failed to write PM_GP1 in pstates init\n");
407
time_wait_ms(1); /* Wait for PState to change */
409
* Init PM GP1 for SPR based PSTATE control.
410
* Once OCC is active EX_PM_SETUP_GP1_DPLL_FREQ_OVERRIDE_EN will be
411
* cleared by OCC. Sapphire need not clear.
412
* However wait for DVFS state machine to become idle after min->nominal
413
* transition initiated above. If not switch over to SPR control could fail.
415
* Use the AND SCOM to clear the required bits in PM_GP1 register
416
* since the OCC might be mainpulating the PM_GP1 register as well.
418
tmp = ~EX_PM_SETUP_GP1_PM_SPR_OVERRIDE_EN;
419
rc = xscom_write(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_CLEAR_GP1),
422
log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
423
"OCC: Failed to write PM_GP1 in pstates init\n");
428
rc = xscom_read(chip->id, XSCOM_ADDR_P8_EX_SLAVE(core, EX_PM_PPMSR), &tmp);
430
log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
431
"OCC: Failed to read back setting from OCC"
432
"in pstates init\n");
435
prlog(PR_DEBUG, "OCC: Chip %x Core %x PPMSR %016llx\n",
436
chip->id, core, tmp);
439
* If PMSR is still in transition at this point due to PState change
440
* initiated above, then the switchover to SPR may not work.
441
* ToDo: Check for DVFS state machine idle before change.
447
static bool occ_opal_msg_outstanding = false;
448
static void occ_msg_consumed(void *data __unused)
451
occ_opal_msg_outstanding = false;
455
/*
 * Opal poller: watch each chip's OCC throttle status and forward changes
 * to the host as OPAL_MSG_OCC messages. Also detects when all OCCs have
 * come back after a reset.
 */
static void occ_throttle_poll(void *data __unused)
{
	struct proc_chip *chip;
	struct occ_pstate_table *occ_data;
	struct opal_occ_msg occ_msg;
	int rc;

	/* Non-blocking: skip this poll cycle if someone holds the lock */
	if (!try_lock(&occ_lock))
		return;
	if (occ_reset) {
		int inactive = 0;

		for_each_chip(chip) {
			occ_data = chip_occ_data(chip);
			if (occ_data->valid != 1) {
				inactive = 1;
				break;
			}
		}
		if (!inactive) {
			/*
			 * Queue OCC_THROTTLE with throttle status as 0 to
			 * indicate all OCCs are active after a reset.
			 */
			occ_msg.type = cpu_to_be64(OCC_THROTTLE);
			occ_msg.chip = 0;
			occ_msg.throttle_status = 0;
			rc = _opal_queue_msg(OPAL_MSG_OCC, NULL, NULL, 3,
					     (uint64_t *)&occ_msg);
			if (!rc)
				occ_reset = false;
		}
	} else {
		/* Only one throttle message in flight at a time */
		if (occ_opal_msg_outstanding)
			goto done;
		for_each_chip(chip) {
			occ_data = chip_occ_data(chip);
			if ((occ_data->valid == 1) &&
			    (chip->throttle != occ_data->throttle) &&
			    (occ_data->throttle <= OCC_MAX_THROTTLE_STATUS)) {
				occ_msg.type = cpu_to_be64(OCC_THROTTLE);
				occ_msg.chip = cpu_to_be64(chip->id);
				occ_msg.throttle_status = cpu_to_be64(occ_data->throttle);
				rc = _opal_queue_msg(OPAL_MSG_OCC, NULL,
						     occ_msg_consumed,
						     3, (uint64_t *)&occ_msg);
				if (!rc) {
					chip->throttle = occ_data->throttle;
					occ_opal_msg_outstanding = true;
					break;
				}
			}
		}
	}
done:
	unlock(&occ_lock);
}
/* CPU-OCC PState init */
514
/* Called after OCC init on P8 */
515
void occ_pstates_init(void)
517
struct proc_chip *chip;
518
struct cpu_thread *c;
522
if (proc_gen != proc_gen_p8)
525
chip = next_chip(NULL);
526
if (!chip->homer_base) {
527
log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
528
"OCC: No HOMER detected, assuming no pstates\n");
532
/* Wait for all OCC to boot up */
533
if(!wait_for_all_occ_init()) {
534
log_simple_error(&e_info(OPAL_RC_OCC_TIMEOUT),
535
"OCC: Initialization on all chips did not complete"
541
* Check boundary conditions and add device tree nodes
542
* and return nominal pstate to set for the core
544
if (!add_cpu_pstate_properties(&pstate_nom)) {
545
log_simple_error(&e_info(OPAL_RC_OCC_PSTATE_INIT),
546
"Skiping core cpufreq init due to OCC error\n");
550
/* Setup host based pstates and set nominal frequency */
551
for_each_chip(chip) {
552
for_each_available_core_in_chip(c, chip->id) {
553
cpu_pstates_prepare_core(chip, c, pstate_nom);
557
/* Add opal_poller to poll OCC throttle status of each chip */
560
opal_add_poller(occ_throttle_poll, NULL);
563
struct occ_load_req {
567
struct list_node link;
569
static LIST_HEAD(occ_load_req_list);
571
/*
 * Partition the chip IDs into master and slave OCC lists for hostboot.
 * Allocates *master (and *slave when there are slaves); caller owns and
 * must free both arrays. Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
int find_master_and_slave_occ(uint64_t **master, uint64_t **slave,
			      int *nr_masters, int *nr_slaves)
{
	struct proc_chip *chip;
	int nr_chips = 0, i;
	int rc = 0;
	uint64_t chipids[MAX_CHIPS];

	for_each_chip(chip) {
		chipids[nr_chips++] = chip->id;
	}

	chip = next_chip(NULL);
	/*
	 * Proc0 is the master OCC for Tuleta/Alpine boxes.
	 * Hostboot expects the pair of chips for MURANO, so pass the sibling
	 * chip id along with proc0 to hostboot.
	 */
	*nr_masters = (chip->type == PROC_CHIP_P8_MURANO) ? 2 : 1;
	*master = (uint64_t *)malloc(*nr_masters * sizeof(uint64_t));
	if (!*master) {
		printf("OCC: master array alloc failure\n");
		return -ENOMEM;
	}

	if (nr_chips - *nr_masters > 0) {
		*nr_slaves = nr_chips - *nr_masters;
		*slave = (uint64_t *)malloc(*nr_slaves * sizeof(uint64_t));
		if (!*slave) {
			printf("OCC: slave array alloc failure\n");
			rc = -ENOMEM;
			goto free_master;
		}
	}

	for (i = 0; i < nr_chips; i++) {
		if (i < *nr_masters) {
			*(*master + i) = chipids[i];
			continue;
		}
		*(*slave + i - *nr_masters) = chipids[i];
	}
	return 0;

free_master:
	free(*master);
	return rc;
}
/* Park an OCC load request until LID preloading has finished */
static void occ_queue_load(u8 scope, u32 dbob_id, u32 seq_id)
{
	struct occ_load_req *occ_req;

	occ_req = zalloc(sizeof(struct occ_load_req));
	if (!occ_req) {
		/**
		 * @fwts-label OCCload_reqENOMEM
		 * @fwts-advice ENOMEM while allocating OCC load message.
		 * OCCs not started, consequently no power/frequency scaling
		 * will be functional.
		 */
		prlog(PR_ERR, "OCC: Could not allocate occ_load_req\n");
		return;
	}

	occ_req->scope = scope;
	occ_req->dbob_id = dbob_id;
	occ_req->seq_id = seq_id;
	list_add_tail(&occ_load_req_list, &occ_req->link);
}
/*
 * Load and start the OCCs via host services, then report a single status
 * message back to the FSP for all chips.
 */
static void __occ_do_load(u8 scope, u32 dbob_id __unused, u32 seq_id)
{
	struct fsp_msg *stat;
	int rc = -ENOMEM;
	int status_word = 0;
	struct proc_chip *chip = next_chip(NULL);

	/* Call HBRT to load the OCC image */
	rc = host_services_occ_load();

	/* Handle fallback to preload */
	if (rc == -ENOENT && chip->homer_base) {
		prlog(PR_INFO, "OCC: Load: Fallback to preloaded image\n");
		rc = 0;
	} else if (!rc) {
		/* Tell the host the OCC image was (re)loaded */
		struct opal_occ_msg occ_msg = { CPU_TO_BE64(OCC_LOAD), 0, 0 };

		rc = _opal_queue_msg(OPAL_MSG_OCC, NULL, NULL, 3,
				     (uint64_t *)&occ_msg);
		if (rc)
			prlog(PR_INFO, "OCC: Failed to queue message %d\n",
			      OCC_LOAD);

		/* Success, start OCC */
		rc = host_services_occ_start();
	}
	if (rc) {
		/* If either of hostservices call fail, send fail to FSP */
		/* Find a chip ID to send failure */
		for_each_chip(chip) {
			if (scope == 0x01 && dbob_id != chip->dbob_id)
				continue;
			status_word = 0xB500 | (chip->pcid & 0xff);
			break;
		}
		log_simple_error(&e_info(OPAL_RC_OCC_LOAD),
			"OCC: Error %d in load/start OCC\n", rc);
	}

	/* Send a single response for all chips */
	stat = fsp_mkmsg(FSP_CMD_LOAD_OCC_STAT, 2, status_word, seq_id);
	if (stat)
		rc = fsp_queue_msg(stat, fsp_freemsg);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_OCC_LOAD),
			"OCC: Error %d queueing FSP OCC LOAD STATUS msg", rc);
	}
}
void occ_poke_load_queue(void)
689
struct occ_load_req *occ_req, *next;
691
if (list_empty(&occ_load_req_list))
694
list_for_each_safe(&occ_load_req_list, occ_req, next, link) {
695
__occ_do_load(occ_req->scope, occ_req->dbob_id,
697
list_del(&occ_req->link);
702
/*
 * Handle an FSP "Load OCC" command: ack the message, then either defer
 * (if LID preloading is still in progress) or perform the load now.
 */
static void occ_do_load(u8 scope, u32 dbob_id __unused, u32 seq_id)
{
	struct fsp_msg *rsp;
	int rc = -ENOMEM;
	u8 err = 0;

	if (scope != 0x01 && scope != 0x02) {
		/**
		 * @fwts-label OCCLoadInvalidScope
		 * @fwts-advice Invalid request for loading OCCs. Power and
		 * frequency management not functional
		 */
		prlog(PR_ERR, "OCC: Load message with invalid scope 0x%x\n",
		      scope);
		err = 0x22;
	}

	/* First queue up an OK response to the load message itself */
	rsp = fsp_mkmsg(FSP_RSP_LOAD_OCC | err, 0);
	if (rsp)
		rc = fsp_queue_msg(rsp, fsp_freemsg);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_OCC_LOAD),
			"OCC: Error %d queueing FSP OCC LOAD reply\n", rc);
		return;
	}

	/* If we had an error, return */
	if (err)
		return;

	/*
	 * Check if hostservices lid caching is complete. If not, queue
	 * the load request for later.
	 */
	if (!hservices_lid_preload_complete()) {
		occ_queue_load(scope, dbob_id, seq_id);
		return;
	}

	__occ_do_load(scope, dbob_id, seq_id);
}
int occ_msg_queue_occ_reset(void)
747
struct opal_occ_msg occ_msg = { OCC_RESET, 0, 0 };
748
struct proc_chip *chip;
752
rc = _opal_queue_msg(OPAL_MSG_OCC, NULL, NULL, 3,
753
(uint64_t *)&occ_msg);
755
prlog(PR_INFO, "OCC: Failed to queue OCC_RESET message\n");
759
* Set 'valid' byte of chip_occ_data to 0 since OCC
760
* may not clear this byte on a reset.
761
* OCC will set the 'valid' byte to 1 when it becomes
764
for_each_chip(chip) {
765
struct occ_pstate_table *occ_data;
767
occ_data = chip_occ_data(chip);
777
/*
 * Handle an FSP "Reset OCC" command: ack it, stop the OCCs via host
 * services, and send a matching status message (success for all chips,
 * or a 0xFE failure code for the first matching chip).
 */
static void occ_do_reset(u8 scope, u32 dbob_id, u32 seq_id)
{
	struct fsp_msg *rsp, *stat;
	struct proc_chip *chip = next_chip(NULL);
	int rc = -ENOMEM;
	u8 err = 0;

	/* Check arguments */
	if (scope != 0x01 && scope != 0x02) {
		/**
		 * @fwts-label OCCResetInvalidScope
		 * @fwts-advice Invalid request for resetting OCCs. Power and
		 * frequency management not functional
		 */
		prlog(PR_ERR, "OCC: Reset message with invalid scope 0x%x\n",
		      scope);
		err = 0x22;
	}

	/* First queue up an OK response to the reset message itself */
	rsp = fsp_mkmsg(FSP_RSP_RESET_OCC | err, 0);
	if (rsp)
		rc = fsp_queue_msg(rsp, fsp_freemsg);
	if (rc) {
		log_simple_error(&e_info(OPAL_RC_OCC_RESET),
			"OCC: Error %d queueing FSP OCC RESET reply\n", rc);
		return;
	}

	/* If we had an error, return */
	if (err)
		return;

	/*
	 * Call HBRT to stop OCC and leave it stopped. FSP will send load/start
	 * request subsequently. Also after few runtime restarts (currently 3),
	 * FSP will request OCC to left in stopped state.
	 */
	rc = host_services_occ_stop();

	/* Handle fallback to preload */
	if (rc == -ENOENT && chip->homer_base) {
		prlog(PR_INFO, "OCC: Reset: Fallback to preloaded image\n");
		rc = 0;
	}
	if (!rc) {
		/* Send a single success response for all chips */
		stat = fsp_mkmsg(FSP_CMD_RESET_OCC_STAT, 2, 0, seq_id);
		if (stat)
			rc = fsp_queue_msg(stat, fsp_freemsg);
		if (rc) {
			log_simple_error(&e_info(OPAL_RC_OCC_RESET),
				"OCC: Error %d queueing FSP OCC RESET"
				" STATUS message\n", rc);
		}
		occ_msg_queue_occ_reset();
	} else {
		/*
		 * Then send a matching OCC Reset Status message with an 0xFE
		 * (fail) response code as well to the first matching chip
		 */
		for_each_chip(chip) {
			if (scope == 0x01 && dbob_id != chip->dbob_id)
				continue;
			rc = -ENOMEM;
			stat = fsp_mkmsg(FSP_CMD_RESET_OCC_STAT, 2,
					 0xfe00 | (chip->pcid & 0xff), seq_id);
			if (stat)
				rc = fsp_queue_msg(stat, fsp_freemsg);
			if (rc) {
				log_simple_error(&e_info(OPAL_RC_OCC_RESET),
					"OCC: Error %d queueing FSP OCC RESET"
					" STATUS message\n", rc);
			}
			break;
		}
	}
}
/* OCC GP0 register and its AND/OR scom-write aliases */
#define PV_OCC_GP0		0x01000000
#define PV_OCC_GP0_AND		0x01000004
#define PV_OCC_GP0_OR		0x01000005
#define PV_OCC_GP0_PNOR_OWNER	PPC_BIT(18) /* 1 = OCC / Host, 0 = BMC */
/*
 * Set the PNOR owner bit on one chip: OR-write to give it to the host,
 * AND-write (with inverted mask) to give it back to the BMC.
 */
static void occ_pnor_set_one_owner(uint32_t chip_id, enum pnor_owner owner)
{
	uint64_t reg, mask;

	if (owner == PNOR_OWNER_HOST) {
		reg = PV_OCC_GP0_OR;
		mask = PV_OCC_GP0_PNOR_OWNER;
	} else {
		reg = PV_OCC_GP0_AND;
		mask = ~PV_OCC_GP0_PNOR_OWNER;
	}

	xscom_write(chip_id, reg, mask);
}
void occ_pnor_set_owner(enum pnor_owner owner)
883
struct proc_chip *chip;
886
occ_pnor_set_one_owner(chip->id, owner);
889
/*
 * FSP message handler for the OCC class: dispatches Load and Reset
 * commands. Returns true if the message was handled.
 */
static bool fsp_occ_msg(u32 cmd_sub_mod, struct fsp_msg *msg)
{
	u32 dbob_id, seq_id;
	u8 scope;

	switch (cmd_sub_mod) {
	case FSP_CMD_LOAD_OCC:
		/*
		 * We get the "Load OCC" command at boot. We don't currently
		 * support loading it ourselves (we don't have the procedures,
		 * they will come with Host Services). For now HostBoot will
		 * have loaded a OCC firmware for us, but we still need to
		 * be nice and respond to OCC.
		 */
		scope = msg->data.bytes[3];
		dbob_id = msg->data.words[1];
		seq_id = msg->data.words[2];
		prlog(PR_INFO, "OCC: Got OCC Load message, scope=0x%x"
		      " dbob=0x%x seq=0x%x\n", scope, dbob_id, seq_id);
		occ_do_load(scope, dbob_id, seq_id);
		return true;
	case FSP_CMD_RESET_OCC:
		/*
		 * We shouldn't be getting this one, but if we do, we have
		 * to reply something sensible or the FSP will get upset
		 */
		scope = msg->data.bytes[3];
		dbob_id = msg->data.words[1];
		seq_id = msg->data.words[2];
		prlog(PR_INFO, "OCC: Got OCC Reset message, scope=0x%x"
		      " dbob=0x%x seq=0x%x\n", scope, dbob_id, seq_id);
		occ_do_reset(scope, dbob_id, seq_id);
		return true;
	}
	return false;
}
static struct fsp_client fsp_occ_client = {
928
.message = fsp_occ_msg,
931
/* OCCMISC register and its AND/OR scom-write aliases */
#define OCB_OCI_OCCMISC			0x6a020
#define OCB_OCI_OCCMISC_AND		0x6a021
#define OCB_OCI_OCCMISC_OR		0x6a022
#define OCB_OCI_OCIMISC_IRQ		PPC_BIT(0)
#define OCB_OCI_OCIMISC_IRQ_TMGT	PPC_BIT(1)
#define OCB_OCI_OCIMISC_IRQ_SLW_TMR	PPC_BIT(14)
#define OCB_OCI_OCIMISC_IRQ_OPAL_DUMMY	PPC_BIT(15)
#define OCB_OCI_OCIMISC_MASK		(OCB_OCI_OCIMISC_IRQ_TMGT | \
					 OCB_OCI_OCIMISC_IRQ_OPAL_DUMMY | \
					 OCB_OCI_OCIMISC_IRQ_SLW_TMR)
void occ_send_dummy_interrupt(void)
945
struct proc_chip *chip = get_chip(this_cpu()->chip_id);
947
/* Emulators and P7 doesn't do this */
948
if (proc_gen != proc_gen_p8 || chip_quirk(QUIRK_NO_OCC_IRQ))
951
/* Find a functional PSI. This ensures an interrupt even if
952
* the psihb on the current chip is not configured */
956
psi = psi_find_functional_chip();
959
prlog_once(PR_WARNING, "PSI: no functional PSI HB found, "
960
"no self interrupts delivered\n");
964
xscom_write(psi->chip_id, OCB_OCI_OCCMISC_OR,
965
OCB_OCI_OCIMISC_IRQ | OCB_OCI_OCIMISC_IRQ_OPAL_DUMMY);
968
/* Demultiplex and clear an OCC interrupt for the given chip */
void occ_interrupt(uint32_t chip_id)
{
	uint64_t ireg;
	int64_t rc;

	/* The OCC interrupt is used to mux up to 15 different sources */
	rc = xscom_read(chip_id, OCB_OCI_OCCMISC, &ireg);
	if (rc) {
		prerror("OCC: Failed to read interrupt status !\n");
		/* Should we mask it in the XIVR ? */
		return;
	}
	prlog(PR_TRACE, "OCC: IRQ received: %04llx\n", ireg >> 48);

	/* Clear the bits we just observed */
	xscom_write(chip_id, OCB_OCI_OCCMISC_AND, ~ireg);

	/* Dispatch */
	if (ireg & OCB_OCI_OCIMISC_IRQ_TMGT)
		prd_tmgt_interrupt(chip_id);
	if (ireg & OCB_OCI_OCIMISC_IRQ_SLW_TMR)
		check_timers(true);

	/* We may have masked-out OCB_OCI_OCIMISC_IRQ in the previous
	 * OCCMISC_AND write. Check if there are any new source bits set,
	 * and trigger another interrupt if so.
	 */
	rc = xscom_read(chip_id, OCB_OCI_OCCMISC, &ireg);
	if (!rc && (ireg & OCB_OCI_OCIMISC_MASK))
		xscom_write(chip_id, OCB_OCI_OCCMISC_OR, OCB_OCI_OCIMISC_IRQ);
}
void occ_fsp_init(void)
1002
/* OCC is P8 only */
1003
if (proc_gen != proc_gen_p8)
1006
/* If we have an FSP, register for notifications */
1008
fsp_register_client(&fsp_occ_client, FSP_MCLASS_OCC);