/* Copyright 2014-2015 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
24
#include <mem_region.h>
28
EVENT_OCC_ERROR = 1 << 1,
29
EVENT_OCC_RESET = 1 << 2,
32
static uint8_t events[MAX_CHIPS];
33
static uint64_t ipoll_status[MAX_CHIPS];
34
static struct opal_prd_msg prd_msg;
35
static bool prd_msg_inuse, prd_active;
36
struct dt_node *prd_node;
40
* The events lock serialises access to the events, ipoll_status,
41
* prd_msg_inuse, and prd_active variables.
43
* The ipoll_lock protects against concurrent updates to the ipoll registers.
45
* The ipoll_lock may be acquired with events_lock held. This order must
48
static struct lock events_lock = LOCK_UNLOCKED;
49
static struct lock ipoll_lock = LOCK_UNLOCKED;
52
#define PRD_IPOLL_REG_MASK 0x01020013
53
#define PRD_IPOLL_REG_STATUS 0x01020014
54
#define PRD_IPOLL_XSTOP PPC_BIT(0) /* Xstop for host/core/millicode */
55
#define PRD_IPOLL_RECOV PPC_BIT(1) /* Recoverable */
56
#define PRD_IPOLL_SPEC_ATTN PPC_BIT(2) /* Special attention */
57
#define PRD_IPOLL_HOST_ATTN PPC_BIT(3) /* Host attention */
58
#define PRD_IPOLL_MASK PPC_BITMASK(0, 3)
60
static int queue_prd_msg_hbrt(struct opal_prd_msg *msg,
61
void (*consumed)(void *data))
65
BUILD_ASSERT(sizeof(*msg) / sizeof(uint64_t) == 4);
67
buf = (uint64_t *)msg;
69
return _opal_queue_msg(OPAL_MSG_PRD, msg, consumed, 4, buf);
72
static int queue_prd_msg_nop(struct opal_prd_msg *msg,
73
void (*consumed)(void *data))
77
return OPAL_UNSUPPORTED;
80
static int (*queue_prd_msg)(struct opal_prd_msg *msg,
81
void (*consumed)(void *data)) = queue_prd_msg_nop;
83
static void send_next_pending_event(void);
85
static void prd_msg_consumed(void *data)
87
struct opal_prd_msg *msg = data;
92
switch (msg->hdr.type) {
93
case OPAL_PRD_MSG_TYPE_ATTN:
94
proc = msg->attn.proc;
96
/* If other ipoll events have been received in the time
97
* between prd_msg creation and consumption, we'll need to
98
* raise a separate ATTN message for those. So, we only
99
* clear the event if we don't have any further ipoll_status
102
ipoll_status[proc] &= ~msg->attn.ipoll_status;
103
if (!ipoll_status[proc])
107
case OPAL_PRD_MSG_TYPE_OCC_ERROR:
108
proc = msg->occ_error.chip;
109
event = EVENT_OCC_ERROR;
111
case OPAL_PRD_MSG_TYPE_OCC_RESET:
112
proc = msg->occ_reset.chip;
113
event = EVENT_OCC_RESET;
116
prlog(PR_ERR, "PRD: invalid msg consumed, type: 0x%x\n",
121
events[proc] &= ~event;
122
prd_msg_inuse = false;
123
send_next_pending_event();
124
unlock(&events_lock);
127
static int populate_ipoll_msg(struct opal_prd_msg *msg, uint32_t proc)
133
rc = xscom_read(proc, PRD_IPOLL_REG_MASK, &ipoll_mask);
137
prlog(PR_ERR, "PRD: Unable to read ipoll status (chip %d)!\n",
142
msg->attn.proc = proc;
143
msg->attn.ipoll_status = ipoll_status[proc];
144
msg->attn.ipoll_mask = ipoll_mask;
148
static void send_next_pending_event(void)
150
struct proc_chip *chip;
154
assert(!prd_msg_inuse);
161
for_each_chip(chip) {
164
event = events[proc];
172
prd_msg_inuse = true;
174
prd_msg.hdr.size = sizeof(prd_msg);
176
if (event & EVENT_ATTN) {
177
prd_msg.hdr.type = OPAL_PRD_MSG_TYPE_ATTN;
178
populate_ipoll_msg(&prd_msg, proc);
179
} else if (event & EVENT_OCC_ERROR) {
180
prd_msg.hdr.type = OPAL_PRD_MSG_TYPE_OCC_ERROR;
181
prd_msg.occ_error.chip = proc;
182
} else if (event & EVENT_OCC_RESET) {
183
prd_msg.hdr.type = OPAL_PRD_MSG_TYPE_OCC_RESET;
184
prd_msg.occ_reset.chip = proc;
185
occ_msg_queue_occ_reset();
188
queue_prd_msg(&prd_msg, prd_msg_consumed);
191
static void __prd_event(uint32_t proc, uint8_t event)
193
events[proc] |= event;
195
send_next_pending_event();
198
static void prd_event(uint32_t proc, uint8_t event)
201
__prd_event(proc, event);
202
unlock(&events_lock);
205
static int __ipoll_update_mask(uint32_t proc, bool set, uint64_t bits)
210
rc = xscom_read(proc, PRD_IPOLL_REG_MASK, &mask);
219
return xscom_write(proc, PRD_IPOLL_REG_MASK, mask);
222
static int ipoll_record_and_mask_pending(uint32_t proc)
228
rc = xscom_read(proc, PRD_IPOLL_REG_STATUS, &status);
229
status &= PRD_IPOLL_MASK;
231
__ipoll_update_mask(proc, true, status);
235
ipoll_status[proc] |= status;
240
/* Entry point for interrupts */
241
void prd_psi_interrupt(uint32_t proc)
247
rc = ipoll_record_and_mask_pending(proc);
249
prlog(PR_ERR, "PRD: Failed to update IPOLL mask\n");
251
__prd_event(proc, EVENT_ATTN);
253
unlock(&events_lock);
256
void prd_tmgt_interrupt(uint32_t proc)
258
prd_event(proc, EVENT_OCC_ERROR);
261
void prd_occ_reset(uint32_t proc)
263
prd_event(proc, EVENT_OCC_RESET);
266
/* incoming message handlers */
267
static int prd_msg_handle_attn_ack(struct opal_prd_msg *msg)
272
rc = __ipoll_update_mask(msg->attn_ack.proc, false,
273
msg->attn_ack.ipoll_ack & PRD_IPOLL_MASK);
277
prlog(PR_ERR, "PRD: Unable to unmask ipoll!\n");
282
static int prd_msg_handle_init(struct opal_prd_msg *msg)
284
struct proc_chip *chip;
287
for_each_chip(chip) {
288
__ipoll_update_mask(chip->id, false,
289
msg->init.ipoll & PRD_IPOLL_MASK);
293
/* we're transitioning from inactive to active; send any pending tmgt
298
send_next_pending_event();
299
unlock(&events_lock);
304
static int prd_msg_handle_fini(void)
306
struct proc_chip *chip;
310
unlock(&events_lock);
313
for_each_chip(chip) {
314
__ipoll_update_mask(chip->id, true, PRD_IPOLL_MASK);
321
/* Entry from the host above */
322
static int64_t opal_prd_msg(struct opal_prd_msg *msg)
326
/* fini is a little special: the kernel (which may not have the entire
327
* opal_prd_msg definition) can send a FINI message, so we don't check
329
if (msg->hdr.size >= sizeof(struct opal_prd_msg_header) &&
330
msg->hdr.type == OPAL_PRD_MSG_TYPE_FINI)
331
return prd_msg_handle_fini();
333
if (msg->hdr.size != sizeof(*msg))
334
return OPAL_PARAMETER;
336
switch (msg->hdr.type) {
337
case OPAL_PRD_MSG_TYPE_INIT:
338
rc = prd_msg_handle_init(msg);
340
case OPAL_PRD_MSG_TYPE_ATTN_ACK:
341
rc = prd_msg_handle_attn_ack(msg);
343
case OPAL_PRD_MSG_TYPE_OCC_RESET_NOTIFY:
344
rc = occ_msg_queue_occ_reset();
347
rc = OPAL_UNSUPPORTED;
355
struct proc_chip *chip;
357
/* mask everything */
359
for_each_chip(chip) {
360
__ipoll_update_mask(chip->id, true, PRD_IPOLL_MASK);
365
/* todo: FSP implementation */
366
queue_prd_msg = queue_prd_msg_nop;
368
queue_prd_msg = queue_prd_msg_hbrt;
369
opal_register(OPAL_PRD_MSG, opal_prd_msg, 1);
372
prd_node = dt_new(opal_node, "diagnostics");
373
dt_add_property_strings(prd_node, "compatible", "ibm,opal-prd");
376
void prd_register_reserved_memory(void)
378
struct mem_region *region;
383
lock(&mem_region_lock);
384
for (region = mem_region_next(NULL); region;
385
region = mem_region_next(region)) {
387
if (region->type != REGION_HW_RESERVED)
393
if (!dt_find_property(region->node, "ibm,prd-label")) {
394
dt_add_property_string(region->node, "ibm,prd-label",
398
unlock(&mem_region_lock);