/* Copyright 2013-2014 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <skiboot.h>
#include <fsp.h>
#include <lock.h>
#include <timebase.h>
#include <time-utils.h>
#include <opal-api.h>
#include <opal-msg.h>
#include <errorlog.h>
#include <device.h>
/*
 * Note on how those operate:
 *
 * Because the RTC calls can be pretty slow, these functions will shoot
 * an asynchronous request to the FSP (if none is already pending).
 *
 * The requests will return OPAL_BUSY_EVENT as long as the event has
 * not yet completed.
 *
 * WARNING: An attempt at doing an RTC write while one is already pending
 * will simply ignore the new arguments and continue returning
 * OPAL_BUSY_EVENT. This is to be compatible with existing Linux code.
 *
 * Completion of the request will result in an event OPAL_EVENT_RTC
 * being signaled, which will remain raised until a corresponding call
 * to opal_rtc_read() or opal_rtc_write() finally returns OPAL_SUCCESS,
 * at which point the operation is complete and the event cleared.
 *
 * If we end up taking longer than rtc_read_timeout_ms milliseconds waiting
 * for the response from a read request, we simply return a cached value (plus
 * an offset calculated from the timebase). When the read request finally
 * returns, we update our cache value accordingly.
 *
 * There are two separate sets of state for reads and writes. If both are
 * attempted at the same time, the event bit will remain set as long as either
 * of the two has a pending event to signal.
 */
58
/* All of the below state is protected by rtc_lock.
59
* It should be held for the shortest amount of time possible.
60
* Certainly not across calls to FSP.
62
static struct lock rtc_lock;
67
RTC_TOD_PERMANENT_ERROR,
68
} rtc_tod_state = RTC_TOD_INVALID;
70
/* State machine for getting an RTC request.
71
* RTC_{READ/WRITE}_NO_REQUEST -> RTC_{READ/WRITE}_PENDING_REQUEST (one in flight)
72
* RTC_{READ/WRITE}_PENDING_REQUEST -> RTC_{READ/WRITE}_REQUEST_AVAILABLE,
74
* RTC_{READ/WRITE}_REQUEST_AVAILABLE -> RTC_{READ/WRITE}_NO_REQUEST,
75
* when OS retrieves it
79
RTC_READ_PENDING_REQUEST,
80
RTC_READ_REQUEST_AVAILABLE,
81
} rtc_read_request_state = RTC_READ_NO_REQUEST;
85
RTC_WRITE_PENDING_REQUEST,
86
RTC_WRITE_REQUEST_AVAILABLE,
87
} rtc_write_request_state = RTC_WRITE_NO_REQUEST;
89
static bool rtc_tod_cache_dirty = false;
91
/* TODO We'd probably want to export and use this variable declared in fsp.c,
92
* instead of each component individually maintaining the state.. may be for
95
static bool fsp_in_reset = false;
97
struct opal_tpo_data {
98
uint64_t tpo_async_token;
99
uint32_t *year_month_day;
103
/* Timebase value when we last initiated a RTC read request */
104
static unsigned long read_req_tb;
106
/* If a RTC read takes longer than this, we return a value generated
107
* from the cache + timebase */
108
static const int rtc_read_timeout_ms = 1500;
/* Error-log classifications for TOD-state and RTC-read failures. */
DEFINE_LOG_ENTRY(OPAL_RC_RTC_TOD, OPAL_PLATFORM_ERR_EVT, OPAL_RTC,
		OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA);

DEFINE_LOG_ENTRY(OPAL_RC_RTC_READ, OPAL_PLATFORM_ERR_EVT, OPAL_RTC,
		OPAL_PLATFORM_FIRMWARE, OPAL_INFO, OPAL_NA);
116
static void fsp_tpo_req_complete(struct fsp_msg *read_resp)
118
struct opal_tpo_data *attr = read_resp->user_data;
122
val = (read_resp->resp->word1 >> 8) & 0xff;
124
case FSP_STATUS_TOD_RESET:
125
log_simple_error(&e_info(OPAL_RC_RTC_TOD),
126
"RTC TPO in invalid state\n");
127
rc = OPAL_INTERNAL_ERROR;
130
case FSP_STATUS_TOD_PERMANENT_ERROR:
131
log_simple_error(&e_info(OPAL_RC_RTC_TOD),
132
"RTC TPO in permanent error state\n");
133
rc = OPAL_INTERNAL_ERROR;
135
case FSP_STATUS_INVALID_DATA:
136
log_simple_error(&e_info(OPAL_RC_RTC_TOD),
137
"RTC TPO in permanent error state\n");
140
case FSP_STATUS_SUCCESS:
141
/* Save the read TPO value in our cache */
142
if (attr->year_month_day)
143
*(attr->year_month_day) =
144
read_resp->resp->data.words[0];
146
*(attr->hour_min) = read_resp->resp->data.words[1];
151
log_simple_error(&e_info(OPAL_RC_RTC_TOD),
152
"TPO read failed: %d\n", val);
153
rc = OPAL_INTERNAL_ERROR;
156
opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL,
157
attr->tpo_async_token, rc);
159
fsp_freemsg(read_resp);
162
static void fsp_rtc_process_read(struct fsp_msg *read_resp)
164
int val = (read_resp->word1 >> 8) & 0xff;
167
assert(lock_held_by_me(&rtc_lock));
169
assert(rtc_read_request_state == RTC_READ_PENDING_REQUEST);
172
case FSP_STATUS_TOD_RESET:
173
log_simple_error(&e_info(OPAL_RC_RTC_TOD),
174
"RTC TOD in invalid state\n");
175
rtc_tod_state = RTC_TOD_INVALID;
178
case FSP_STATUS_TOD_PERMANENT_ERROR:
179
log_simple_error(&e_info(OPAL_RC_RTC_TOD),
180
"RTC TOD in permanent error state\n");
181
rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
184
case FSP_STATUS_SUCCESS:
185
/* Save the read RTC value in our cache */
186
rtc_tod_state = RTC_TOD_VALID;
187
datetime_to_tm(read_resp->data.words[0],
188
(u64) read_resp->data.words[1] << 32, &tm);
189
rtc_cache_update(&tm);
190
prlog(PR_TRACE, "FSP-RTC Got time: %d-%d-%d %d:%d:%d\n",
191
tm.tm_year, tm.tm_mon, tm.tm_mday,
192
tm.tm_hour, tm.tm_min, tm.tm_sec);
196
log_simple_error(&e_info(OPAL_RC_RTC_TOD),
197
"RTC TOD read failed: %d\n", val);
198
rtc_tod_state = RTC_TOD_INVALID;
200
rtc_read_request_state = RTC_READ_REQUEST_AVAILABLE;
203
static void opal_rtc_eval_events(bool read_write)
205
bool request_available;
208
request_available = (rtc_read_request_state ==
209
RTC_READ_REQUEST_AVAILABLE);
211
request_available = (rtc_write_request_state ==
212
RTC_WRITE_REQUEST_AVAILABLE);
214
assert(lock_held_by_me(&rtc_lock));
215
opal_update_pending_evt(OPAL_EVENT_RTC,
216
request_available ? OPAL_EVENT_RTC : 0);
219
static void fsp_rtc_req_complete(struct fsp_msg *msg)
222
prlog(PR_TRACE, "RTC completion %p\n", msg);
224
if (fsp_msg_cmd(msg) == (FSP_CMD_READ_TOD & 0xffffff)) {
225
fsp_rtc_process_read(msg->resp);
226
opal_rtc_eval_events(true);
228
assert(rtc_write_request_state == RTC_WRITE_PENDING_REQUEST);
229
rtc_write_request_state = RTC_WRITE_REQUEST_AVAILABLE;
230
opal_rtc_eval_events(false);
237
static int64_t fsp_rtc_send_read_request(void)
242
assert(lock_held_by_me(&rtc_lock));
243
assert(rtc_read_request_state == RTC_READ_NO_REQUEST);
245
msg = fsp_mkmsg(FSP_CMD_READ_TOD, 0);
247
log_simple_error(&e_info(OPAL_RC_RTC_READ),
248
"RTC: failed to allocate read message\n");
249
return OPAL_INTERNAL_ERROR;
252
rc = fsp_queue_msg(msg, fsp_rtc_req_complete);
255
log_simple_error(&e_info(OPAL_RC_RTC_READ),
256
"RTC: failed to queue read message: %d\n", rc);
257
return OPAL_INTERNAL_ERROR;
260
rtc_read_request_state = RTC_READ_PENDING_REQUEST;
262
read_req_tb = mftb();
264
return OPAL_BUSY_EVENT;
267
static int64_t fsp_opal_rtc_read(uint32_t *year_month_day,
268
uint64_t *hour_minute_second_millisecond)
272
if (!year_month_day || !hour_minute_second_millisecond)
273
return OPAL_PARAMETER;
277
if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) {
282
/* During R/R of FSP, read cached TOD */
284
if (rtc_tod_state == RTC_TOD_VALID) {
285
rtc_cache_get_datetime(year_month_day,
286
hour_minute_second_millisecond);
289
rc = OPAL_INTERNAL_ERROR;
294
/* If we don't have a read pending already, fire off a request and
296
if (rtc_read_request_state == RTC_READ_NO_REQUEST) {
297
prlog(PR_TRACE, "Sending new RTC read request\n");
298
rc = fsp_rtc_send_read_request();
299
/* If our pending read is done, clear events and return the time
301
} else if (rtc_read_request_state == RTC_READ_REQUEST_AVAILABLE) {
302
prlog(PR_TRACE, "RTC read complete, state %d\n", rtc_tod_state);
303
rtc_read_request_state = RTC_READ_NO_REQUEST;
305
opal_rtc_eval_events(true);
307
if (rtc_tod_state == RTC_TOD_VALID) {
308
rtc_cache_get_datetime(year_month_day,
309
hour_minute_second_millisecond);
310
prlog(PR_TRACE,"FSP-RTC Cached datetime: %x %llx\n",
312
*hour_minute_second_millisecond);
315
rc = OPAL_INTERNAL_ERROR;
318
/* Timeout: return our cached value (updated from tb), but leave the
319
* read request pending so it will update the cache later */
320
} else if (mftb() > read_req_tb + msecs_to_tb(rtc_read_timeout_ms)) {
321
prlog(PR_TRACE, "RTC read timed out\n");
323
if (rtc_tod_state == RTC_TOD_VALID) {
324
rtc_cache_get_datetime(year_month_day,
325
hour_minute_second_millisecond);
328
rc = OPAL_INTERNAL_ERROR;
330
/* Otherwise, we're still waiting on the read to complete */
332
assert(rtc_read_request_state == RTC_READ_PENDING_REQUEST);
333
rc = OPAL_BUSY_EVENT;
340
static int64_t fsp_rtc_send_write_request(uint32_t year_month_day,
341
uint64_t hour_minute_second_millisecond)
347
assert(lock_held_by_me(&rtc_lock));
348
assert(rtc_write_request_state == RTC_WRITE_NO_REQUEST);
350
/* Create a request and send it. Just like for read, we ignore
351
* the "millisecond" field which is probably supposed to be
352
* microseconds and which Linux ignores as well anyway
355
w1 = (hour_minute_second_millisecond >> 32) & 0xffffff00;
358
msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, w0, w1, w2);
360
prlog(PR_TRACE, " -> allocation failed !\n");
361
return OPAL_INTERNAL_ERROR;
363
prlog(PR_TRACE, " -> req at %p\n", msg);
366
datetime_to_tm(msg->data.words[0],
367
(u64) msg->data.words[1] << 32, &tm);
368
rtc_cache_update(&tm);
369
rtc_tod_cache_dirty = true;
372
} else if (fsp_queue_msg(msg, fsp_rtc_req_complete)) {
373
prlog(PR_TRACE, " -> queueing failed !\n");
375
return OPAL_INTERNAL_ERROR;
378
rtc_write_request_state = RTC_WRITE_PENDING_REQUEST;
380
return OPAL_BUSY_EVENT;
383
static int64_t fsp_opal_rtc_write(uint32_t year_month_day,
384
uint64_t hour_minute_second_millisecond)
389
if (rtc_tod_state == RTC_TOD_PERMANENT_ERROR) {
394
if (rtc_write_request_state == RTC_WRITE_NO_REQUEST) {
395
prlog(PR_TRACE, "Sending new RTC write request\n");
396
rc = fsp_rtc_send_write_request(year_month_day,
397
hour_minute_second_millisecond);
398
} else if (rtc_write_request_state == RTC_WRITE_PENDING_REQUEST) {
399
rc = OPAL_BUSY_EVENT;
401
assert(rtc_write_request_state == RTC_WRITE_REQUEST_AVAILABLE);
402
rtc_write_request_state = RTC_WRITE_NO_REQUEST;
404
opal_rtc_eval_events(false);
413
/* Set timed power on values to fsp */
414
static int64_t fsp_opal_tpo_write(uint64_t async_token, uint32_t y_m_d,
417
static struct opal_tpo_data *attr;
421
return OPAL_HARDWARE;
423
attr = zalloc(sizeof(struct opal_tpo_data));
427
/* Create a request and send it.*/
428
attr->tpo_async_token = async_token;
430
prlog(PR_TRACE, "Sending TPO write request...\n");
432
msg = fsp_mkmsg(FSP_CMD_TPO_WRITE, 2, y_m_d, hr_min);
434
prerror("TPO: Failed to create message for WRITE to FSP\n");
436
return OPAL_INTERNAL_ERROR;
438
msg->user_data = attr;
439
if (fsp_queue_msg(msg, fsp_tpo_req_complete)) {
442
return OPAL_INTERNAL_ERROR;
444
return OPAL_ASYNC_COMPLETION;
447
/* Read Timed power on (TPO) from FSP */
448
static int64_t fsp_opal_tpo_read(uint64_t async_token, uint32_t *y_m_d,
451
static struct opal_tpo_data *attr;
456
return OPAL_HARDWARE;
458
if (!y_m_d || !hr_min)
459
return OPAL_PARAMETER;
461
attr = zalloc(sizeof(*attr));
465
/* Send read requet to FSP */
466
attr->tpo_async_token = async_token;
467
attr->year_month_day = y_m_d;
468
attr->hour_min = hr_min;
470
prlog(PR_TRACE, "Sending new TPO read request\n");
471
msg = fsp_mkmsg(FSP_CMD_TPO_READ, 0);
473
log_simple_error(&e_info(OPAL_RC_RTC_READ),
474
"TPO: failed to allocate read message\n");
476
return OPAL_INTERNAL_ERROR;
478
msg->user_data = attr;
479
rc = fsp_queue_msg(msg, fsp_tpo_req_complete);
483
log_simple_error(&e_info(OPAL_RC_RTC_READ),
484
"TPO: failed to queue read message: %lld\n", rc);
485
return OPAL_INTERNAL_ERROR;
487
return OPAL_ASYNC_COMPLETION;
490
static void rtc_flush_cached_tod(void)
496
if (rtc_cache_get_datetime(&y_m_d, &h_m_s_m))
498
msg = fsp_mkmsg(FSP_CMD_WRITE_TOD, 3, y_m_d,
499
(h_m_s_m >> 32) & 0xffffff00, 0);
501
prerror("TPO: %s : Failed to allocate write TOD message\n",
505
if (fsp_queue_msg(msg, fsp_freemsg)) {
507
prerror("TPO: %s : Failed to queue WRITE_TOD command\n",
513
static bool fsp_rtc_msg_rr(u32 cmd_sub_mod, struct fsp_msg *msg)
519
switch (cmd_sub_mod) {
520
case FSP_RESET_START:
526
case FSP_RELOAD_COMPLETE:
528
fsp_in_reset = false;
529
if (rtc_tod_cache_dirty) {
530
rtc_flush_cached_tod();
531
rtc_tod_cache_dirty = false;
541
static struct fsp_client fsp_rtc_client_rr = {
542
.message = fsp_rtc_msg_rr,
545
void fsp_rtc_init(void)
549
if (!fsp_present()) {
550
rtc_tod_state = RTC_TOD_PERMANENT_ERROR;
554
opal_register(OPAL_RTC_READ, fsp_opal_rtc_read, 2);
555
opal_register(OPAL_RTC_WRITE, fsp_opal_rtc_write, 2);
556
opal_register(OPAL_WRITE_TPO, fsp_opal_tpo_write, 3);
557
opal_register(OPAL_READ_TPO, fsp_opal_tpo_read, 3);
559
np = dt_new(opal_node, "rtc");
560
dt_add_property_strings(np, "compatible", "ibm,opal-rtc");
561
dt_add_property(np, "has-tpo", NULL, 0);
563
/* Register for the reset/reload event */
564
fsp_register_client(&fsp_rtc_client_rr, FSP_MCLASS_RR_EVENT);
566
prlog(PR_TRACE, "Getting initial RTC TOD\n");
568
/* We don't wait for RTC response and this is actually okay as
569
* any OPAL callers will wait correctly and if we ever have
570
* internal users then they should check the state properly
573
fsp_rtc_send_read_request();