2
* S/390 helper routines
4
* Copyright (c) 2009 Ulrich Hecht
5
* Copyright (c) 2009 Alexander Graf
7
* This library is free software; you can redistribute it and/or
8
* modify it under the terms of the GNU Lesser General Public
9
* License as published by the Free Software Foundation; either
10
* version 2 of the License, or (at your option) any later version.
12
* This library is distributed in the hope that it will be useful,
13
* but WITHOUT ANY WARRANTY; without even the implied warranty of
14
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15
* Lesser General Public License for more details.
17
* You should have received a copy of the GNU Lesser General Public
18
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
22
#include "dyngen-exec.h"
23
#include "host-utils.h"
27
#include "qemu-timer.h"
29
#include <linux/kvm.h>
32
#if !defined (CONFIG_USER_ONLY)
36
/*****************************************************************************/
38
#if !defined (CONFIG_USER_ONLY)
39
#include "softmmu_exec.h"
41
#define MMUSUFFIX _mmu
44
#include "softmmu_template.h"
47
#include "softmmu_template.h"
50
#include "softmmu_template.h"
53
#include "softmmu_template.h"
55
/* try to fill the TLB and return an exception if error. If retaddr is
56
NULL, it means that the function was called in C code (i.e. not
57
from generated code or from helper.c) */
58
/* XXX: fix it to restore all registers */
59
void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
69
ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
70
if (unlikely(ret != 0)) {
71
if (likely(retaddr)) {
72
/* now we have a real cpu fault */
73
pc = (unsigned long)retaddr;
76
/* the PC is inside the translated code. It means that we have
77
a virtual CPU fault */
78
cpu_restore_state(tb, env, pc);
88
/* #define DEBUG_HELPER */
90
#define HELPER_LOG(x...) qemu_log(x)
92
#define HELPER_LOG(x...)
95
/* raise an exception */
96
void HELPER(exception)(uint32_t excp)
98
HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
99
env->exception_index = excp;
103
#ifndef CONFIG_USER_ONLY
104
static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
107
target_phys_addr_t dest_phys;
108
target_phys_addr_t len = l;
110
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
113
if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
115
cpu_abort(env, "should never reach here");
117
dest_phys |= dest & ~TARGET_PAGE_MASK;
119
dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
121
memset(dest_p, byte, len);
123
cpu_physical_memory_unmap(dest_p, 1, len, len);
126
static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
129
target_phys_addr_t dest_phys;
130
target_phys_addr_t src_phys;
131
target_phys_addr_t len = l;
134
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
137
if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
139
cpu_abort(env, "should never reach here");
141
dest_phys |= dest & ~TARGET_PAGE_MASK;
143
if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
145
cpu_abort(env, "should never reach here");
147
src_phys |= src & ~TARGET_PAGE_MASK;
149
dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
150
src_p = cpu_physical_memory_map(src_phys, &len, 0);
152
memmove(dest_p, src_p, len);
154
cpu_physical_memory_unmap(dest_p, 1, len, len);
155
cpu_physical_memory_unmap(src_p, 0, len, len);
160
uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
166
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
167
__FUNCTION__, l, dest, src);
168
for (i = 0; i <= l; i++) {
169
x = ldub(dest + i) & ldub(src + i);
179
uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
185
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
186
__FUNCTION__, l, dest, src);
188
#ifndef CONFIG_USER_ONLY
189
/* xor with itself is the same as memset(0) */
190
if ((l > 32) && (src == dest) &&
191
(src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
192
mvc_fast_memset(env, l + 1, dest, 0);
197
memset(g2h(dest), 0, l + 1);
202
for (i = 0; i <= l; i++) {
203
x = ldub(dest + i) ^ ldub(src + i);
213
uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
219
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
220
__FUNCTION__, l, dest, src);
221
for (i = 0; i <= l; i++) {
222
x = ldub(dest + i) | ldub(src + i);
232
void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
236
uint32_t l_64 = (l + 1) / 8;
238
HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
239
__FUNCTION__, l, dest, src);
241
#ifndef CONFIG_USER_ONLY
243
(src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
244
(dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
245
if (dest == (src + 1)) {
246
mvc_fast_memset(env, l + 1, dest, ldub(src));
248
} else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
249
mvc_fast_memmove(env, l + 1, dest, src);
254
if (dest == (src + 1)) {
255
memset(g2h(dest), ldub(src), l + 1);
258
memmove(g2h(dest), g2h(src), l + 1);
263
/* handle the parts that fit into 8-byte loads/stores */
264
if (dest != (src + 1)) {
265
for (i = 0; i < l_64; i++) {
266
stq(dest + x, ldq(src + x));
271
/* slow version crossing pages with byte accesses */
272
for (i = x; i <= l; i++) {
273
stb(dest + i, ldub(src + i));
277
/* compare unsigned byte arrays */
278
uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
283
HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
284
__FUNCTION__, l, s1, s2);
285
for (i = 0; i <= l; i++) {
288
HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
303
/* compare logical under mask */
304
uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
308
HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
314
r = (r1 & 0xff000000UL) >> 24;
315
HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
326
mask = (mask << 1) & 0xf;
333
/* store character under mask */
334
void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
337
HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
341
r = (r1 & 0xff000000UL) >> 24;
343
HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
346
mask = (mask << 1) & 0xf;
352
/* 64/64 -> 128 unsigned multiplication */
353
void HELPER(mlg)(uint32_t r1, uint64_t v2)
355
#if HOST_LONG_BITS == 64 && defined(__GNUC__)
356
/* assuming 64-bit hosts have __uint128_t */
357
__uint128_t res = (__uint128_t)env->regs[r1 + 1];
358
res *= (__uint128_t)v2;
359
env->regs[r1] = (uint64_t)(res >> 64);
360
env->regs[r1 + 1] = (uint64_t)res;
362
mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
366
/* 128 -> 64/64 unsigned division */
367
void HELPER(dlg)(uint32_t r1, uint64_t v2)
369
uint64_t divisor = v2;
371
if (!env->regs[r1]) {
372
/* 64 -> 64/64 case */
373
env->regs[r1] = env->regs[r1+1] % divisor;
374
env->regs[r1+1] = env->regs[r1+1] / divisor;
378
#if HOST_LONG_BITS == 64 && defined(__GNUC__)
379
/* assuming 64-bit hosts have __uint128_t */
380
__uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
382
__uint128_t quotient = dividend / divisor;
383
env->regs[r1+1] = quotient;
384
__uint128_t remainder = dividend % divisor;
385
env->regs[r1] = remainder;
387
/* 32-bit hosts would need special wrapper functionality - just abort if
388
we encounter such a case; it's very unlikely anyways. */
389
cpu_abort(env, "128 -> 64/64 division not implemented\n");
394
static inline uint64_t get_address(int x2, int b2, int d2)
407
if (!(env->psw.mask & PSW_MASK_64)) {
414
static inline uint64_t get_address_31fix(int reg)
416
uint64_t r = env->regs[reg];
419
if (!(env->psw.mask & PSW_MASK_64)) {
426
/* search string (c is byte to search, r2 is string, r1 end of string) */
427
uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
431
uint64_t str = get_address_31fix(r2);
432
uint64_t end = get_address_31fix(r1);
434
HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
435
c, env->regs[r1], env->regs[r2]);
437
for (i = str; i != end; i++) {
448
/* unsigned string compare (c is string terminator) */
449
uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
451
uint64_t s1 = get_address_31fix(r1);
452
uint64_t s2 = get_address_31fix(r2);
456
#ifdef CONFIG_USER_ONLY
458
HELPER_LOG("%s: comparing '%s' and '%s'\n",
459
__FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
465
if ((v1 == c || v2 == c) || (v1 != v2)) {
475
cc = (v1 < v2) ? 1 : 2;
476
/* FIXME: 31-bit mode! */
484
void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
486
/* XXX missing r0 handling */
487
#ifdef CONFIG_USER_ONLY
490
for (i = 0; i < TARGET_PAGE_SIZE; i++) {
491
stb(r1 + i, ldub(r2 + i));
494
mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
498
/* string copy (c is string terminator) */
499
void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
501
uint64_t dest = get_address_31fix(r1);
502
uint64_t src = get_address_31fix(r2);
505
#ifdef CONFIG_USER_ONLY
507
HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
520
env->regs[r1] = dest; /* FIXME: 31-bit mode! */
523
/* compare and swap 64-bit */
524
uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
526
/* FIXME: locking? */
528
uint64_t v2 = ldq(a2);
529
if (env->regs[r1] == v2) {
531
stq(a2, env->regs[r3]);
539
/* compare double and swap 64-bit */
540
uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
542
/* FIXME: locking? */
544
uint64_t v2_hi = ldq(a2);
545
uint64_t v2_lo = ldq(a2 + 8);
546
uint64_t v1_hi = env->regs[r1];
547
uint64_t v1_lo = env->regs[r1 + 1];
549
if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
551
stq(a2, env->regs[r3]);
552
stq(a2 + 8, env->regs[r3 + 1]);
555
env->regs[r1] = v2_hi;
556
env->regs[r1 + 1] = v2_lo;
562
/* compare and swap 32-bit */
563
uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
565
/* FIXME: locking? */
567
HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
568
uint32_t v2 = ldl(a2);
569
if (((uint32_t)env->regs[r1]) == v2) {
571
stl(a2, (uint32_t)env->regs[r3]);
574
env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
579
static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
581
int pos = 24; /* top of the lower half of r1 */
582
uint64_t rmask = 0xff000000ULL;
589
env->regs[r1] &= ~rmask;
591
if ((val & 0x80) && !ccd) {
595
if (val && cc == 0) {
598
env->regs[r1] |= (uint64_t)val << pos;
601
mask = (mask << 1) & 0xf;
609
/* execute instruction
610
this instruction executes an insn modified with the contents of r1
611
it does not change the executed instruction in memory
612
it does not change the program counter
613
in other words: tricky...
614
currently implemented by interpreting the cases it is most commonly used in
616
uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
618
uint16_t insn = lduw_code(addr);
619
HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
621
if ((insn & 0xf0ff) == 0xd000) {
622
uint32_t l, insn2, b1, b2, d1, d2;
624
insn2 = ldl_code(addr + 2);
625
b1 = (insn2 >> 28) & 0xf;
626
b2 = (insn2 >> 12) & 0xf;
627
d1 = (insn2 >> 16) & 0xfff;
629
switch (insn & 0xf00) {
631
helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
634
cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
637
cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
643
} else if ((insn & 0xff00) == 0x0a00) {
644
/* supervisor call */
645
HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
646
env->psw.addr = ret - 4;
647
env->int_svc_code = (insn|v1) & 0xff;
648
env->int_svc_ilc = 4;
649
helper_exception(EXCP_SVC);
650
} else if ((insn & 0xff00) == 0xbf00) {
651
uint32_t insn2, r1, r3, b2, d2;
652
insn2 = ldl_code(addr + 2);
653
r1 = (insn2 >> 20) & 0xf;
654
r3 = (insn2 >> 16) & 0xf;
655
b2 = (insn2 >> 12) & 0xf;
657
cc = helper_icm(r1, get_address(0, b2, d2), r3);
660
cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
666
/* absolute value 32-bit */
667
uint32_t HELPER(abs_i32)(int32_t val)
676
/* negative absolute value 32-bit */
677
int32_t HELPER(nabs_i32)(int32_t val)
686
/* absolute value 64-bit */
687
uint64_t HELPER(abs_i64)(int64_t val)
689
HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
698
/* negative absolute value 64-bit */
699
int64_t HELPER(nabs_i64)(int64_t val)
708
/* add with carry 32-bit unsigned */
709
uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
721
/* store character under mask high operates on the upper half of r1 */
722
void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
724
int pos = 56; /* top of the upper half of r1 */
728
stb(address, (env->regs[r1] >> pos) & 0xff);
731
mask = (mask << 1) & 0xf;
736
/* insert character under mask high; same as icm, but operates on the
738
uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
740
int pos = 56; /* top of the upper half of r1 */
741
uint64_t rmask = 0xff00000000000000ULL;
748
env->regs[r1] &= ~rmask;
750
if ((val & 0x80) && !ccd) {
754
if (val && cc == 0) {
757
env->regs[r1] |= (uint64_t)val << pos;
760
mask = (mask << 1) & 0xf;
768
/* insert psw mask and condition code into r1 */
769
void HELPER(ipm)(uint32_t cc, uint32_t r1)
771
uint64_t r = env->regs[r1];
773
r &= 0xffffffff00ffffffULL;
774
r |= (cc << 28) | ( (env->psw.mask >> 40) & 0xf );
776
HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
777
cc, env->psw.mask, r);
780
/* load access registers r1 to r3 from memory at a2 */
781
void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
785
for (i = r1;; i = (i + 1) % 16) {
786
env->aregs[i] = ldl(a2);
795
/* store access registers r1 to r3 in memory at a2 */
796
void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
800
for (i = r1;; i = (i + 1) % 16) {
801
stl(a2, env->aregs[i]);
811
uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
813
uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
814
uint64_t dest = get_address_31fix(r1);
815
uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
816
uint64_t src = get_address_31fix(r2);
817
uint8_t pad = src >> 24;
821
if (destlen == srclen) {
823
} else if (destlen < srclen) {
829
if (srclen > destlen) {
833
for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
838
for (; destlen; dest++, destlen--) {
842
env->regs[r1 + 1] = destlen;
843
/* can't use srclen here, we trunc'ed it */
844
env->regs[r2 + 1] -= src - env->regs[r2];
845
env->regs[r1] = dest;
851
/* move long extended another memcopy insn with more bells and whistles */
852
uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
854
uint64_t destlen = env->regs[r1 + 1];
855
uint64_t dest = env->regs[r1];
856
uint64_t srclen = env->regs[r3 + 1];
857
uint64_t src = env->regs[r3];
858
uint8_t pad = a2 & 0xff;
862
if (!(env->psw.mask & PSW_MASK_64)) {
863
destlen = (uint32_t)destlen;
864
srclen = (uint32_t)srclen;
869
if (destlen == srclen) {
871
} else if (destlen < srclen) {
877
if (srclen > destlen) {
881
for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
886
for (; destlen; dest++, destlen--) {
890
env->regs[r1 + 1] = destlen;
891
/* can't use srclen here, we trunc'ed it */
892
/* FIXME: 31-bit mode! */
893
env->regs[r3 + 1] -= src - env->regs[r3];
894
env->regs[r1] = dest;
900
/* compare logical long extended memcompare insn with padding */
901
uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
903
uint64_t destlen = env->regs[r1 + 1];
904
uint64_t dest = get_address_31fix(r1);
905
uint64_t srclen = env->regs[r3 + 1];
906
uint64_t src = get_address_31fix(r3);
907
uint8_t pad = a2 & 0xff;
908
uint8_t v1 = 0,v2 = 0;
911
if (!(destlen || srclen)) {
915
if (srclen > destlen) {
919
for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
920
v1 = srclen ? ldub(src) : pad;
921
v2 = destlen ? ldub(dest) : pad;
923
cc = (v1 < v2) ? 1 : 2;
928
env->regs[r1 + 1] = destlen;
929
/* can't use srclen here, we trunc'ed it */
930
env->regs[r3 + 1] -= src - env->regs[r3];
931
env->regs[r1] = dest;
937
/* subtract unsigned v2 from v1 with borrow */
938
uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
940
uint32_t v1 = env->regs[r1];
941
uint32_t res = v1 + (~v2) + (cc >> 1);
943
env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
952
/* subtract unsigned v2 from v1 with borrow */
953
uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
955
uint64_t res = v1 + (~v2) + (cc >> 1);
966
static inline int float_comp_to_cc(int float_compare)
968
switch (float_compare) {
969
case float_relation_equal:
971
case float_relation_less:
973
case float_relation_greater:
975
case float_relation_unordered:
978
cpu_abort(env, "unknown return value for float compare\n");
982
/* condition codes for binary FP ops */
983
static uint32_t set_cc_f32(float32 v1, float32 v2)
985
return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
988
static uint32_t set_cc_f64(float64 v1, float64 v2)
990
return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
993
/* condition codes for unary FP ops */
994
static uint32_t set_cc_nz_f32(float32 v)
996
if (float32_is_any_nan(v)) {
998
} else if (float32_is_zero(v)) {
1000
} else if (float32_is_neg(v)) {
1007
static uint32_t set_cc_nz_f64(float64 v)
1009
if (float64_is_any_nan(v)) {
1011
} else if (float64_is_zero(v)) {
1013
} else if (float64_is_neg(v)) {
1020
static uint32_t set_cc_nz_f128(float128 v)
1022
if (float128_is_any_nan(v)) {
1024
} else if (float128_is_zero(v)) {
1026
} else if (float128_is_neg(v)) {
1033
/* convert 32-bit int to 64-bit float */
1034
void HELPER(cdfbr)(uint32_t f1, int32_t v2)
1036
HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
1037
env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
1040
/* convert 32-bit int to 128-bit float */
1041
void HELPER(cxfbr)(uint32_t f1, int32_t v2)
1044
v1.q = int32_to_float128(v2, &env->fpu_status);
1045
env->fregs[f1].ll = v1.ll.upper;
1046
env->fregs[f1 + 2].ll = v1.ll.lower;
1049
/* convert 64-bit int to 32-bit float */
1050
void HELPER(cegbr)(uint32_t f1, int64_t v2)
1052
HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1053
env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
1056
/* convert 64-bit int to 64-bit float */
1057
void HELPER(cdgbr)(uint32_t f1, int64_t v2)
1059
HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1060
env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
1063
/* convert 64-bit int to 128-bit float */
1064
void HELPER(cxgbr)(uint32_t f1, int64_t v2)
1067
x1.q = int64_to_float128(v2, &env->fpu_status);
1068
HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
1069
x1.ll.upper, x1.ll.lower);
1070
env->fregs[f1].ll = x1.ll.upper;
1071
env->fregs[f1 + 2].ll = x1.ll.lower;
1074
/* convert 32-bit int to 32-bit float */
1075
void HELPER(cefbr)(uint32_t f1, int32_t v2)
1077
env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
1078
HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
1079
env->fregs[f1].l.upper, f1);
1082
/* 32-bit FP addition RR */
1083
uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
1085
env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1086
env->fregs[f2].l.upper,
1088
HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1089
env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1091
return set_cc_nz_f32(env->fregs[f1].l.upper);
1094
/* 64-bit FP addition RR */
1095
uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
1097
env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
1099
HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
1100
env->fregs[f2].d, env->fregs[f1].d, f1);
1102
return set_cc_nz_f64(env->fregs[f1].d);
1105
/* 32-bit FP subtraction RR */
1106
uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
1108
env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
1109
env->fregs[f2].l.upper,
1111
HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1112
env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1114
return set_cc_nz_f32(env->fregs[f1].l.upper);
1117
/* 64-bit FP subtraction RR */
1118
uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
1120
env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
1122
HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1123
__FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
1125
return set_cc_nz_f64(env->fregs[f1].d);
1128
/* 32-bit FP division RR */
1129
void HELPER(debr)(uint32_t f1, uint32_t f2)
1131
env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
1132
env->fregs[f2].l.upper,
1136
/* 128-bit FP division RR */
1137
void HELPER(dxbr)(uint32_t f1, uint32_t f2)
1140
v1.ll.upper = env->fregs[f1].ll;
1141
v1.ll.lower = env->fregs[f1 + 2].ll;
1143
v2.ll.upper = env->fregs[f2].ll;
1144
v2.ll.lower = env->fregs[f2 + 2].ll;
1146
res.q = float128_div(v1.q, v2.q, &env->fpu_status);
1147
env->fregs[f1].ll = res.ll.upper;
1148
env->fregs[f1 + 2].ll = res.ll.lower;
1151
/* 64-bit FP multiplication RR */
1152
void HELPER(mdbr)(uint32_t f1, uint32_t f2)
1154
env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
1158
/* 128-bit FP multiplication RR */
1159
void HELPER(mxbr)(uint32_t f1, uint32_t f2)
1162
v1.ll.upper = env->fregs[f1].ll;
1163
v1.ll.lower = env->fregs[f1 + 2].ll;
1165
v2.ll.upper = env->fregs[f2].ll;
1166
v2.ll.lower = env->fregs[f2 + 2].ll;
1168
res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
1169
env->fregs[f1].ll = res.ll.upper;
1170
env->fregs[f1 + 2].ll = res.ll.lower;
1173
/* convert 32-bit float to 64-bit float */
1174
void HELPER(ldebr)(uint32_t r1, uint32_t r2)
1176
env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
1180
/* convert 128-bit float to 64-bit float */
1181
void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
1184
x2.ll.upper = env->fregs[f2].ll;
1185
x2.ll.lower = env->fregs[f2 + 2].ll;
1186
env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
1187
HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
1190
/* convert 64-bit float to 128-bit float */
1191
void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
1194
res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
1195
env->fregs[f1].ll = res.ll.upper;
1196
env->fregs[f1 + 2].ll = res.ll.lower;
1199
/* convert 64-bit float to 32-bit float */
1200
void HELPER(ledbr)(uint32_t f1, uint32_t f2)
1202
float64 d2 = env->fregs[f2].d;
1203
env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
1206
/* convert 128-bit float to 32-bit float */
1207
void HELPER(lexbr)(uint32_t f1, uint32_t f2)
1210
x2.ll.upper = env->fregs[f2].ll;
1211
x2.ll.lower = env->fregs[f2 + 2].ll;
1212
env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
1213
HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
1216
/* absolute value of 32-bit float */
1217
uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
1220
float32 v2 = env->fregs[f2].d;
1221
v1 = float32_abs(v2);
1222
env->fregs[f1].d = v1;
1223
return set_cc_nz_f32(v1);
1226
/* absolute value of 64-bit float */
1227
uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
1230
float64 v2 = env->fregs[f2].d;
1231
v1 = float64_abs(v2);
1232
env->fregs[f1].d = v1;
1233
return set_cc_nz_f64(v1);
1236
/* absolute value of 128-bit float */
1237
uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
1241
v2.ll.upper = env->fregs[f2].ll;
1242
v2.ll.lower = env->fregs[f2 + 2].ll;
1243
v1.q = float128_abs(v2.q);
1244
env->fregs[f1].ll = v1.ll.upper;
1245
env->fregs[f1 + 2].ll = v1.ll.lower;
1246
return set_cc_nz_f128(v1.q);
1249
/* load and test 64-bit float */
1250
uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
1252
env->fregs[f1].d = env->fregs[f2].d;
1253
return set_cc_nz_f64(env->fregs[f1].d);
1256
/* load and test 32-bit float */
1257
uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
1259
env->fregs[f1].l.upper = env->fregs[f2].l.upper;
1260
return set_cc_nz_f32(env->fregs[f1].l.upper);
1263
/* load and test 128-bit float */
1264
uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
1267
x.ll.upper = env->fregs[f2].ll;
1268
x.ll.lower = env->fregs[f2 + 2].ll;
1269
env->fregs[f1].ll = x.ll.upper;
1270
env->fregs[f1 + 2].ll = x.ll.lower;
1271
return set_cc_nz_f128(x.q);
1274
/* load complement of 32-bit float */
1275
uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
1277
env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
1279
return set_cc_nz_f32(env->fregs[f1].l.upper);
1282
/* load complement of 64-bit float */
1283
uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
1285
env->fregs[f1].d = float64_chs(env->fregs[f2].d);
1287
return set_cc_nz_f64(env->fregs[f1].d);
1290
/* load complement of 128-bit float */
1291
uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
1294
x2.ll.upper = env->fregs[f2].ll;
1295
x2.ll.lower = env->fregs[f2 + 2].ll;
1296
x1.q = float128_chs(x2.q);
1297
env->fregs[f1].ll = x1.ll.upper;
1298
env->fregs[f1 + 2].ll = x1.ll.lower;
1299
return set_cc_nz_f128(x1.q);
1302
/* 32-bit FP addition RM */
1303
void HELPER(aeb)(uint32_t f1, uint32_t val)
1305
float32 v1 = env->fregs[f1].l.upper;
1308
HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
1310
env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
1313
/* 32-bit FP division RM */
1314
void HELPER(deb)(uint32_t f1, uint32_t val)
1316
float32 v1 = env->fregs[f1].l.upper;
1319
HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
1321
env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
1324
/* 32-bit FP multiplication RM */
1325
void HELPER(meeb)(uint32_t f1, uint32_t val)
1327
float32 v1 = env->fregs[f1].l.upper;
1330
HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
1332
env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
1335
/* 32-bit FP compare RR */
1336
uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
1338
float32 v1 = env->fregs[f1].l.upper;
1339
float32 v2 = env->fregs[f2].l.upper;;
1340
HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
1342
return set_cc_f32(v1, v2);
1345
/* 64-bit FP compare RR */
1346
uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
1348
float64 v1 = env->fregs[f1].d;
1349
float64 v2 = env->fregs[f2].d;;
1350
HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
1352
return set_cc_f64(v1, v2);
1355
/* 128-bit FP compare RR */
1356
uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
1359
v1.ll.upper = env->fregs[f1].ll;
1360
v1.ll.lower = env->fregs[f1 + 2].ll;
1362
v2.ll.upper = env->fregs[f2].ll;
1363
v2.ll.lower = env->fregs[f2 + 2].ll;
1365
return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
1369
/* 64-bit FP compare RM */
1370
uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
1372
float64 v1 = env->fregs[f1].d;
1375
HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
1377
return set_cc_f64(v1, v2.d);
1380
/* 64-bit FP addition RM */
1381
uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
1383
float64 v1 = env->fregs[f1].d;
1386
HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
1388
env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
1389
return set_cc_nz_f64(v1);
1392
/* 32-bit FP subtraction RM */
1393
void HELPER(seb)(uint32_t f1, uint32_t val)
1395
float32 v1 = env->fregs[f1].l.upper;
1398
env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
1401
/* 64-bit FP subtraction RM */
1402
uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
1404
float64 v1 = env->fregs[f1].d;
1407
env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
1408
return set_cc_nz_f64(v1);
1411
/* 64-bit FP multiplication RM */
1412
void HELPER(mdb)(uint32_t f1, uint64_t a2)
1414
float64 v1 = env->fregs[f1].d;
1417
HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
1419
env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
1422
/* 64-bit FP division RM */
1423
void HELPER(ddb)(uint32_t f1, uint64_t a2)
1425
float64 v1 = env->fregs[f1].d;
1428
HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
1430
env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
1433
static void set_round_mode(int m3)
1440
/* biased round no nearest */
1442
/* round to nearest */
1443
set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
1447
set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
1451
set_float_rounding_mode(float_round_up, &env->fpu_status);
1455
set_float_rounding_mode(float_round_down, &env->fpu_status);
1460
/* convert 32-bit float to 64-bit int */
1461
uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1463
float32 v2 = env->fregs[f2].l.upper;
1465
env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
1466
return set_cc_nz_f32(v2);
1469
/* convert 64-bit float to 64-bit int */
1470
uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1472
float64 v2 = env->fregs[f2].d;
1474
env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
1475
return set_cc_nz_f64(v2);
1478
/* convert 128-bit float to 64-bit int */
1479
uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1482
v2.ll.upper = env->fregs[f2].ll;
1483
v2.ll.lower = env->fregs[f2 + 2].ll;
1485
env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
1486
if (float128_is_any_nan(v2.q)) {
1488
} else if (float128_is_zero(v2.q)) {
1490
} else if (float128_is_neg(v2.q)) {
1497
/* convert 32-bit float to 32-bit int */
1498
uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1500
float32 v2 = env->fregs[f2].l.upper;
1502
env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1503
float32_to_int32(v2, &env->fpu_status);
1504
return set_cc_nz_f32(v2);
1507
/* convert 64-bit float to 32-bit int */
1508
uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1510
float64 v2 = env->fregs[f2].d;
1512
env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1513
float64_to_int32(v2, &env->fpu_status);
1514
return set_cc_nz_f64(v2);
1517
/* convert 128-bit float to 32-bit int */
1518
uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1521
v2.ll.upper = env->fregs[f2].ll;
1522
v2.ll.lower = env->fregs[f2 + 2].ll;
1523
env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1524
float128_to_int32(v2.q, &env->fpu_status);
1525
return set_cc_nz_f128(v2.q);
1528
/* load 32-bit FP zero */
1529
void HELPER(lzer)(uint32_t f1)
1531
env->fregs[f1].l.upper = float32_zero;
1534
/* load 64-bit FP zero */
1535
void HELPER(lzdr)(uint32_t f1)
1537
env->fregs[f1].d = float64_zero;
1540
/* load 128-bit FP zero */
1541
void HELPER(lzxr)(uint32_t f1)
1544
x.q = float64_to_float128(float64_zero, &env->fpu_status);
1545
env->fregs[f1].ll = x.ll.upper;
1546
env->fregs[f1 + 1].ll = x.ll.lower;
1549
/* 128-bit FP subtraction RR */
1550
uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
1553
v1.ll.upper = env->fregs[f1].ll;
1554
v1.ll.lower = env->fregs[f1 + 2].ll;
1556
v2.ll.upper = env->fregs[f2].ll;
1557
v2.ll.lower = env->fregs[f2 + 2].ll;
1559
res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
1560
env->fregs[f1].ll = res.ll.upper;
1561
env->fregs[f1 + 2].ll = res.ll.lower;
1562
return set_cc_nz_f128(res.q);
1565
/* 128-bit FP addition RR */
1566
uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
1569
v1.ll.upper = env->fregs[f1].ll;
1570
v1.ll.lower = env->fregs[f1 + 2].ll;
1572
v2.ll.upper = env->fregs[f2].ll;
1573
v2.ll.lower = env->fregs[f2 + 2].ll;
1575
res.q = float128_add(v1.q, v2.q, &env->fpu_status);
1576
env->fregs[f1].ll = res.ll.upper;
1577
env->fregs[f1 + 2].ll = res.ll.lower;
1578
return set_cc_nz_f128(res.q);
1581
/* 32-bit FP multiplication RR */
1582
void HELPER(meebr)(uint32_t f1, uint32_t f2)
1584
env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
1585
env->fregs[f2].l.upper,
1589
/* 64-bit FP division RR */
1590
void HELPER(ddbr)(uint32_t f1, uint32_t f2)
1592
env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
1596
/* 64-bit FP multiply and add RM */
1597
void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
1599
HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
1602
env->fregs[f1].d = float64_add(env->fregs[f1].d,
1603
float64_mul(v2.d, env->fregs[f3].d,
1608
/* 64-bit FP multiply and add RR */
1609
void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1611
HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1612
env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
1615
env->fregs[f1].d, &env->fpu_status);
1618
/* 64-bit FP multiply and subtract RR */
1619
void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1621
HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1622
env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
1625
env->fregs[f1].d, &env->fpu_status);
1628
/* 32-bit FP multiply and add RR */
1629
void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
1631
env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1632
float32_mul(env->fregs[f2].l.upper,
1633
env->fregs[f3].l.upper,
1638
/* convert 32-bit float to 64-bit float */
1639
void HELPER(ldeb)(uint32_t f1, uint64_t a2)
1643
env->fregs[f1].d = float32_to_float64(v2,
1647
/* convert 64-bit float to 128-bit float */
1648
void HELPER(lxdb)(uint32_t f1, uint64_t a2)
1653
v1.q = float64_to_float128(v2.d, &env->fpu_status);
1654
env->fregs[f1].ll = v1.ll.upper;
1655
env->fregs[f1 + 2].ll = v1.ll.lower;
1658
/* test data class 32-bit */
1659
uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
1661
float32 v1 = env->fregs[f1].l.upper;
1662
int neg = float32_is_neg(v1);
1665
HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
1666
if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1667
(float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1668
(float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1669
(float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1671
} else if (m2 & (1 << (9-neg))) {
1672
/* assume normalized number */
1676
/* FIXME: denormalized? */
1680
/* test data class 64-bit */
1681
uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
1683
float64 v1 = env->fregs[f1].d;
1684
int neg = float64_is_neg(v1);
1687
HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
1688
if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1689
(float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1690
(float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1691
(float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1693
} else if (m2 & (1 << (9-neg))) {
1694
/* assume normalized number */
1697
/* FIXME: denormalized? */
1701
/* test data class 128-bit */
1702
uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
1706
v1.ll.upper = env->fregs[f1].ll;
1707
v1.ll.lower = env->fregs[f1 + 2].ll;
1709
int neg = float128_is_neg(v1.q);
1710
if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
1711
(float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
1712
(float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
1713
(float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
1715
} else if (m2 & (1 << (9-neg))) {
1716
/* assume normalized number */
1719
/* FIXME: denormalized? */
1723
/* find leftmost one */
1724
uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
1729
while (!(v2 & 0x8000000000000000ULL) && v2) {
1736
env->regs[r1 + 1] = 0;
1739
env->regs[r1] = res;
1740
env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
1745
/* square root 64-bit RR */
1746
void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
1748
env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
1752
/* checksum: fold the operand-2 buffer into a 32-bit sum in r1 */
void HELPER(cksm)(uint32_t r1, uint32_t r2)
{
    uint64_t src = get_address_31fix(r2);
    uint64_t src_len = env->regs[(r2 + 1) & 15];
    uint64_t cksm = (uint32_t)env->regs[r1];

    while (src_len >= 4) {
        cksm += ldl(src);

        /* move to next word */
        src_len -= 4;
        src += 4;
    }

    /* fold the 1-3 trailing bytes, left-aligned in a word */
    switch (src_len) {
    case 0:
        break;
    case 1:
        cksm += ldub(src) << 24;
        break;
    case 2:
        cksm += lduw(src) << 16;
        break;
    case 3:
        cksm += lduw(src) << 16;
        cksm += ldub(src + 2) << 8;
        break;
    }

    /* indicate we've processed everything */
    env->regs[r2] = src + src_len;
    env->regs[(r2 + 1) & 15] = 0;

    /* store result: end-around carry of the 33-bit sum */
    env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
        ((uint32_t)cksm + (cksm >> 32));
}
1790
static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
1795
} else if (src < dst) {
1802
static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
1804
return cc_calc_ltgt_32(env, dst, 0);
1807
static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
1812
} else if (src < dst) {
1819
static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
1821
return cc_calc_ltgt_64(env, dst, 0);
1824
static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
1829
} else if (src < dst) {
1836
static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
1841
} else if (src < dst) {
1848
static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
1850
HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
1851
uint16_t r = val & mask;
1852
if (r == 0 || mask == 0) {
1854
} else if (r == mask) {
1861
/* set condition code for test under mask */
1862
static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
1864
uint16_t r = val & mask;
1865
HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
1866
if (r == 0 || mask == 0) {
1868
} else if (r == mask) {
1871
while (!(mask & 0x8000)) {
1883
static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
1888
static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
1891
if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1892
return 3; /* overflow */
1896
} else if (ar > 0) {
1904
static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
1914
if (ar < a1 || ar < a2) {
1922
static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
1925
if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
1926
return 3; /* overflow */
1930
} else if (ar > 0) {
1938
static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
1952
static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
1954
if ((uint64_t)dst == 0x8000000000000000ULL) {
1963
static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
1968
static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
1970
if ((uint64_t)dst == 0x8000000000000000ULL) {
1972
} else if (dst < 0) {
1974
} else if (dst > 0) {
1982
/* signed 32-bit addition: cc 3 on overflow, else sign of the result */
static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
                                      int32_t ar)
{
    if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

/* unsigned (logical) 32-bit addition */
static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
                                       uint32_t ar)
{
    if (ar == 0) {
        if (a1) {
            return 2; /* zero result with carry (wrapped) */
        } else {
            return 0; /* zero result, no carry */
        }
    } else {
        if (ar < a1 || ar < a2) {
            return 3; /* nonzero result with carry */
        } else {
            return 1; /* nonzero result, no carry */
        }
    }
}

/* signed 32-bit subtraction */
static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
                                      int32_t ar)
{
    if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
        return 3; /* overflow */
    } else {
        if (ar < 0) {
            return 1;
        } else if (ar > 0) {
            return 2;
        } else {
            return 0;
        }
    }
}

/* unsigned (logical) 32-bit subtraction */
static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
                                       uint32_t ar)
{
    if (ar == 0) {
        return 2; /* zero result, no borrow */
    } else {
        if (a1 < a2) {
            return 1; /* nonzero result with borrow */
        } else {
            return 3; /* nonzero result, no borrow */
        }
    }
}

/* load positive: cc 3 when negating INT32_MIN overflows */
static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst) {
        return 1;
    } else {
        return 0;
    }
}

/* load negative: result is always <= 0, cc 0 or 1 */
static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
{
    return !!dst;
}

/* load complement: cc 3 when negating INT32_MIN overflows */
static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
{
    if ((uint32_t)dst == 0x80000000UL) {
        return 3;
    } else if (dst < 0) {
        return 1;
    } else if (dst > 0) {
        return 2;
    } else {
        return 0;
    }
}
2075
/* calculate condition code for insert character under mask insn */
2076
static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
2078
HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
2084
} else if (val & 0x80000000) {
2091
if (!val || !mask) {
2107
/* shift left single (arithmetic): cc 3 on overflow, else sign of result */
static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
{
    uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
    uint64_t match, r;

    /* check if the sign bit stays the same */
    if (src & (1ULL << 63)) {
        match = mask;
    } else {
        match = 0;
    }

    if ((src & mask) != match) {
        /* overflow: a bit unequal to the sign was shifted out */
        return 3;
    }

    /* shift the magnitude, keep the original sign bit */
    r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));

    if ((int64_t)r == 0) {
        return 0;
    } else if ((int64_t)r < 0) {
        return 1;
    }

    return 2;
}
2136
static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
2137
uint64_t dst, uint64_t vr)
2146
/* cc_op value _is_ cc */
2149
case CC_OP_LTGT0_32:
2150
r = cc_calc_ltgt0_32(env, dst);
2152
case CC_OP_LTGT0_64:
2153
r = cc_calc_ltgt0_64(env, dst);
2156
r = cc_calc_ltgt_32(env, src, dst);
2159
r = cc_calc_ltgt_64(env, src, dst);
2161
case CC_OP_LTUGTU_32:
2162
r = cc_calc_ltugtu_32(env, src, dst);
2164
case CC_OP_LTUGTU_64:
2165
r = cc_calc_ltugtu_64(env, src, dst);
2168
r = cc_calc_tm_32(env, src, dst);
2171
r = cc_calc_tm_64(env, src, dst);
2174
r = cc_calc_nz(env, dst);
2177
r = cc_calc_add_64(env, src, dst, vr);
2180
r = cc_calc_addu_64(env, src, dst, vr);
2183
r = cc_calc_sub_64(env, src, dst, vr);
2186
r = cc_calc_subu_64(env, src, dst, vr);
2189
r = cc_calc_abs_64(env, dst);
2192
r = cc_calc_nabs_64(env, dst);
2195
r = cc_calc_comp_64(env, dst);
2199
r = cc_calc_add_32(env, src, dst, vr);
2202
r = cc_calc_addu_32(env, src, dst, vr);
2205
r = cc_calc_sub_32(env, src, dst, vr);
2208
r = cc_calc_subu_32(env, src, dst, vr);
2211
r = cc_calc_abs_64(env, dst);
2214
r = cc_calc_nabs_64(env, dst);
2217
r = cc_calc_comp_32(env, dst);
2221
r = cc_calc_icm_32(env, src, dst);
2224
r = cc_calc_slag(env, src, dst);
2227
case CC_OP_LTGT_F32:
2228
r = set_cc_f32(src, dst);
2230
case CC_OP_LTGT_F64:
2231
r = set_cc_f64(src, dst);
2234
r = set_cc_nz_f32(dst);
2237
r = set_cc_nz_f64(dst);
2241
cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
2244
HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__,
2245
cc_name(cc_op), src, dst, vr, r);
2249
/* external entry point for cc computation (used outside TCG helpers) */
uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}

/* TCG helper wrapper around do_calc_cc using the global env */
uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
                         uint64_t vr)
{
    return do_calc_cc(env, cc_op, src, dst, vr);
}
2261
/* convert 32-bit signed binary to 64-bit packed decimal */
uint64_t HELPER(cvd)(int32_t bin)
{
    /* positive 0 */
    uint64_t dec = 0x0c;
    int shift;

    if (bin < 0) {
        /* NOTE(review): -bin overflows for INT32_MIN — confirm callers
           cannot pass it, or widen before negating */
        bin = -bin;
        dec = 0x0d; /* negative sign nibble */
    }

    /* one decimal digit per nibble, above the sign nibble */
    for (shift = 4; (shift < 64) && bin; shift += 4) {
        int current_number = bin % 10;

        /* cast before shifting: an int shifted by >= 32 is UB */
        dec |= ((uint64_t)current_number) << shift;
        bin /= 10;
    }

    return dec;
}
2282
void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
2284
int len_dest = len >> 4;
2285
int len_src = len & 0xf;
2287
int second_nibble = 0;
2292
/* last byte is special, it only flips the nibbles */
2294
stb(dest, (b << 4) | (b >> 4));
2298
/* now pad every nibble with 0xf0 */
2300
while (len_dest > 0) {
2301
uint8_t cur_byte = 0;
2304
cur_byte = ldub(src);
2310
/* only advance one nibble at a time */
2311
if (second_nibble) {
2316
second_nibble = !second_nibble;
2319
cur_byte = (cur_byte & 0xf);
2323
stb(dest, cur_byte);
2327
/* translate: replace each of the len + 1 bytes via the trans table */
void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
{
    int i;

    /* len is the length code: len + 1 bytes are processed */
    for (i = 0; i <= len; i++) {
        uint8_t byte = ldub(array + i);
        uint8_t new_byte = ldub(trans + byte);

        stb(array + i, new_byte);
    }
}
2338
#ifndef CONFIG_USER_ONLY
2340
/* load a new PSW (mask + address) into the global env */
void HELPER(load_psw)(uint64_t mask, uint64_t addr)
{
    load_psw(env, mask, addr);
}
2346
static void program_interrupt(CPUState *env, uint32_t code, int ilc)
2348
qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);
2350
if (kvm_enabled()) {
2352
kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
2355
env->int_pgm_code = code;
2356
env->int_pgm_ilc = ilc;
2357
env->exception_index = EXCP_PGM;
2362
static void ext_interrupt(CPUState *env, int type, uint32_t param,
2365
cpu_inject_ext(env, type, param, param64);
2368
int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
2374
printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
2377
if (sccb & ~0x7ffffff8ul) {
2378
fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
2384
case SCLP_CMDW_READ_SCP_INFO:
2385
case SCLP_CMDW_READ_SCP_INFO_FORCED:
2386
while ((ram_size >> (20 + shift)) > 65535) {
2389
stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
2390
stb_phys(sccb + SCP_INCREMENT, 1 << shift);
2391
stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
2393
if (kvm_enabled()) {
2395
kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
2400
ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
2405
printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "x\n", sccb, code);
2415
/* SCLP service call */
2416
uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
2418
if (sclp_service_call(env, r1, r2)) {
2426
uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
2433
r = s390_virtio_hypercall(env, mem, code);
2449
program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
2456
void HELPER(stidp)(uint64_t a1)
2458
stq(a1, env->cpu_num);
2462
void HELPER(spx)(uint64_t a1)
2467
env->psa = prefix & 0xfffff000;
2468
qemu_log("prefix: %#x\n", prefix);
2469
tlb_flush_page(env, 0);
2470
tlb_flush_page(env, TARGET_PAGE_SIZE);
2474
uint32_t HELPER(sck)(uint64_t a1)
2476
/* XXX not implemented - is it necessary? */
2481
static inline uint64_t clock_value(CPUState *env)
2485
time = env->tod_offset +
2486
time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
2492
uint32_t HELPER(stck)(uint64_t a1)
2494
stq(a1, clock_value(env));
2499
/* Store Clock Extended */
2500
uint32_t HELPER(stcke)(uint64_t a1)
2503
/* basically the same value as stck */
2504
stq(a1 + 1, clock_value(env) | env->cpu_num);
2505
/* more fine grained than stck */
2507
/* XXX programmable fields */
2514
/* Set Clock Comparator */
2515
void HELPER(sckc)(uint64_t a1)
2517
uint64_t time = ldq(a1);
2519
if (time == -1ULL) {
2523
/* difference between now and then */
2524
time -= clock_value(env);
2526
time = (time * 125) >> 9;
2528
qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
2531
/* Store Clock Comparator */
2532
void HELPER(stckc)(uint64_t a1)
2539
void HELPER(spt)(uint64_t a1)
2541
uint64_t time = ldq(a1);
2543
if (time == -1ULL) {
2548
time = (time * 125) >> 9;
2550
qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
2553
/* Store CPU Timer */
2554
void HELPER(stpt)(uint64_t a1)
2560
/* Store System Information */
2561
uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
2566
if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
2567
((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
2568
/* valid function code, invalid reserved bits */
2569
program_interrupt(env, PGM_SPECIFICATION, 2);
2572
sel1 = r0 & STSI_R0_SEL1_MASK;
2573
sel2 = r1 & STSI_R1_SEL2_MASK;
2575
/* XXX: spec exception if sysib is not 4k-aligned */
2577
switch (r0 & STSI_LEVEL_MASK) {
2579
if ((sel1 == 1) && (sel2 == 1)) {
2580
/* Basic Machine Configuration */
2581
struct sysib_111 sysib;
2583
memset(&sysib, 0, sizeof(sysib));
2584
ebcdic_put(sysib.manuf, "QEMU ", 16);
2585
/* same as machine type number in STORE CPU ID */
2586
ebcdic_put(sysib.type, "QEMU", 4);
2587
/* same as model number in STORE CPU ID */
2588
ebcdic_put(sysib.model, "QEMU ", 16);
2589
ebcdic_put(sysib.sequence, "QEMU ", 16);
2590
ebcdic_put(sysib.plant, "QEMU", 4);
2591
cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2592
} else if ((sel1 == 2) && (sel2 == 1)) {
2593
/* Basic Machine CPU */
2594
struct sysib_121 sysib;
2596
memset(&sysib, 0, sizeof(sysib));
2597
/* XXX make different for different CPUs? */
2598
ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2599
ebcdic_put(sysib.plant, "QEMU", 4);
2600
stw_p(&sysib.cpu_addr, env->cpu_num);
2601
cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2602
} else if ((sel1 == 2) && (sel2 == 2)) {
2603
/* Basic Machine CPUs */
2604
struct sysib_122 sysib;
2606
memset(&sysib, 0, sizeof(sysib));
2607
stl_p(&sysib.capability, 0x443afc29);
2608
/* XXX change when SMP comes */
2609
stw_p(&sysib.total_cpus, 1);
2610
stw_p(&sysib.active_cpus, 1);
2611
stw_p(&sysib.standby_cpus, 0);
2612
stw_p(&sysib.reserved_cpus, 0);
2613
cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2620
if ((sel1 == 2) && (sel2 == 1)) {
2622
struct sysib_221 sysib;
2624
memset(&sysib, 0, sizeof(sysib));
2625
/* XXX make different for different CPUs? */
2626
ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2627
ebcdic_put(sysib.plant, "QEMU", 4);
2628
stw_p(&sysib.cpu_addr, env->cpu_num);
2629
stw_p(&sysib.cpu_id, 0);
2630
cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2631
} else if ((sel1 == 2) && (sel2 == 2)) {
2633
struct sysib_222 sysib;
2635
memset(&sysib, 0, sizeof(sysib));
2636
stw_p(&sysib.lpar_num, 0);
2638
/* XXX change when SMP comes */
2639
stw_p(&sysib.total_cpus, 1);
2640
stw_p(&sysib.conf_cpus, 1);
2641
stw_p(&sysib.standby_cpus, 0);
2642
stw_p(&sysib.reserved_cpus, 0);
2643
ebcdic_put(sysib.name, "QEMU ", 8);
2644
stl_p(&sysib.caf, 1000);
2645
stw_p(&sysib.dedicated_cpus, 0);
2646
stw_p(&sysib.shared_cpus, 0);
2647
cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2655
if ((sel1 == 2) && (sel2 == 2)) {
2657
struct sysib_322 sysib;
2659
memset(&sysib, 0, sizeof(sysib));
2661
/* XXX change when SMP comes */
2662
stw_p(&sysib.vm[0].total_cpus, 1);
2663
stw_p(&sysib.vm[0].conf_cpus, 1);
2664
stw_p(&sysib.vm[0].standby_cpus, 0);
2665
stw_p(&sysib.vm[0].reserved_cpus, 0);
2666
ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
2667
stl_p(&sysib.vm[0].caf, 1000);
2668
ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
2669
cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2675
case STSI_LEVEL_CURRENT:
2676
env->regs[0] = STSI_LEVEL_3;
2686
/* load control registers r1..r3 (wrapping at 16) from memory, 64-bit */
void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = ldq(src);
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    /* control registers affect address translation */
    tlb_flush(env, 1);
}

/* load control registers, 32-bit: only the low halves are replaced */
void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t src = a2;

    for (i = r1;; i = (i + 1) % 16) {
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    tlb_flush(env, 1);
}

/* store control registers r1..r3 to memory, 64-bit */
void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        stq(dest, env->cregs[i]);
        dest += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }
}

/* store control registers, 32-bit halves */
void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
{
    int i;
    uint64_t dest = a2;

    for (i = r1;; i = (i + 1) % 16) {
        stl(dest, env->cregs[i]);
        dest += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }
}
2752
/* test protection */
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement - always reports "fetch and store allowed" */

    return 0;
}
2759
/* insert storage key extended */
2760
uint64_t HELPER(iske)(uint64_t r2)
2762
uint64_t addr = get_address(0, 0, r2);
2764
if (addr > ram_size) {
2768
return env->storage_keys[addr / TARGET_PAGE_SIZE];
2771
/* set storage key extended */
2772
void HELPER(sske)(uint32_t r1, uint64_t r2)
2774
uint64_t addr = get_address(0, 0, r2);
2776
if (addr > ram_size) {
2780
env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
2783
/* reset reference bit extended */
2784
uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
2788
if (r2 > ram_size) {
2792
key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
2793
re = key & (SK_R | SK_C);
2794
env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);
2799
* 0 Reference bit zero; change bit zero
2800
* 1 Reference bit zero; change bit one
2801
* 2 Reference bit one; change bit zero
2802
* 3 Reference bit one; change bit one
2808
/* compare and swap and purge */
2809
uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
2812
uint32_t o1 = env->regs[r1];
2813
uint64_t a2 = get_address_31fix(r2) & ~3ULL;
2814
uint32_t o2 = ldl(a2);
2817
stl(a2, env->regs[(r1 + 1) & 15]);
2818
if (env->regs[r2] & 0x3) {
2819
/* flush TLB / ALB */
2824
env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
2831
static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
2834
target_ulong src, dest;
2835
int flags, cc = 0, i;
2839
} else if (l > 256) {
2845
if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
2848
dest |= a1 & ~TARGET_PAGE_MASK;
2850
if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
2853
src |= a2 & ~TARGET_PAGE_MASK;
2855
/* XXX replace w/ memcpy */
2856
for (i = 0; i < l; i++) {
2857
/* XXX be more clever */
2858
if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
2859
(((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
2860
mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
2863
stb_phys(dest + i, ldub_phys(src + i));
2869
/* move to secondary: copy from primary to secondary address space */
uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __FUNCTION__, l, a1, a2);

    return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
}

/* move to primary: copy from secondary to primary address space */
uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
{
    HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
               __FUNCTION__, l, a1, a2);

    return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
}
2885
uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
2889
HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
2890
__FUNCTION__, order_code, r1, cpu_addr);
2892
/* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2893
as parameter (input). Status (output) is always R1. */
2895
switch (order_code) {
2900
/* enumerate CPU status */
2902
/* XXX implement when SMP comes */
2905
env->regs[r1] &= 0xffffffff00000000ULL;
2908
#if !defined (CONFIG_USER_ONLY)
2910
qemu_system_reset_request();
2914
qemu_system_shutdown_request();
2920
fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
2927
/* set address space control (fast) */
void HELPER(sacf)(uint64_t a1)
{
    HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);

    switch (a1 & 0xf00) {
    case 0x000:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_PRIMARY;
        break;
    case 0x100:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_SECONDARY;
        break;
    case 0x300:
        env->psw.mask &= ~PSW_MASK_ASC;
        env->psw.mask |= PSW_ASC_HOME;
        break;
    default:
        qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
        program_interrupt(env, PGM_SPECIFICATION, 2);
        break;
    }
}
2951
/* invalidate pte */
2952
void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
2954
uint64_t page = vaddr & TARGET_PAGE_MASK;
2957
/* XXX broadcast to other CPUs */
2959
/* XXX Linux is nice enough to give us the exact pte address.
2960
According to spec we'd have to find it out ourselves */
2961
/* XXX Linux is fine with overwriting the pte, the spec requires
2962
us to only set the invalid bit */
2963
stq_phys(pte_addr, pte | _PAGE_INVALID);
2965
/* XXX we exploit the fact that Linux passes the exact virtual
2966
address here - it's not obliged to! */
2967
tlb_flush_page(env, page);
2969
/* XXX 31-bit hack */
2970
if (page & 0x80000000) {
2971
tlb_flush_page(env, page & ~0x80000000);
2973
tlb_flush_page(env, page | 0x80000000);
2977
/* flush local tlb */
2978
void HELPER(ptlb)(void)
2983
/* store using real address */
2984
void HELPER(stura)(uint64_t addr, uint32_t v1)
2986
stw_phys(get_address(0, 0, addr), v1);
2989
/* load real address */
2990
uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
2993
int old_exc = env->exception_index;
2994
uint64_t asc = env->psw.mask & PSW_MASK_ASC;
2998
/* XXX incomplete - has more corner cases */
2999
if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
3000
program_interrupt(env, PGM_SPECIAL_OP, 2);
3003
env->exception_index = old_exc;
3004
if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
3007
if (env->exception_index == EXCP_PGM) {
3008
ret = env->int_pgm_code | 0x80000000;
3010
ret |= addr & ~TARGET_PAGE_MASK;
3012
env->exception_index = old_exc;
3014
if (!(env->psw.mask & PSW_MASK_64)) {
3015
env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
3017
env->regs[r1] = ret;