/* $Id: IEMAllCImplStrInstr.cpp.h $ */
/** @file
 * IEM - String Instruction Implementation Code Template.
 */

/*
 * Copyright (C) 2011-2012 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */
/*******************************************************************************
*   Defined Constants And Macros                                               *
*******************************************************************************/
#if OP_SIZE == 8
# define OP_rAX     al
#elif OP_SIZE == 16
# define OP_rAX     ax
#elif OP_SIZE == 32
# define OP_rAX     eax
#elif OP_SIZE == 64
# define OP_rAX     rax
#else
# error "Bad OP_SIZE."
#endif
#define OP_TYPE     RT_CONCAT3(uint,OP_SIZE,_t)

#if ADDR_SIZE == 16
# define ADDR_rDI   di
# define ADDR_rSI   si
# define ADDR_rCX   cx
# define ADDR2_TYPE uint32_t
#elif ADDR_SIZE == 32
# define ADDR_rDI   edi
# define ADDR_rSI   esi
# define ADDR_rCX   ecx
# define ADDR2_TYPE uint32_t
#elif ADDR_SIZE == 64
# define ADDR_rDI   rdi
# define ADDR_rSI   rsi
# define ADDR_rCX   rcx
# define ADDR2_TYPE uint64_t
# define IS_64_BIT_CODE(a_pIemCpu) (true)
#else
# error "Bad ADDR_SIZE."
#endif
#define ADDR_TYPE   RT_CONCAT3(uint,ADDR_SIZE,_t)

#if ADDR_SIZE == 64 || OP_SIZE == 64
# define IS_64_BIT_CODE(a_pIemCpu) (true)
#elif ADDR_SIZE == 32
# define IS_64_BIT_CODE(a_pIemCpu) ((a_pIemCpu)->enmCpuMode == IEMMODE_64BIT)
#else
# define IS_64_BIT_CODE(a_pIemCpu) (false)
#endif
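/*
 * Illustration (not part of the template): the parent compilation unit is
 * expected to instantiate this template by defining the two size parameters
 * and including the file, along these lines:
 *
 *     #define OP_SIZE     16
 *     #define ADDR_SIZE   32
 *     #include "IEMAllCImplStrInstr.cpp.h"
 *
 * With those values, OP_TYPE expands to uint16_t, ADDR_TYPE to uint32_t, and
 * RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE) pastes together
 * the function name iemCImpl_repe_cmps_op16_addr32.
 */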
/**
 * Implements 'REPE CMPS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repe_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uSrc1Base;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint64_t uSrc2Base;
    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr       = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE    uSrc1AddrReg = pCtx->ADDR_rSI;
    ADDR_TYPE    uSrc2AddrReg = pCtx->ADDR_rDI;
    uint32_t     uEFlags      = pCtx->eflags.u;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
        ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
        uint32_t   cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftSrc1Page > uCounterReg)
            cLeftSrc1Page = uCounterReg;
        uint32_t   cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        uint32_t   cLeftPage     = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);

        if (   cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Optimize reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uSrc1AddrReg < pSrc1Hid->u32Limit
                    && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
                    && uSrc2AddrReg < pCtx->es.u32Limit
                    && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysSrc1Mem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            RTGCPHYS GCPhysSrc2Mem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockSrc2Mem;
            OP_TYPE const *puSrc2Mem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
            if (rcStrict == VINF_SUCCESS)
            {
                PGMPAGEMAPLOCK PgLockSrc1Mem;
                OP_TYPE const *puSrc1Mem;
                rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
                if (rcStrict == VINF_SUCCESS)
                {
                    if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
                    {
                        /* All matches, only compare the last item to get the right eflags. */
                        RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[cLeftPage-1], puSrc2Mem[cLeftPage-1], &uEFlags);
                        uSrc1AddrReg += cLeftPage * cbIncr;
                        uSrc2AddrReg += cLeftPage * cbIncr;
                        uCounterReg  -= cLeftPage;
                    }
                    else
                    {
                        /* Some mismatch, compare each item (and keep volatile
                           memory in mind). */
                        uint32_t off = 0;
                        do
                        {
                            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
                            off++;
                        } while (   off < cLeftPage
                                 && (uEFlags & X86_EFL_ZF));
                        uSrc1AddrReg += cbIncr * off;
                        uSrc2AddrReg += cbIncr * off;
                        uCounterReg  -= off;
                    }

                    /* Update the registers before looping. */
                    pCtx->ADDR_rCX = uCounterReg;
                    pCtx->ADDR_rSI = uSrc1AddrReg;
                    pCtx->ADDR_rDI = uSrc2AddrReg;
                    pCtx->eflags.u = uEFlags;

                    iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
                    iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
                    continue;
                }
                iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uValue1;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            OP_TYPE uValue2;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);

            pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
            pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && (uEFlags & X86_EFL_ZF));
    } while (   uCounterReg != 0
             && (uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
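/*
 * Worked example of the page chunking above, assuming 4KB pages: with
 * OP_SIZE=16 and a source operand at virtual address 0x20FFA, the offset
 * into the page is 0xFFA, so PAGE_SIZE - 0xFFA = 6 bytes remain, giving
 * cLeftSrc1Page = 6 / 2 = 3 items before the boundary.  cLeftPage is the
 * minimum over both operands (and the count register), which is what keeps
 * the memcmp() from running past either mapped page.
 */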
/**
 * Implements 'REPNE CMPS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_repne_cmps_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pSrc1Hid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uSrc1Base;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrc1Hid, iEffSeg, &uSrc1Base);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint64_t uSrc2Base;
    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uSrc2Base);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr       = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE    uSrc1AddrReg = pCtx->ADDR_rSI;
    ADDR_TYPE    uSrc2AddrReg = pCtx->ADDR_rDI;
    uint32_t     uEFlags      = pCtx->eflags.u;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE uVirtSrc1Addr = uSrc1AddrReg + (ADDR2_TYPE)uSrc1Base;
        ADDR2_TYPE uVirtSrc2Addr = uSrc2AddrReg + (ADDR2_TYPE)uSrc2Base;
        uint32_t   cLeftSrc1Page = (PAGE_SIZE - (uVirtSrc1Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftSrc1Page > uCounterReg)
            cLeftSrc1Page = uCounterReg;
        uint32_t   cLeftSrc2Page = (PAGE_SIZE - (uVirtSrc2Addr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        uint32_t   cLeftPage     = RT_MIN(cLeftSrc1Page, cLeftSrc2Page);

        if (   cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Optimize reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uSrc1AddrReg < pSrc1Hid->u32Limit
                    && uSrc1AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrc1Hid->u32Limit
                    && uSrc2AddrReg < pCtx->es.u32Limit
                    && uSrc2AddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysSrc1Mem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc1Addr, IEM_ACCESS_DATA_R, &GCPhysSrc1Mem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            RTGCPHYS GCPhysSrc2Mem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrc2Addr, IEM_ACCESS_DATA_R, &GCPhysSrc2Mem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockSrc2Mem;
            OP_TYPE const *puSrc2Mem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, (void **)&puSrc2Mem, &PgLockSrc2Mem);
            if (rcStrict == VINF_SUCCESS)
            {
                PGMPAGEMAPLOCK PgLockSrc1Mem;
                OP_TYPE const *puSrc1Mem;
                rcStrict = iemMemPageMap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, (void **)&puSrc1Mem, &PgLockSrc1Mem);
                if (rcStrict == VINF_SUCCESS)
                {
                    if (!memcmp(puSrc2Mem, puSrc1Mem, cLeftPage * (OP_SIZE / 8)))
                    {
                        /* Everything matches, so the very first compare sets ZF
                           and terminates REPNE after one item.  (memcmp cannot
                           prove that *all* items mismatch, so the whole-page
                           shortcut of the REPE variant has no counterpart
                           here.) */
                        RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[0], puSrc2Mem[0], &uEFlags);
                        uSrc1AddrReg += cbIncr;
                        uSrc2AddrReg += cbIncr;
                        uCounterReg  -= 1;
                    }
                    else
                    {
                        /* Some items differ; compare each one until a match
                           sets ZF or the page is done (and keep volatile
                           memory in mind). */
                        uint32_t off = 0;
                        do
                        {
                            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&puSrc1Mem[off], puSrc2Mem[off], &uEFlags);
                            off++;
                        } while (   off < cLeftPage
                                 && !(uEFlags & X86_EFL_ZF));
                        uSrc1AddrReg += cbIncr * off;
                        uSrc2AddrReg += cbIncr * off;
                        uCounterReg  -= off;
                    }

                    /* Update the registers before looping. */
                    pCtx->ADDR_rCX = uCounterReg;
                    pCtx->ADDR_rSI = uSrc1AddrReg;
                    pCtx->ADDR_rDI = uSrc2AddrReg;
                    pCtx->eflags.u = uEFlags;

                    iemMemPageUnmap(pIemCpu, GCPhysSrc1Mem, IEM_ACCESS_DATA_R, puSrc1Mem, &PgLockSrc1Mem);
                    iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
                    continue;
                }
                iemMemPageUnmap(pIemCpu, GCPhysSrc2Mem, IEM_ACCESS_DATA_R, puSrc2Mem, &PgLockSrc2Mem);
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uValue1;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue1, iEffSeg, uSrc1AddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            OP_TYPE uValue2;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue2, X86_SREG_ES, uSrc2AddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)(&uValue1, uValue2, &uEFlags);

            pCtx->ADDR_rSI = uSrc1AddrReg += cbIncr;
            pCtx->ADDR_rDI = uSrc2AddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && !(uEFlags & X86_EFL_ZF));
    } while (   uCounterReg != 0
             && !(uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
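/*
 * Direction flag example for the setup shared by both CMPS variants: in the
 * OP_SIZE=16 instantiation, cbIncr is +2 with EFLAGS.DF clear and -2 with it
 * set, so after N item comparisons the guest sees rSI/rDI moved by N*2 bytes
 * in the right direction while rCX has dropped by N, exactly as if the
 * instruction had been iterated one element at a time.
 */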
/**
 * Implements 'REPE SCAS'.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repe_scas_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    uint64_t uBaseAddr;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const  cbIncr    = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const uValueReg = pCtx->OP_rAX;
    ADDR_TYPE     uAddrReg  = pCtx->ADDR_rDI;
    uint32_t      uEFlags   = pCtx->eflags.u;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t   cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pCtx->es.u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Search till we find a mismatching item. */
                OP_TYPE  uTmpValue;
                bool     fQuit;
                uint32_t i = 0;
                do
                {
                    uTmpValue = puMem[i++];
                    fQuit = uTmpValue != uValueReg;
                } while (i < cLeftPage && !fQuit);

                /* Update the regs. */
                RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
                pCtx->ADDR_rCX = uCounterReg -= i;
                pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
                pCtx->eflags.u = uEFlags;
                Assert(!(uEFlags & X86_EFL_ZF) == fQuit);
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                if (fQuit)
                    break;

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uTmpValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);

            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && (uEFlags & X86_EFL_ZF));
    } while (   uCounterReg != 0
             && (uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
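/*
 * Note on the scan loop above: fQuit mirrors what the final flags will say.
 * For REPE SCAS, fQuit = (item != rAX), and the closing cmp of rAX against
 * the item that stopped the scan leaves ZF clear exactly when fQuit is set,
 * which is the invariant the Assert() checks.
 */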
/**
 * Implements 'REPNE SCAS'.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_repne_scas_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    uint64_t uBaseAddr;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const  cbIncr    = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const uValueReg = pCtx->OP_rAX;
    ADDR_TYPE     uAddrReg  = pCtx->ADDR_rDI;
    uint32_t      uEFlags   = pCtx->eflags.u;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t   cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pCtx->es.u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Search till we find a matching item. */
                OP_TYPE  uTmpValue;
                bool     fQuit;
                uint32_t i = 0;
                do
                {
                    uTmpValue = puMem[i++];
                    fQuit = uTmpValue == uValueReg;
                } while (i < cLeftPage && !fQuit);

                /* Update the regs. */
                RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);
                pCtx->ADDR_rCX = uCounterReg -= i;
                pCtx->ADDR_rDI = uAddrReg += i * cbIncr;
                pCtx->eflags.u = uEFlags;
                Assert(!!(uEFlags & X86_EFL_ZF) == fQuit);
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                if (fQuit)
                    break;

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uTmpValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, X86_SREG_ES, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            RT_CONCAT(iemAImpl_cmp_u,OP_SIZE)((OP_TYPE *)&uValueReg, uTmpValue, &uEFlags);

            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            pCtx->eflags.u = uEFlags;
            cLeftPage--;
        } while (   (int32_t)cLeftPage > 0
                 && !(uEFlags & X86_EFL_ZF));
    } while (   uCounterReg != 0
             && !(uEFlags & X86_EFL_ZF));

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
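/*
 * Classic guest usage of the REPNE SCAS path implemented above (illustrative
 * guest code, not emulator code): computing a string length.
 *
 *     xor   al, al            ; scan for the terminating zero byte
 *     mov   rcx, -1           ; "unlimited" count
 *     repne scasb             ; scan ES:[rDI], stops when ZF becomes set
 *     not   rcx
 *     dec   rcx               ; rcx = strlen(ES:rDI)
 */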
/**
 * Implements 'REP MOVS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_movs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uSrcBase;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uSrcBase);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint64_t uDstBase;
    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uDstBase);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr      = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE    uSrcAddrReg = pCtx->ADDR_rSI;
    ADDR_TYPE    uDstAddrReg = pCtx->ADDR_rDI;

    /*
     * Be careful with handle bypassing.
     */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * If we're reading back what we write, we have to flag it to the
     * verification code to prevent a false positive.
     * Note! This doesn't take aliasing or wrapping into account - lazy bird.
     */
#ifdef IEM_VERIFICATION_MODE_FULL
    if (   IEM_VERIFICATION_ENABLED(pIemCpu)
        && (cbIncr > 0
            ?    uSrcAddrReg <= uDstAddrReg
              && uSrcAddrReg + cbIncr * uCounterReg > uDstAddrReg
            :    uDstAddrReg <= uSrcAddrReg
              && uDstAddrReg + cbIncr * uCounterReg > uSrcAddrReg))
        pIemCpu->fOverlappingMovs = true;
#endif

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE uVirtSrcAddr = uSrcAddrReg + (ADDR2_TYPE)uSrcBase;
        ADDR2_TYPE uVirtDstAddr = uDstAddrReg + (ADDR2_TYPE)uDstBase;
        uint32_t   cLeftSrcPage = (PAGE_SIZE - (uVirtSrcAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftSrcPage > uCounterReg)
            cLeftSrcPage = uCounterReg;
        uint32_t   cLeftDstPage = (PAGE_SIZE - (uVirtDstAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        uint32_t   cLeftPage    = RT_MIN(cLeftSrcPage, cLeftDstPage);

        if (   cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uSrcAddrReg < pSrcHid->u32Limit
                    && uSrcAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit
                    && uDstAddrReg < pCtx->es.u32Limit
                    && uDstAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysSrcMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtSrcAddr, IEM_ACCESS_DATA_R, &GCPhysSrcMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            RTGCPHYS GCPhysDstMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtDstAddr, IEM_ACCESS_DATA_W, &GCPhysDstMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockDstMem;
            OP_TYPE *puDstMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, (void **)&puDstMem, &PgLockDstMem);
            if (rcStrict == VINF_SUCCESS)
            {
                PGMPAGEMAPLOCK PgLockSrcMem;
                OP_TYPE const *puSrcMem;
                rcStrict = iemMemPageMap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, (void **)&puSrcMem, &PgLockSrcMem);
                if (rcStrict == VINF_SUCCESS)
                {
                    Assert(   (GCPhysSrcMem        >> PAGE_SHIFT) != (GCPhysDstMem        >> PAGE_SHIFT)
                           || ((uintptr_t)puSrcMem >> PAGE_SHIFT) == ((uintptr_t)puDstMem >> PAGE_SHIFT));

                    /* Perform the operation exactly (don't use memcpy to avoid
                       having to consider how its implementation would affect
                       any overlapping source and destination area). */
                    OP_TYPE const *puSrcCur = puSrcMem;
                    OP_TYPE       *puDstCur = puDstMem;
                    uint32_t       cTodo    = cLeftPage;
                    while (cTodo-- > 0)
                        *puDstCur++ = *puSrcCur++;

                    /* Update the registers. */
                    pCtx->ADDR_rSI = uSrcAddrReg += cLeftPage * cbIncr;
                    pCtx->ADDR_rDI = uDstAddrReg += cLeftPage * cbIncr;
                    pCtx->ADDR_rCX = uCounterReg -= cLeftPage;

                    iemMemPageUnmap(pIemCpu, GCPhysSrcMem, IEM_ACCESS_DATA_R, puSrcMem, &PgLockSrcMem);
                    iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
                    continue;
                }
                iemMemPageUnmap(pIemCpu, GCPhysDstMem, IEM_ACCESS_DATA_W, puDstMem, &PgLockDstMem);
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uSrcAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uDstAddrReg, uValue);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            pCtx->ADDR_rSI = uSrcAddrReg += cbIncr;
            pCtx->ADDR_rDI = uDstAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
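/*
 * Worked example for the overlap test above: with DF clear (cbIncr > 0),
 * uSrcAddrReg=0x1000, uDstAddrReg=0x1002, uCounterReg=4 and OP_SIZE=8, the
 * source range [0x1000,0x1004) satisfies 0x1000 <= 0x1002 and
 * 0x1000 + 4 > 0x1002, so fOverlappingMovs is set and the verifier knows
 * destination bytes will be read back as source data.
 */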
/**
 * Implements 'REP STOS'.
 */
IEM_CIMPL_DEF_0(RT_CONCAT4(iemCImpl_stos_,OP_rAX,_m,ADDR_SIZE))
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    uint64_t uBaseAddr;
    VBOXSTRICTRC rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const  cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    OP_TYPE const uValue   = pCtx->OP_rAX;
    ADDR_TYPE     uAddrReg = pCtx->ADDR_rDI;

    /*
     * Be careful with handle bypassing.
     */
    /** @todo Permit doing a page if correctly aligned. */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t   cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pCtx->es.u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, do block processing
             * until the end of the current page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Update the regs first so we can loop on cLeftPage. */
                pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
                pCtx->ADDR_rDI = uAddrReg += cLeftPage * cbIncr;

                /* Do the memsetting. */
#if OP_SIZE == 8
                memset(puMem, uValue, cLeftPage);
/*#elif OP_SIZE == 32
                ASMMemFill32(puMem, cLeftPage * (OP_SIZE / 8), uValue);*/
#else
                OP_TYPE *puCur = puMem;
                uint32_t cTodo = cLeftPage;
                while (cTodo-- > 0)
                    *puCur++ = uValue;
#endif

                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            rcStrict = RT_CONCAT(iemMemStoreDataU,OP_SIZE)(pIemCpu, X86_SREG_ES, uAddrReg, uValue);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
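/*
 * Why memset() is only used for OP_SIZE == 8 above: memset() replicates a
 * single byte, so storing the 16-bit value 0x1234 with it would write
 * 0x3434 patterns instead of 0x1234 ones.  The wider instantiations
 * therefore use the explicit store loop (an ASMMemFill32 alternative for
 * OP_SIZE == 32 is sketched in the comment but not enabled).
 */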
/**
 * Implements 'REP LODS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_lods_,OP_rAX,_m,ADDR_SIZE), int8_t, iEffSeg)
{
    PCPUMCTX pCtx = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pSrcHid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uBaseAddr;
    VBOXSTRICTRC rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pSrcHid, iEffSeg, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE    uAddrReg = pCtx->ADDR_rSI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t   cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pSrcHid->u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pSrcHid->u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, we can get away with
             * just reading the last value on the page.
             */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                /* Only get the last byte, the rest doesn't matter in direct access mode. */
#if OP_SIZE == 32
                pCtx->rax = puMem[cLeftPage - 1];
#else
                pCtx->OP_rAX = puMem[cLeftPage - 1];
#endif
                pCtx->ADDR_rCX = uCounterReg -= cLeftPage;
                pCtx->ADDR_rSI = uAddrReg += cLeftPage * cbIncr;
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         */
        do
        {
            OP_TYPE uTmpValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uTmpValue, iEffSeg, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;
#if OP_SIZE == 32
            pCtx->rax = uTmpValue;
#else
            pCtx->OP_rAX = uTmpValue;
#endif
            pCtx->ADDR_rSI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            cLeftPage--;
        } while ((int32_t)cLeftPage > 0);
        if (rcStrict != VINF_SUCCESS)
            break;
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
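/*
 * The OP_SIZE == 32 special case above models implicit zero extension: in
 * 64-bit mode a write to EAX clears bits 63:32 of RAX, so assigning the
 * fetched value to pCtx->rax (e.g. 0x89ABCDEF becomes RAX =
 * 0x0000000089ABCDEF) reproduces what 'lodsd' does on real hardware, while
 * the 8/16-bit forms only touch AL/AX.
 */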
#if OP_SIZE != 64 /* no 64-bit operand size exists for INS/OUTS */

/**
 * Implements 'INS' (no rep)
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
{
    PVM          pVM  = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX     pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC rcStrict;

    /*
     * Be careful with handle bypassing.
     */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * ASSUMES the #GP for I/O permission is taken first, then any #GP for
     * segmentation and finally any #PF due to virtual address translation.
     * ASSUMES nothing is read from the I/O port before traps are taken.
     */
    if (!fIoChecked)
    {
        rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    OP_TYPE *puMem;
    rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, pCtx->ADDR_rDI, IEM_ACCESS_DATA_W);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    uint32_t u32Value = 0;
    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
        rcStrict = IOMIOPortRead(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, &u32Value, OP_SIZE / 8);
    else
        rcStrict = iemVerifyFakeIOPortRead(pIemCpu, pCtx->dx, &u32Value, OP_SIZE / 8);
    if (IOM_SUCCESS(rcStrict))
    {
        *puMem = (OP_TYPE)u32Value;
        VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
        if (RT_LIKELY(rcStrict2 == VINF_SUCCESS))
        {
            if (!pCtx->eflags.Bits.u1DF)
                pCtx->ADDR_rDI += OP_SIZE / 8;
            else
                pCtx->ADDR_rDI -= OP_SIZE / 8;
            iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        }
        else
        {
            /* iemMemMap already checked permissions, so this may only be real
               errors or access handler meddling. The access handler case is
               going to cause misbehavior if the instruction is re-interpreted
               or something, so we fail with an internal error here instead. */
            AssertLogRelFailedReturn(VERR_IEM_IPE_1);
        }
        if (rcStrict != VINF_SUCCESS)
            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
    }
    return rcStrict;
}
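/*
 * Guest-side view of the function above (illustrative): a single 'insb'
 * reads one byte from port DX into ES:[rDI] and then steps rDI by +1 or -1
 * depending on EFLAGS.DF.  The memory operand is mapped before and committed
 * after the port read, so a store that would fault never leaves the port
 * read half done.
 */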
/**
 * Implements 'REP INS'.
 */
IEM_CIMPL_DEF_1(RT_CONCAT4(iemCImpl_rep_ins_op,OP_SIZE,_addr,ADDR_SIZE), bool, fIoChecked)
{
    PVM      pVM   = IEMCPU_TO_VM(pIemCpu);
    PVMCPU   pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
    PCPUMCTX pCtx  = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    uint16_t const u16Port = pCtx->dx;
    VBOXSTRICTRC   rcStrict;
    if (!fIoChecked)
    {
        rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    uint64_t uBaseAddr;
    rcStrict = iemMemSegCheckWriteAccessEx(pIemCpu, &pCtx->es, X86_SREG_ES, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE    uAddrReg = pCtx->ADDR_rDI;

    /*
     * Be careful with handle bypassing.
     */
    if (pIemCpu->fBypassHandlers)
    {
        Log(("%s: declining because we're bypassing handlers\n", __FUNCTION__));
        return VERR_IEM_ASPECT_NOT_IMPLEMENTED;
    }

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t   cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pCtx->es.u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pCtx->es.u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_W, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, we would've liked to use
             * a string I/O method to do the work, but the current IOM
             * interface doesn't match our current approach. So, do a regular
             * one-value-at-a-time loop instead.
             */
            /** @todo Change the I/O manager interface to make use of
             *        mapped buffers instead of leaving those bits to the
             *        device implementation! */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                uint32_t off = 0;
                while (off < cLeftPage)
                {
                    uint32_t u32Value = 0;
                    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                        rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
                    else
                        rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
                    if (IOM_SUCCESS(rcStrict))
                    {
                        puMem[off] = (OP_TYPE)u32Value;
                        pCtx->ADDR_rDI = uAddrReg += cbIncr;
                        pCtx->ADDR_rCX = --uCounterReg;
                        off++;
                    }
                    if (rcStrict != VINF_SUCCESS)
                    {
                        if (IOM_SUCCESS(rcStrict))
                            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                        if (uCounterReg == 0)
                            iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
                        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);
                        return rcStrict;
                    }
                }
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_W, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         *
         * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
         *       I/O port, otherwise it wouldn't really be restartable.
         */
        /** @todo investigate what the CPU actually does with \#PF/\#GP
         *        during INS. */
        do
        {
            OP_TYPE *puMem;
            rcStrict = iemMemMap(pIemCpu, (void **)&puMem, OP_SIZE / 8, X86_SREG_ES, uAddrReg, IEM_ACCESS_DATA_W);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            uint32_t u32Value = 0;
            if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                rcStrict = IOMIOPortRead(pVM, pVCpu, u16Port, &u32Value, OP_SIZE / 8);
            else
                rcStrict = iemVerifyFakeIOPortRead(pIemCpu, u16Port, &u32Value, OP_SIZE / 8);
            if (!IOM_SUCCESS(rcStrict))
                return rcStrict;

            *puMem = (OP_TYPE)u32Value;
            VBOXSTRICTRC rcStrict2 = iemMemCommitAndUnmap(pIemCpu, puMem, IEM_ACCESS_DATA_W);
            AssertLogRelReturn(rcStrict2 == VINF_SUCCESS, VERR_IEM_IPE_1); /* See non-rep version. */

            pCtx->ADDR_rDI = uAddrReg += cbIncr;
            pCtx->ADDR_rCX = --uCounterReg;
            cLeftPage--;

            if (rcStrict != VINF_SUCCESS)
            {
                if (uCounterReg == 0)
                    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
                rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                return rcStrict;
            }
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
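/*
 * Typical guest usage of the REP INS path (illustrative guest code): reading
 * one 512-byte sector from an IDE controller's data port via PIO.
 *
 *     mov   dx, 01F0h         ; primary ATA data port
 *     mov   cx, 256           ; 256 words = 512 bytes
 *     rep   insw              ; fill ES:[rDI] from port DX
 */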
/**
 * Implements 'OUTS' (no rep)
 */
IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
{
    PVM          pVM  = IEMCPU_TO_VM(pIemCpu);
    PCPUMCTX     pCtx = pIemCpu->CTX_SUFF(pCtx);
    VBOXSTRICTRC rcStrict;

    /*
     * ASSUMES the #GP for I/O permission is taken first, then any #GP for
     * segmentation and finally any #PF due to virtual address translation.
     * ASSUMES nothing is read from the I/O port before traps are taken.
     */
    if (!fIoChecked)
    {
        rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, pCtx->dx, OP_SIZE / 8);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    OP_TYPE uValue;
    rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, pCtx->ADDR_rSI);
    if (rcStrict == VINF_SUCCESS)
    {
        if (!IEM_VERIFICATION_ENABLED(pIemCpu))
            rcStrict = IOMIOPortWrite(pVM, IEMCPU_TO_VMCPU(pIemCpu), pCtx->dx, uValue, OP_SIZE / 8);
        else
            rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, pCtx->dx, uValue, OP_SIZE / 8);
        if (IOM_SUCCESS(rcStrict))
        {
            if (!pCtx->eflags.Bits.u1DF)
                pCtx->ADDR_rSI += OP_SIZE / 8;
            else
                pCtx->ADDR_rSI -= OP_SIZE / 8;
            iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
            if (rcStrict != VINF_SUCCESS)
                rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
        }
    }
    return rcStrict;
}
/**
 * Implements 'REP OUTS'.
 */
IEM_CIMPL_DEF_2(RT_CONCAT4(iemCImpl_rep_outs_op,OP_SIZE,_addr,ADDR_SIZE), uint8_t, iEffSeg, bool, fIoChecked)
{
    PVM      pVM   = IEMCPU_TO_VM(pIemCpu);
    PVMCPU   pVCpu = IEMCPU_TO_VMCPU(pIemCpu);
    PCPUMCTX pCtx  = pIemCpu->CTX_SUFF(pCtx);

    /*
     * Setup.
     */
    uint16_t const u16Port = pCtx->dx;
    VBOXSTRICTRC   rcStrict;
    if (!fIoChecked)
    {
        rcStrict = iemHlpCheckPortIOPermission(pIemCpu, pCtx, u16Port, OP_SIZE / 8);
        if (rcStrict != VINF_SUCCESS)
            return rcStrict;
    }

    ADDR_TYPE uCounterReg = pCtx->ADDR_rCX;
    if (uCounterReg == 0)
    {
        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
        return VINF_SUCCESS;
    }

    PCCPUMSELREGHID pHid = iemSRegGetHid(pIemCpu, iEffSeg);
    uint64_t        uBaseAddr;
    rcStrict = iemMemSegCheckReadAccessEx(pIemCpu, pHid, iEffSeg, &uBaseAddr);
    if (rcStrict != VINF_SUCCESS)
        return rcStrict;

    int8_t const cbIncr   = pCtx->eflags.Bits.u1DF ? -(OP_SIZE / 8) : (OP_SIZE / 8);
    ADDR_TYPE    uAddrReg = pCtx->ADDR_rSI;

    /*
     * The loop.
     */
    do
    {
        /*
         * Do segmentation and virtual page stuff.
         */
        ADDR2_TYPE uVirtAddr = uAddrReg + (ADDR2_TYPE)uBaseAddr;
        uint32_t   cLeftPage = (PAGE_SIZE - (uVirtAddr & PAGE_OFFSET_MASK)) / (OP_SIZE / 8);
        if (cLeftPage > uCounterReg)
            cLeftPage = uCounterReg;
        if (   cLeftPage > 0 /* can be zero if unaligned, do one fallback round. */
            && cbIncr > 0    /** @todo Implement reverse direction string ops. */
            && (   IS_64_BIT_CODE(pIemCpu)
                || (   uAddrReg < pHid->u32Limit
                    && uAddrReg + (cLeftPage * (OP_SIZE / 8)) <= pHid->u32Limit)
               )
           )
        {
            RTGCPHYS GCPhysMem;
            rcStrict = iemMemPageTranslateAndCheckAccess(pIemCpu, uVirtAddr, IEM_ACCESS_DATA_R, &GCPhysMem);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            /*
             * If we can map the page without trouble, we would've liked to use
             * a string I/O method to do the work, but the current IOM
             * interface doesn't match our current approach. So, do a regular
             * one-value-at-a-time loop instead.
             */
            /** @todo Change the I/O manager interface to make use of
             *        mapped buffers instead of leaving those bits to the
             *        device implementation? */
            PGMPAGEMAPLOCK PgLockMem;
            OP_TYPE const *puMem;
            rcStrict = iemMemPageMap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, (void **)&puMem, &PgLockMem);
            if (rcStrict == VINF_SUCCESS)
            {
                uint32_t off = 0;
                while (off < cLeftPage)
                {
                    uint32_t u32Value = *puMem++;
                    if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                        rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, u32Value, OP_SIZE / 8);
                    else
                        rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, u32Value, OP_SIZE / 8);
                    if (IOM_SUCCESS(rcStrict))
                    {
                        pCtx->ADDR_rSI = uAddrReg += cbIncr;
                        pCtx->ADDR_rCX = --uCounterReg;
                        off++;
                    }
                    if (rcStrict != VINF_SUCCESS)
                    {
                        if (IOM_SUCCESS(rcStrict))
                            rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                        if (uCounterReg == 0)
                            iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
                        iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);
                        return rcStrict;
                    }
                }
                iemMemPageUnmap(pIemCpu, GCPhysMem, IEM_ACCESS_DATA_R, puMem, &PgLockMem);

                /* If unaligned, we drop thru and do the page crossing access
                   below. Otherwise, do the next page. */
                if (!(uVirtAddr & (OP_SIZE - 1)))
                    continue;
                if (uCounterReg == 0)
                    break;
                cLeftPage = 0;
            }
        }

        /*
         * Fallback - slow processing till the end of the current page.
         * In the cross page boundary case we will end up here with cLeftPage
         * as 0, we execute one loop then.
         *
         * Note! We ASSUME the CPU will raise #PF or #GP before accessing the
         *       I/O port, otherwise it wouldn't really be restartable.
         */
        /** @todo investigate what the CPU actually does with \#PF/\#GP
         *        during OUTS. */
        do
        {
            OP_TYPE uValue;
            rcStrict = RT_CONCAT(iemMemFetchDataU,OP_SIZE)(pIemCpu, &uValue, iEffSeg, uAddrReg);
            if (rcStrict != VINF_SUCCESS)
                return rcStrict;

            if (!IEM_VERIFICATION_ENABLED(pIemCpu))
                rcStrict = IOMIOPortWrite(pVM, pVCpu, u16Port, uValue, OP_SIZE / 8);
            else
                rcStrict = iemVerifyFakeIOPortWrite(pIemCpu, u16Port, uValue, OP_SIZE / 8);
            if (IOM_SUCCESS(rcStrict))
            {
                pCtx->ADDR_rSI = uAddrReg += cbIncr;
                pCtx->ADDR_rCX = --uCounterReg;
                cLeftPage--;
            }
            if (rcStrict != VINF_SUCCESS)
            {
                if (IOM_SUCCESS(rcStrict))
                {
                    if (uCounterReg == 0)
                        iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
                    rcStrict = iemSetPassUpStatus(pIemCpu, rcStrict);
                }
                return rcStrict;
            }
        } while ((int32_t)cLeftPage > 0);
    } while (uCounterReg != 0);

    /*
     * Done.
     */
    iemRegAddToRipAndClearRF(pIemCpu, cbInstr);
    return VINF_SUCCESS;
}
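/*
 * And the write direction (illustrative guest code): 'rep outsw' is the PIO
 * sector-write counterpart, sending 256 words from DS:[rSI] to the port.
 *
 *     mov   dx, 01F0h
 *     mov   cx, 256
 *     rep   outsw
 */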
#endif /* OP_SIZE != 64-bit */

#undef OP_rAX
#undef OP_SIZE
#undef OP_TYPE
#undef ADDR_SIZE
#undef ADDR_rDI
#undef ADDR_rSI
#undef ADDR_rCX
#undef ADDR2_TYPE
#undef ADDR_TYPE
#undef IS_64_BIT_CODE