/*
 * Copyright 1990,91 by Thomas Roell, Dinkelscherben, Germany.
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of Thomas Roell not be used in
 * advertising or publicity pertaining to distribution of the software without
 * specific, written prior permission. Thomas Roell makes no representations
 * about the suitability of this software for any purpose. It is provided
 * "as is" without express or implied warranty.
 *
 * THOMAS ROELL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THOMAS ROELL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Copyright (c) 1994-2003 by The XFree86 Project, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Except as contained in this notice, the name of the copyright holder(s)
 * and author(s) shall not be used in advertising or otherwise to promote
 * the sale, use or other dealings in this Software without prior written
 * authorization from the copyright holder(s) and author(s).
 */
54
#if defined(__SUNPRO_C)
55
# define DO_PROTOTYPES
58
/* Allow drivers to use the GCC-supported __inline__ and/or __inline. */
60
# if defined(__GNUC__)
61
/* gcc has __inline__ */
62
# elif defined(__HIGHC__)
63
# define __inline__ _Inline
65
# define __inline__ /**/
67
# endif /* __inline__ */
69
# if defined(__GNUC__)
70
/* gcc has __inline */
71
# elif defined(__HIGHC__)
72
# define __inline _Inline
74
# define __inline /**/
76
# endif /* __inline */
78
/* Support gcc's __FUNCTION__ for people using other compilers */
79
#if !defined(__GNUC__) && !defined(__FUNCTION__)
80
# define __FUNCTION__ __func__ /* C99 */
83
# if defined(NO_INLINE) || defined(DO_PROTOTYPES)
85
# if !defined(__arm__)
86
# if !defined(__sparc__) && !defined(__sparc) && !defined(__arm32__) \
87
&& !(defined(__alpha__) && defined(linux)) \
88
&& !(defined(__ia64__) && defined(linux)) \
90
extern void outb(unsigned short, unsigned char);
91
extern void outw(unsigned short, unsigned short);
92
extern void outl(unsigned short, unsigned int);
93
extern unsigned int inb(unsigned short);
94
extern unsigned int inw(unsigned short);
95
extern unsigned int inl(unsigned short);
97
# else /* __sparc__, __arm32__, __alpha__*/
99
extern void outb(unsigned long, unsigned char);
100
extern void outw(unsigned long, unsigned short);
101
extern void outl(unsigned long, unsigned int);
102
extern unsigned int inb(unsigned long);
103
extern unsigned int inw(unsigned long);
104
extern unsigned int inl(unsigned long);
106
# endif /* __sparc__, __arm32__, __alpha__ */
107
# endif /* __arm__ */
109
extern unsigned long ldq_u(unsigned long *);
110
extern unsigned long ldl_u(unsigned int *);
111
extern unsigned long ldw_u(unsigned short *);
112
extern void stq_u(unsigned long, unsigned long *);
113
extern void stl_u(unsigned long, unsigned int *);
114
extern void stw_u(unsigned long, unsigned short *);
115
extern void mem_barrier(void);
116
extern void write_mem_barrier(void);
117
extern void stl_brx(unsigned long, volatile unsigned char *, int);
118
extern void stw_brx(unsigned short, volatile unsigned char *, int);
119
extern unsigned long ldl_brx(volatile unsigned char *, int);
120
extern unsigned short ldw_brx(volatile unsigned char *, int);
126
# if (defined(linux) || defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) && (defined(__alpha__))
129
/* for Linux on Alpha, we use the LIBC _inx/_outx routines */
130
/* note that the appropriate setup via "ioperm" needs to be done */
131
/* *before* any inx/outx is done. */
133
extern void (*_alpha_outb)(char val, unsigned long port);
134
static __inline__ void
135
outb(unsigned long port, unsigned char val)
137
_alpha_outb(val, port);
140
extern void (*_alpha_outw)(short val, unsigned long port);
141
static __inline__ void
142
outw(unsigned long port, unsigned short val)
144
_alpha_outw(val, port);
147
extern void (*_alpha_outl)(int val, unsigned long port);
148
static __inline__ void
149
outl(unsigned long port, unsigned int val)
151
_alpha_outl(val, port);
154
extern unsigned int (*_alpha_inb)(unsigned long port);
155
static __inline__ unsigned int
156
inb(unsigned long port)
158
return _alpha_inb(port);
161
extern unsigned int (*_alpha_inw)(unsigned long port);
162
static __inline__ unsigned int
163
inw(unsigned long port)
165
return _alpha_inw(port);
168
extern unsigned int (*_alpha_inl)(unsigned long port);
169
static __inline__ unsigned int
170
inl(unsigned long port)
172
return _alpha_inl(port);
177
# if (defined(__FreeBSD__) || defined(__OpenBSD__)) \
178
&& !defined(DO_PROTOTYPES)
180
/* for FreeBSD and OpenBSD on Alpha, we use the libio (resp. libalpha) */
181
/* inx/outx routines */
182
/* note that the appropriate setup via "ioperm" needs to be done */
183
/* *before* any inx/outx is done. */
185
extern void outb(unsigned int port, unsigned char val);
186
extern void outw(unsigned int port, unsigned short val);
187
extern void outl(unsigned int port, unsigned int val);
188
extern unsigned char inb(unsigned int port);
189
extern unsigned short inw(unsigned int port);
190
extern unsigned int inl(unsigned int port);
192
# endif /* (__FreeBSD__ || __OpenBSD__ ) && !DO_PROTOTYPES */
195
#if defined(__NetBSD__)
196
#include <machine/pio.h>
197
#endif /* __NetBSD__ */
200
* inline functions to do unaligned accesses
201
* from linux/include/asm-alpha/unaligned.h
205
* EGCS 1.1 knows about arbitrary unaligned loads. Define some
206
* packed structures to talk about such things with.
209
struct __una_u64 { unsigned long x __attribute__((packed)); };
210
struct __una_u32 { unsigned int x __attribute__((packed)); };
211
struct __una_u16 { unsigned short x __attribute__((packed)); };
214
* Elemental unaligned loads
216
/* let's try making these things static */
218
static __inline__ unsigned long ldq_u(unsigned long * r11)
220
# if defined(__GNUC__)
221
const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
225
__asm__("ldq_u %0,%3\n\t"
229
:"=&r" (r1), "=&r" (r2)
232
"m" (*(const unsigned long *)(7+(char *) r11)));
237
static __inline__ unsigned long ldl_u(unsigned int * r11)
239
# if defined(__GNUC__)
240
const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
244
__asm__("ldq_u %0,%3\n\t"
248
:"=&r" (r1), "=&r" (r2)
251
"m" (*(const unsigned long *)(3+(char *) r11)));
256
static __inline__ unsigned long ldw_u(unsigned short * r11)
258
# if defined(__GNUC__)
259
const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
263
__asm__("ldq_u %0,%3\n\t"
267
:"=&r" (r1), "=&r" (r2)
270
"m" (*(const unsigned long *)(1+(char *) r11)));
276
* Elemental unaligned stores
279
static __inline__ void stq_u(unsigned long r5, unsigned long * r11)
281
# if defined(__GNUC__)
282
struct __una_u64 *ptr = (struct __una_u64 *) r11;
285
unsigned long r1,r2,r3,r4;
287
__asm__("ldq_u %3,%1\n\t"
298
"=m" (*(unsigned long *)(7+(char *) r11)),
299
"=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
300
:"r" (r5), "r" (r11));
304
static __inline__ void stl_u(unsigned long r5, unsigned int * r11)
306
# if defined(__GNUC__)
307
struct __una_u32 *ptr = (struct __una_u32 *) r11;
310
unsigned long r1,r2,r3,r4;
312
__asm__("ldq_u %3,%1\n\t"
323
"=m" (*(unsigned long *)(3+(char *) r11)),
324
"=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
325
:"r" (r5), "r" (r11));
329
static __inline__ void stw_u(unsigned long r5, unsigned short * r11)
331
# if defined(__GNUC__)
332
struct __una_u16 *ptr = (struct __una_u16 *) r11;
335
unsigned long r1,r2,r3,r4;
337
__asm__("ldq_u %3,%1\n\t"
348
"=m" (*(unsigned long *)(1+(char *) r11)),
349
"=&r" (r1), "=&r" (r2), "=&r" (r3), "=&r" (r4)
350
:"r" (r5), "r" (r11));
354
/* to flush the I-cache before jumping to code which just got loaded */
356
# define istream_mem_barrier() \
357
__asm__ __volatile__("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
358
# define mem_barrier() __asm__ __volatile__("mb" : : : "memory")
360
# define write_mem_barrier() __asm__ __volatile__("wmb" : : : "memory")
361
# else /* ECOFF gas 2.6 doesn't know "wmb" :-( */
362
# define write_mem_barrier() mem_barrier()
366
# elif defined(linux) && defined(__ia64__)
368
# include <inttypes.h>
372
struct __una_u64 { uint64_t x __attribute__((packed)); };
373
struct __una_u32 { uint32_t x __attribute__((packed)); };
374
struct __una_u16 { uint16_t x __attribute__((packed)); };
376
static __inline__ unsigned long
377
__uldq (const unsigned long * r11)
379
const struct __una_u64 *ptr = (const struct __una_u64 *) r11;
383
static __inline__ unsigned long
384
__uldl (const unsigned int * r11)
386
const struct __una_u32 *ptr = (const struct __una_u32 *) r11;
390
static __inline__ unsigned long
391
__uldw (const unsigned short * r11)
393
const struct __una_u16 *ptr = (const struct __una_u16 *) r11;
397
static __inline__ void
398
__ustq (unsigned long r5, unsigned long * r11)
400
struct __una_u64 *ptr = (struct __una_u64 *) r11;
404
static __inline__ void
405
__ustl (unsigned long r5, unsigned int * r11)
407
struct __una_u32 *ptr = (struct __una_u32 *) r11;
411
static __inline__ void
412
__ustw (unsigned long r5, unsigned short * r11)
414
struct __una_u16 *ptr = (struct __una_u16 *) r11;
418
# define ldq_u(p) __uldq(p)
419
# define ldl_u(p) __uldl(p)
420
# define ldw_u(p) __uldw(p)
421
# define stq_u(v,p) __ustq(v,p)
422
# define stl_u(v,p) __ustl(v,p)
423
# define stw_u(v,p) __ustw(v,p)
425
# ifndef __INTEL_COMPILER
426
# define mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
427
# define write_mem_barrier() __asm__ __volatile__ ("mf" ::: "memory")
429
# include "ia64intrin.h"
430
# define mem_barrier() __mf()
431
# define write_mem_barrier() __mf()
435
* This is overkill, but for different reasons depending on where it is used.
436
* This is thus general enough to be used everywhere cache flushes are needed.
437
* It doesn't handle memory access serialisation by other processors, though.
439
# ifndef __INTEL_COMPILER
440
# define ia64_flush_cache(Addr) \
441
__asm__ __volatile__ ( \
446
:: "r"(Addr) : "memory")
448
# define ia64_flush_cache(Addr) { \
461
extern void outb(unsigned long port, unsigned char val);
462
extern void outw(unsigned long port, unsigned short val);
463
extern void outl(unsigned long port, unsigned int val);
464
extern unsigned int inb(unsigned long port);
465
extern unsigned int inw(unsigned long port);
466
extern unsigned int inl(unsigned long port);
468
# elif defined(linux) && defined(__amd64__)
470
# include <inttypes.h>
472
# define ldq_u(p) (*((unsigned long *)(p)))
473
# define ldl_u(p) (*((unsigned int *)(p)))
474
# define ldw_u(p) (*((unsigned short *)(p)))
475
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
476
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
477
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
479
# define mem_barrier() \
480
__asm__ __volatile__ ("lock; addl $0,0(%%rsp)": : :"memory")
481
# define write_mem_barrier() \
482
__asm__ __volatile__ ("": : :"memory")
485
static __inline__ void
486
outb(unsigned short port, unsigned char val)
488
__asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
492
static __inline__ void
493
outw(unsigned short port, unsigned short val)
495
__asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
498
static __inline__ void
499
outl(unsigned short port, unsigned int val)
501
__asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
504
static __inline__ unsigned int
505
inb(unsigned short port)
508
__asm__ __volatile__("inb %1,%0" :
514
static __inline__ unsigned int
515
inw(unsigned short port)
518
__asm__ __volatile__("inw %1,%0" :
524
static __inline__ unsigned int
525
inl(unsigned short port)
528
__asm__ __volatile__("inl %1,%0" :
534
# elif (defined(linux) || defined(sun) || defined(__OpenBSD__) || defined(__FreeBSD__)) && defined(__sparc__)
540
# define barrier() __asm__ __volatile__(".word 0x8143e00a": : :"memory")
542
static __inline__ void
543
outb(unsigned long port, unsigned char val)
545
__asm__ __volatile__("stba %0, [%1] %2"
547
: "r" (val), "r" (port), "i" (ASI_PL));
551
static __inline__ void
552
outw(unsigned long port, unsigned short val)
554
__asm__ __volatile__("stha %0, [%1] %2"
556
: "r" (val), "r" (port), "i" (ASI_PL));
560
static __inline__ void
561
outl(unsigned long port, unsigned int val)
563
__asm__ __volatile__("sta %0, [%1] %2"
565
: "r" (val), "r" (port), "i" (ASI_PL));
569
static __inline__ unsigned int
570
inb(unsigned long port)
573
__asm__ __volatile__("lduba [%1] %2, %0"
575
: "r" (port), "i" (ASI_PL));
579
static __inline__ unsigned int
580
inw(unsigned long port)
583
__asm__ __volatile__("lduha [%1] %2, %0"
585
: "r" (port), "i" (ASI_PL));
589
static __inline__ unsigned int
590
inl(unsigned long port)
593
__asm__ __volatile__("lda [%1] %2, %0"
595
: "r" (port), "i" (ASI_PL));
599
static __inline__ unsigned char
600
xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
602
unsigned long addr = ((unsigned long)base) + offset;
605
__asm__ __volatile__("lduba [%1] %2, %0"
607
: "r" (addr), "i" (ASI_PL));
611
static __inline__ unsigned short
612
xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
614
unsigned long addr = ((unsigned long)base) + offset;
617
__asm__ __volatile__("lduh [%1], %0"
623
static __inline__ unsigned short
624
xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
626
unsigned long addr = ((unsigned long)base) + offset;
629
__asm__ __volatile__("lduha [%1] %2, %0"
631
: "r" (addr), "i" (ASI_PL));
635
static __inline__ unsigned int
636
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
638
unsigned long addr = ((unsigned long)base) + offset;
641
__asm__ __volatile__("ld [%1], %0"
647
static __inline__ unsigned int
648
xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
650
unsigned long addr = ((unsigned long)base) + offset;
653
__asm__ __volatile__("lda [%1] %2, %0"
655
: "r" (addr), "i" (ASI_PL));
659
static __inline__ void
660
xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
661
const unsigned int val)
663
unsigned long addr = ((unsigned long)base) + offset;
665
__asm__ __volatile__("stba %0, [%1] %2"
667
: "r" (val), "r" (addr), "i" (ASI_PL));
671
static __inline__ void
672
xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
673
const unsigned int val)
675
unsigned long addr = ((unsigned long)base) + offset;
677
__asm__ __volatile__("sth %0, [%1]"
679
: "r" (val), "r" (addr));
683
static __inline__ void
684
xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
685
const unsigned int val)
687
unsigned long addr = ((unsigned long)base) + offset;
689
__asm__ __volatile__("stha %0, [%1] %2"
691
: "r" (val), "r" (addr), "i" (ASI_PL));
695
static __inline__ void
696
xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
697
const unsigned int val)
699
unsigned long addr = ((unsigned long)base) + offset;
701
__asm__ __volatile__("st %0, [%1]"
703
: "r" (val), "r" (addr));
707
static __inline__ void
708
xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
709
const unsigned int val)
711
unsigned long addr = ((unsigned long)base) + offset;
713
__asm__ __volatile__("sta %0, [%1] %2"
715
: "r" (val), "r" (addr), "i" (ASI_PL));
719
static __inline__ void
720
xf86WriteMmio8NB(__volatile__ void *base, const unsigned long offset,
721
const unsigned int val)
723
unsigned long addr = ((unsigned long)base) + offset;
725
__asm__ __volatile__("stba %0, [%1] %2"
727
: "r" (val), "r" (addr), "i" (ASI_PL));
730
static __inline__ void
731
xf86WriteMmio16BeNB(__volatile__ void *base, const unsigned long offset,
732
const unsigned int val)
734
unsigned long addr = ((unsigned long)base) + offset;
736
__asm__ __volatile__("sth %0, [%1]"
738
: "r" (val), "r" (addr));
741
static __inline__ void
742
xf86WriteMmio16LeNB(__volatile__ void *base, const unsigned long offset,
743
const unsigned int val)
745
unsigned long addr = ((unsigned long)base) + offset;
747
__asm__ __volatile__("stha %0, [%1] %2"
749
: "r" (val), "r" (addr), "i" (ASI_PL));
752
static __inline__ void
753
xf86WriteMmio32BeNB(__volatile__ void *base, const unsigned long offset,
754
const unsigned int val)
756
unsigned long addr = ((unsigned long)base) + offset;
758
__asm__ __volatile__("st %0, [%1]"
760
: "r" (val), "r" (addr));
763
static __inline__ void
764
xf86WriteMmio32LeNB(__volatile__ void *base, const unsigned long offset,
765
const unsigned int val)
767
unsigned long addr = ((unsigned long)base) + offset;
769
__asm__ __volatile__("sta %0, [%1] %2"
771
: "r" (val), "r" (addr), "i" (ASI_PL));
776
* EGCS 1.1 knows about arbitrary unaligned loads. Define some
777
* packed structures to talk about such things with.
780
# if defined(__arch64__) || defined(__sparcv9)
781
struct __una_u64 { unsigned long x __attribute__((packed)); };
783
struct __una_u32 { unsigned int x __attribute__((packed)); };
784
struct __una_u16 { unsigned short x __attribute__((packed)); };
786
static __inline__ unsigned long ldq_u(unsigned long *p)
788
# if defined(__GNUC__)
789
# if defined(__arch64__) || defined(__sparcv9)
790
const struct __una_u64 *ptr = (const struct __una_u64 *) p;
792
const struct __una_u32 *ptr = (const struct __una_u32 *) p;
797
memmove(&ret, p, sizeof(*p));
802
static __inline__ unsigned long ldl_u(unsigned int *p)
804
# if defined(__GNUC__)
805
const struct __una_u32 *ptr = (const struct __una_u32 *) p;
809
memmove(&ret, p, sizeof(*p));
814
static __inline__ unsigned long ldw_u(unsigned short *p)
816
# if defined(__GNUC__)
817
const struct __una_u16 *ptr = (const struct __una_u16 *) p;
821
memmove(&ret, p, sizeof(*p));
826
static __inline__ void stq_u(unsigned long val, unsigned long *p)
828
# if defined(__GNUC__)
829
# if defined(__arch64__) || defined(__sparcv9)
830
struct __una_u64 *ptr = (struct __una_u64 *) p;
832
struct __una_u32 *ptr = (struct __una_u32 *) p;
836
unsigned long tmp = val;
837
memmove(p, &tmp, sizeof(*p));
841
static __inline__ void stl_u(unsigned long val, unsigned int *p)
843
# if defined(__GNUC__)
844
struct __una_u32 *ptr = (struct __una_u32 *) p;
847
unsigned int tmp = val;
848
memmove(p, &tmp, sizeof(*p));
852
static __inline__ void stw_u(unsigned long val, unsigned short *p)
854
# if defined(__GNUC__)
855
struct __una_u16 *ptr = (struct __una_u16 *) p;
858
unsigned short tmp = val;
859
memmove(p, &tmp, sizeof(*p));
863
# define mem_barrier() /* XXX: nop for now */
864
# define write_mem_barrier() /* XXX: nop for now */
866
# elif defined(__mips__) || (defined(__arm32__) && !defined(__linux__))
868
# define PORT_SIZE long
870
# define PORT_SIZE short
873
unsigned int IOPortBase; /* Memory mapped I/O port area */
875
static __inline__ void
876
outb(unsigned PORT_SIZE port, unsigned char val)
878
*(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
881
static __inline__ void
882
outw(unsigned PORT_SIZE port, unsigned short val)
884
*(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
887
static __inline__ void
888
outl(unsigned PORT_SIZE port, unsigned int val)
890
*(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase) = val;
893
static __inline__ unsigned int
894
inb(unsigned PORT_SIZE port)
896
return *(volatile unsigned char*)(((unsigned PORT_SIZE)(port))+IOPortBase);
899
static __inline__ unsigned int
900
inw(unsigned PORT_SIZE port)
902
return *(volatile unsigned short*)(((unsigned PORT_SIZE)(port))+IOPortBase);
905
static __inline__ unsigned int
906
inl(unsigned PORT_SIZE port)
908
return *(volatile unsigned int*)(((unsigned PORT_SIZE)(port))+IOPortBase);
912
# if defined(__mips__)
913
static __inline__ unsigned long ldq_u(unsigned long * r11)
916
__asm__("lwr %0,%2\n\t"
921
"m" (*(unsigned long *)(3+(char *) r11)));
925
static __inline__ unsigned long ldl_u(unsigned int * r11)
928
__asm__("lwr %0,%2\n\t"
933
"m" (*(unsigned long *)(3+(char *) r11)));
937
static __inline__ unsigned long ldw_u(unsigned short * r11)
940
__asm__("lwr %0,%2\n\t"
945
"m" (*(unsigned long *)(1+(char *) r11)));
949
# ifdef linux /* don't mess with other OSs */
952
* EGCS 1.1 knows about arbitrary unaligned loads (and we don't support older
953
* versions anyway. Define some packed structures to talk about such things
957
struct __una_u32 { unsigned int x __attribute__((packed)); };
958
struct __una_u16 { unsigned short x __attribute__((packed)); };
960
static __inline__ void stw_u(unsigned long val, unsigned short *p)
962
struct __una_u16 *ptr = (struct __una_u16 *) p;
966
static __inline__ void stl_u(unsigned long val, unsigned int *p)
968
struct __una_u32 *ptr = (struct __una_u32 *) p;
972
# if X_BYTE_ORDER == X_BIG_ENDIAN
973
static __inline__ unsigned int
974
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
976
unsigned long addr = ((unsigned long)base) + offset;
979
__asm__ __volatile__("lw %0, 0(%1)"
985
static __inline__ void
986
xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
987
const unsigned int val)
989
unsigned long addr = ((unsigned long)base) + offset;
991
__asm__ __volatile__("sw %0, 0(%1)"
993
: "r" (val), "r" (addr));
997
# define mem_barrier() \
998
__asm__ __volatile__( \
999
"# prevent instructions being moved around\n\t" \
1000
".set\tnoreorder\n\t" \
1001
"# 8 nops to fool the R4400 pipeline\n\t" \
1002
"nop;nop;nop;nop;nop;nop;nop;nop\n\t" \
1007
# define write_mem_barrier() mem_barrier()
1011
# define stq_u(v,p) stl_u(v,p)
1012
# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
1013
(*(unsigned char *)(p)+1) = ((v) >> 8); \
1014
(*(unsigned char *)(p)+2) = ((v) >> 16); \
1015
(*(unsigned char *)(p)+3) = ((v) >> 24)
1017
# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
1018
(*(unsigned char *)(p)+1) = ((v) >> 8)
1020
# define mem_barrier() /* NOP */
1021
# endif /* !linux */
1022
# endif /* __mips__ */
1024
# if defined(__arm32__)
1025
# define ldq_u(p) (*((unsigned long *)(p)))
1026
# define ldl_u(p) (*((unsigned int *)(p)))
1027
# define ldw_u(p) (*((unsigned short *)(p)))
1028
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1029
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1030
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1031
# define mem_barrier() /* NOP */
1032
# define write_mem_barrier() /* NOP */
1033
# endif /* __arm32__ */
1035
# elif (defined(linux) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__FreeBSD__)) && defined(__powerpc__)
1038
# define MAP_FAILED ((void *)-1)
1041
extern volatile unsigned char *ioBase;
1043
#if defined(linux) && defined(__powerpc64__)
1044
# include <linux/version.h>
1045
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
1046
# include <asm/memory.h>
1048
#endif /* defined(linux) && defined(__powerpc64__) */
1049
#ifndef eieio /* We deal with arch-specific eieio() routines above... */
1050
# define eieio() __asm__ __volatile__ ("eieio" ::: "memory")
1053
static __inline__ unsigned char
1054
xf86ReadMmio8(__volatile__ void *base, const unsigned long offset)
1056
register unsigned char val;
1057
__asm__ __volatile__(
1061
: "b" (base), "r" (offset),
1062
"m" (*((volatile unsigned char *)base+offset)));
1066
static __inline__ unsigned short
1067
xf86ReadMmio16Be(__volatile__ void *base, const unsigned long offset)
1069
register unsigned short val;
1070
__asm__ __volatile__(
1074
: "b" (base), "r" (offset),
1075
"m" (*((volatile unsigned char *)base+offset)));
1079
static __inline__ unsigned short
1080
xf86ReadMmio16Le(__volatile__ void *base, const unsigned long offset)
1082
register unsigned short val;
1083
__asm__ __volatile__(
1084
"lhbrx %0,%1,%2\n\t"
1087
: "b" (base), "r" (offset),
1088
"m" (*((volatile unsigned char *)base+offset)));
1092
static __inline__ unsigned int
1093
xf86ReadMmio32Be(__volatile__ void *base, const unsigned long offset)
1095
register unsigned int val;
1096
__asm__ __volatile__(
1100
: "b" (base), "r" (offset),
1101
"m" (*((volatile unsigned char *)base+offset)));
1105
static __inline__ unsigned int
1106
xf86ReadMmio32Le(__volatile__ void *base, const unsigned long offset)
1108
register unsigned int val;
1109
__asm__ __volatile__(
1110
"lwbrx %0,%1,%2\n\t"
1113
: "b" (base), "r" (offset),
1114
"m" (*((volatile unsigned char *)base+offset)));
1118
static __inline__ void
1119
xf86WriteMmioNB8(__volatile__ void *base, const unsigned long offset,
1120
const unsigned char val)
1122
__asm__ __volatile__(
1124
: "=m" (*((volatile unsigned char *)base+offset))
1125
: "r" (val), "b" (base), "r" (offset));
1128
static __inline__ void
1129
xf86WriteMmioNB16Le(__volatile__ void *base, const unsigned long offset,
1130
const unsigned short val)
1132
__asm__ __volatile__(
1133
"sthbrx %1,%2,%3\n\t"
1134
: "=m" (*((volatile unsigned char *)base+offset))
1135
: "r" (val), "b" (base), "r" (offset));
1138
static __inline__ void
1139
xf86WriteMmioNB16Be(__volatile__ void *base, const unsigned long offset,
1140
const unsigned short val)
1142
__asm__ __volatile__(
1144
: "=m" (*((volatile unsigned char *)base+offset))
1145
: "r" (val), "b" (base), "r" (offset));
1148
static __inline__ void
1149
xf86WriteMmioNB32Le(__volatile__ void *base, const unsigned long offset,
1150
const unsigned int val)
1152
__asm__ __volatile__(
1153
"stwbrx %1,%2,%3\n\t"
1154
: "=m" (*((volatile unsigned char *)base+offset))
1155
: "r" (val), "b" (base), "r" (offset));
1158
static __inline__ void
1159
xf86WriteMmioNB32Be(__volatile__ void *base, const unsigned long offset,
1160
const unsigned int val)
1162
__asm__ __volatile__(
1164
: "=m" (*((volatile unsigned char *)base+offset))
1165
: "r" (val), "b" (base), "r" (offset));
1168
static __inline__ void
1169
xf86WriteMmio8(__volatile__ void *base, const unsigned long offset,
1170
const unsigned char val)
1172
xf86WriteMmioNB8(base, offset, val);
1176
static __inline__ void
1177
xf86WriteMmio16Le(__volatile__ void *base, const unsigned long offset,
1178
const unsigned short val)
1180
xf86WriteMmioNB16Le(base, offset, val);
1184
static __inline__ void
1185
xf86WriteMmio16Be(__volatile__ void *base, const unsigned long offset,
1186
const unsigned short val)
1188
xf86WriteMmioNB16Be(base, offset, val);
1192
static __inline__ void
1193
xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset,
1194
const unsigned int val)
1196
xf86WriteMmioNB32Le(base, offset, val);
1200
static __inline__ void
1201
xf86WriteMmio32Be(__volatile__ void *base, const unsigned long offset,
1202
const unsigned int val)
1204
xf86WriteMmioNB32Be(base, offset, val);
1209
static __inline__ void
1210
outb(unsigned short port, unsigned char value)
1212
if(ioBase == MAP_FAILED) return;
1213
xf86WriteMmio8((void *)ioBase, port, value);
1216
static __inline__ void
1217
outw(unsigned short port, unsigned short value)
1219
if(ioBase == MAP_FAILED) return;
1220
xf86WriteMmio16Le((void *)ioBase, port, value);
1223
static __inline__ void
1224
outl(unsigned short port, unsigned int value)
1226
if(ioBase == MAP_FAILED) return;
1227
xf86WriteMmio32Le((void *)ioBase, port, value);
1230
static __inline__ unsigned int
1231
inb(unsigned short port)
1233
if(ioBase == MAP_FAILED) return 0;
1234
return xf86ReadMmio8((void *)ioBase, port);
1237
static __inline__ unsigned int
1238
inw(unsigned short port)
1240
if(ioBase == MAP_FAILED) return 0;
1241
return xf86ReadMmio16Le((void *)ioBase, port);
1244
static __inline__ unsigned int
1245
inl(unsigned short port)
1247
if(ioBase == MAP_FAILED) return 0;
1248
return xf86ReadMmio32Le((void *)ioBase, port);
1251
# define ldq_u(p) ldl_u(p)
1252
# define ldl_u(p) ((*(unsigned char *)(p)) | \
1253
(*((unsigned char *)(p)+1)<<8) | \
1254
(*((unsigned char *)(p)+2)<<16) | \
1255
(*((unsigned char *)(p)+3)<<24))
1256
# define ldw_u(p) ((*(unsigned char *)(p)) | \
1257
(*((unsigned char *)(p)+1)<<8))
1259
# define stq_u(v,p) stl_u(v,p)
1260
# define stl_u(v,p) (*(unsigned char *)(p)) = (v); \
1261
(*((unsigned char *)(p)+1)) = ((v) >> 8); \
1262
(*((unsigned char *)(p)+2)) = ((v) >> 16); \
1263
(*((unsigned char *)(p)+3)) = ((v) >> 24)
1264
# define stw_u(v,p) (*(unsigned char *)(p)) = (v); \
1265
(*((unsigned char *)(p)+1)) = ((v) >> 8)
1267
# define mem_barrier() eieio()
1268
# define write_mem_barrier() eieio()
1270
#elif defined(__arm__) && defined(__linux__)
1272
#define ldq_u(p) (*((unsigned long *)(p)))
1273
#define ldl_u(p) (*((unsigned int *)(p)))
1274
#define ldw_u(p) (*((unsigned short *)(p)))
1275
#define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1276
#define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1277
#define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1278
#define mem_barrier() /* NOP */
1279
#define write_mem_barrier() /* NOP */
1281
/* for Linux on ARM, we use the LIBC inx/outx routines */
1282
/* note that the appropriate setup via "ioperm" needs to be done */
1283
/* *before* any inx/outx is done. */
1287
static __inline__ void
1288
xf_outb(unsigned short port, unsigned char val)
1293
static __inline__ void
1294
xf_outw(unsigned short port, unsigned short val)
1299
static __inline__ void
1300
xf_outl(unsigned short port, unsigned int val)
1305
#define outb xf_outb
1306
#define outw xf_outw
1307
#define outl xf_outl
1309
#define arm_flush_cache(addr) \
1311
register unsigned long _beg __asm ("a1") = (unsigned long) (addr); \
1312
register unsigned long _end __asm ("a2") = (unsigned long) (addr) + 4;\
1313
register unsigned long _flg __asm ("a3") = 0; \
1314
__asm __volatile ("swi 0x9f0002 @ sys_cacheflush" \
1316
: "0" (_beg), "r" (_end), "r" (_flg)); \
1321
# define ldq_u(p) (*((unsigned long *)(p)))
1322
# define ldl_u(p) (*((unsigned int *)(p)))
1323
# define ldw_u(p) (*((unsigned short *)(p)))
1324
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
1325
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
1326
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
1327
# define mem_barrier() /* NOP */
1328
# define write_mem_barrier() /* NOP */
1330
# if !defined(__SUNPRO_C)
1331
# if !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__) && !defined(__s390__) && !defined(__m32r__)
1335
* If gcc uses gas rather than the native assembler, the syntax of these
1336
* inlines has to be different. DHD
1339
static __inline__ void
1340
outb(unsigned short port, unsigned char val)
1342
__asm__ __volatile__("outb %0,%1" : :"a" (val), "d" (port));
1346
static __inline__ void
1347
outw(unsigned short port, unsigned short val)
1349
__asm__ __volatile__("outw %0,%1" : :"a" (val), "d" (port));
1352
static __inline__ void
1353
outl(unsigned short port, unsigned int val)
1355
__asm__ __volatile__("outl %0,%1" : :"a" (val), "d" (port));
1358
static __inline__ unsigned int
1359
inb(unsigned short port)
1362
__asm__ __volatile__("inb %1,%0" :
1368
static __inline__ unsigned int
1369
inw(unsigned short port)
1372
__asm__ __volatile__("inw %1,%0" :
1378
static __inline__ unsigned int
1379
inl(unsigned short port)
1382
__asm__ __volatile__("inl %1,%0" :
1388
# else /* GCCUSESGAS */
1390
static __inline__ void
1391
outb(unsigned short port, unsigned char val)
1393
__asm__ __volatile__("out%B0 (%1)" : :"a" (val), "d" (port));
1396
static __inline__ void
1397
outw(unsigned short port, unsigned short val)
1399
__asm__ __volatile__("out%W0 (%1)" : :"a" (val), "d" (port));
1402
static __inline__ void
1403
outl(unsigned short port, unsigned int val)
1405
__asm__ __volatile__("out%L0 (%1)" : :"a" (val), "d" (port));
1408
static __inline__ unsigned int
1409
inb(unsigned short port)
1412
__asm__ __volatile__("in%B0 (%1)" :
1418
static __inline__ unsigned int
1419
inw(unsigned short port)
1422
__asm__ __volatile__("in%W0 (%1)" :
1428
static __inline__ unsigned int
1429
inl(unsigned short port)
1432
__asm__ __volatile__("in%L0 (%1)" :
1438
# endif /* GCCUSESGAS */
1440
# else /* !defined(FAKEIT) && !defined(__mc68000__) && !defined(__arm__) && !defined(__sh__) && !defined(__hppa__) && !defined(__m32r__) */
1442
static __inline__ void
1443
outb(unsigned short port, unsigned char val)
1447
static __inline__ void
1448
outw(unsigned short port, unsigned short val)
1452
static __inline__ void
1453
outl(unsigned short port, unsigned int val)
1457
static __inline__ unsigned int
1458
inb(unsigned short port)
1463
static __inline__ unsigned int
1464
inw(unsigned short port)
1469
static __inline__ unsigned int
1470
inl(unsigned short port)
1475
# endif /* FAKEIT */
1476
# endif /* __SUNPRO_C */
1481
# if defined(__STDC__) && (__STDC__ == 1)
1487
# if defined(__UNIXWARE__)
# /* avoid including <sys/types.h> for <sys/inline.h> on UnixWare */
/* Provide the sys/types.h-style aliases <sys/inline.h> expects. */
# define ushort unsigned short
# define ushort_t unsigned short
# define ulong unsigned long
# define ulong_t unsigned long
# define uint_t unsigned int
# define uchar_t unsigned char
# endif /* __UNIXWARE__ */
1496
# if !defined(__SUNPRO_C)
1497
# include <sys/inline.h>
1500
# include "scoasm.h"
1502
# if !defined(__HIGHC__) && !defined(__SUNPRO_C) || \
1504
/* Keep the compiler from fully optimizing around the asm port-I/O
 * helpers (compiler-specific pragma; ignored elsewhere). */
# pragma asm partial_optimization outl
# pragma asm partial_optimization outw
# pragma asm partial_optimization outb
# pragma asm partial_optimization inl
# pragma asm partial_optimization inw
# pragma asm partial_optimization inb
1511
/* Unaligned load/store helpers (plain dereferences) and no-op memory
 * barriers for this compiler branch. */
# define ldq_u(p) (*((unsigned long *)(p)))
# define ldl_u(p) (*((unsigned int *)(p)))
# define ldw_u(p) (*((unsigned short *)(p)))
# define stq_u(v,p) (*(unsigned long *)(p)) = (v)
# define stl_u(v,p) (*(unsigned int *)(p)) = (v)
# define stw_u(v,p) (*(unsigned short *)(p)) = (v)
# define mem_barrier() /* NOP */
# define write_mem_barrier() /* NOP */
1519
# endif /* __GNUC__ */
1521
# endif /* NO_INLINE */
1524
/* entry points for Mmio memory access routines */
extern int (*xf86ReadMmio8)(void *, unsigned long);
extern int (*xf86ReadMmio16)(void *, unsigned long);
1527
# ifndef STANDALONE_MMIO
1528
extern int (*xf86ReadMmio32)(void *, unsigned long);
1530
/* Some DRI 3D drivers need MMIO_IN32. */
1531
static __inline__ int
1532
xf86ReadMmio32(void *Base, unsigned long Offset)
1534
__asm__ __volatile__("mb" : : : "memory");
1535
return *(volatile unsigned int*)((unsigned long)Base+(Offset));
1538
/* Function-pointer entry points for MMIO writes; the "NB" variants are
 * the no-barrier forms.  Slow bcopy helpers copy across the bus. */
extern void (*xf86WriteMmio8)(int, void *, unsigned long);
extern void (*xf86WriteMmio16)(int, void *, unsigned long);
extern void (*xf86WriteMmio32)(int, void *, unsigned long);
extern void (*xf86WriteMmioNB8)(int, void *, unsigned long);
extern void (*xf86WriteMmioNB16)(int, void *, unsigned long);
extern void (*xf86WriteMmioNB32)(int, void *, unsigned long);
extern void xf86SlowBCopyFromBus(unsigned char *, unsigned char *, int);
extern void xf86SlowBCopyToBus(unsigned char *, unsigned char *, int);
1547
/* Some macros to hide the system dependencies for MMIO accesses */
/* Changed to kill noise generated by gcc's -Wcast-align */
# define MMIO_IN8(base, offset) (*xf86ReadMmio8)(base, offset)
# define MMIO_IN16(base, offset) (*xf86ReadMmio16)(base, offset)
1551
# ifndef STANDALONE_MMIO
1552
# define MMIO_IN32(base, offset) (*xf86ReadMmio32)(base, offset)
1554
# define MMIO_IN32(base, offset) xf86ReadMmio32(base, offset)
1557
# define MMIO_OUT32(base, offset, val) \
1559
write_mem_barrier(); \
1560
*(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val); \
1562
/* 32-bit writes go straight to memory (ONB32 = no write barrier);
 * 8/16-bit writes dispatch through function pointers so the proper
 * variant can be chosen at run time. */
# define MMIO_ONB32(base, offset, val) \
*(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_OUT8(base, offset, val) \
(*xf86WriteMmio8)((CARD8)(val), base, offset)
# define MMIO_OUT16(base, offset, val) \
(*xf86WriteMmio16)((CARD16)(val), base, offset)
# define MMIO_ONB8(base, offset, val) \
(*xf86WriteMmioNB8)((CARD8)(val), base, offset)
# define MMIO_ONB16(base, offset, val) \
(*xf86WriteMmioNB16)((CARD16)(val), base, offset)
# define MMIO_MOVE32(base, offset, val) \
MMIO_OUT32(base, offset, val)
1576
# elif defined(__powerpc__)
1578
/*
 * we provide byteswapping and no byteswapping functions here
 * with byteswapping as default,
 * drivers that don't need byteswapping should define PPC_MMIO_IS_BE
 */
1582
/* 8-bit MMIO accessors (byte order is irrelevant for single bytes). */
# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
# define MMIO_OUT8(base, offset, val) \
xf86WriteMmio8(base, offset, (CARD8)(val))
# define MMIO_ONB8(base, offset, val) \
xf86WriteMmioNB8(base, offset, (CARD8)(val))
1588
# if defined(PPC_MMIO_IS_BE) /* No byteswapping */
# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
# define MMIO_OUT16(base, offset, val) \
xf86WriteMmio16Be(base, offset, (CARD16)(val))
# define MMIO_OUT32(base, offset, val) \
xf86WriteMmio32Be(base, offset, (CARD32)(val))
# define MMIO_ONB16(base, offset, val) \
xf86WriteMmioNB16Be(base, offset, (CARD16)(val))
# define MMIO_ONB32(base, offset, val) \
xf86WriteMmioNB32Be(base, offset, (CARD32)(val))
1599
# else /* byteswapping is the default */
# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
# define MMIO_OUT16(base, offset, val) \
xf86WriteMmio16Le(base, offset, (CARD16)(val))
# define MMIO_OUT32(base, offset, val) \
xf86WriteMmio32Le(base, offset, (CARD32)(val))
# define MMIO_ONB16(base, offset, val) \
xf86WriteMmioNB16Le(base, offset, (CARD16)(val))
# define MMIO_ONB32(base, offset, val) \
xf86WriteMmioNB32Le(base, offset, (CARD32)(val))
1612
/* Raw 32-bit move: always the big-endian (non-swapping) path. */
# define MMIO_MOVE32(base, offset, val) \
xf86WriteMmio32Be(base, offset, (CARD32)(val))
1615
static __inline__ void ppc_flush_icache(char *addr)
1623
: : "r"(addr) : "memory");
1626
# elif defined(__sparc__) || defined(sparc) || defined(__sparc)
1628
/*
 * Like powerpc, we provide byteswapping and no byteswapping functions
 * here with byteswapping as default, drivers that don't need byteswapping
 * should define SPARC_MMIO_IS_BE (perhaps create a generic macro so that we
 * do not need to use PPC_MMIO_IS_BE and the sparc one in all the same places
 */
1634
/* 8-bit MMIO accessors (byte order is irrelevant for single bytes). */
# define MMIO_IN8(base, offset) xf86ReadMmio8(base, offset)
# define MMIO_OUT8(base, offset, val) \
xf86WriteMmio8(base, offset, (CARD8)(val))
# define MMIO_ONB8(base, offset, val) \
xf86WriteMmio8NB(base, offset, (CARD8)(val))
1640
# if defined(SPARC_MMIO_IS_BE) /* No byteswapping */
# define MMIO_IN16(base, offset) xf86ReadMmio16Be(base, offset)
# define MMIO_IN32(base, offset) xf86ReadMmio32Be(base, offset)
# define MMIO_OUT16(base, offset, val) \
xf86WriteMmio16Be(base, offset, (CARD16)(val))
# define MMIO_OUT32(base, offset, val) \
xf86WriteMmio32Be(base, offset, (CARD32)(val))
# define MMIO_ONB16(base, offset, val) \
xf86WriteMmio16BeNB(base, offset, (CARD16)(val))
# define MMIO_ONB32(base, offset, val) \
xf86WriteMmio32BeNB(base, offset, (CARD32)(val))
1651
# else /* byteswapping is the default */
# define MMIO_IN16(base, offset) xf86ReadMmio16Le(base, offset)
# define MMIO_IN32(base, offset) xf86ReadMmio32Le(base, offset)
# define MMIO_OUT16(base, offset, val) \
xf86WriteMmio16Le(base, offset, (CARD16)(val))
# define MMIO_OUT32(base, offset, val) \
xf86WriteMmio32Le(base, offset, (CARD32)(val))
# define MMIO_ONB16(base, offset, val) \
xf86WriteMmio16LeNB(base, offset, (CARD16)(val))
# define MMIO_ONB32(base, offset, val) \
xf86WriteMmio32LeNB(base, offset, (CARD32)(val))
1664
/* Raw 32-bit move: always the big-endian (non-swapping) path. */
# define MMIO_MOVE32(base, offset, val) \
xf86WriteMmio32Be(base, offset, (CARD32)(val))
1667
# else /* !__alpha__ && !__powerpc__ && !__sparc__ */
/* Generic fallback: direct volatile dereferences; the (void *)
 * intermediate cast silences gcc's -Wcast-align. */
# define MMIO_IN8(base, offset) \
*(volatile CARD8 *)(((CARD8*)(base)) + (offset))
# define MMIO_IN16(base, offset) \
*(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset))
# define MMIO_IN32(base, offset) \
*(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset))
# define MMIO_OUT8(base, offset, val) \
*(volatile CARD8 *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_OUT16(base, offset, val) \
*(volatile CARD16 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_OUT32(base, offset, val) \
*(volatile CARD32 *)(void *)(((CARD8*)(base)) + (offset)) = (val)
# define MMIO_ONB8(base, offset, val) MMIO_OUT8(base, offset, val)
# define MMIO_ONB16(base, offset, val) MMIO_OUT16(base, offset, val)
# define MMIO_ONB32(base, offset, val) MMIO_OUT32(base, offset, val)
# define MMIO_MOVE32(base, offset, val) MMIO_OUT32(base, offset, val)
1687
# endif /* __alpha__ */
1690
/*
 * With Intel, the version in os-support/misc/SlowBcopy.s is used.
 * This avoids port I/O during the copy (which causes problems with
 * some hardware).
 */
1695
# define slowbcopy_tobus(src,dst,count) xf86SlowBCopyToBus(src,dst,count)
# define slowbcopy_frombus(src,dst,count) xf86SlowBCopyFromBus(src,dst,count)
# else /* __alpha__ */
# define slowbcopy_tobus(src,dst,count) xf86SlowBcopy(src,dst,count)
# define slowbcopy_frombus(src,dst,count) xf86SlowBcopy(src,dst,count)
# endif /* __alpha__ */
1702
#endif /* _COMPILER_H */