/*
 * Copyright (C) 2014-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */
27
#ifndef ETNAVIV_PRIV_H_
28
#define ETNAVIV_PRIV_H_
36
#include <sys/ioctl.h>
42
#include "util/list.h"
44
#include "util/macros.h"
45
#include "util/simple_mtx.h"
46
#include "util/timespec.h"
47
#include "util/u_atomic.h"
48
#include "util/u_debug.h"
51
#include "etnaviv_drmif.h"
52
#include "drm-uapi/etnaviv_drm.h"
54
extern simple_mtx_t etna_drm_table_lock;
56
struct etna_bo_bucket {
58
struct list_head list;
61
struct etna_bo_cache {
62
struct etna_bo_bucket cache_bucket[14 * 4];
72
   /* tables to keep track of bo's, to avoid "evil-twin" etna_bo objects:
    *
    *   handle_table: maps handle to etna_bo
    *   name_table: maps flink name to etna_bo
    *
    * We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
    * returns a new handle.  So we need to figure out if the bo is already
    * open in the process first, before calling gem-open.
    */
81
void *handle_table, *name_table;
83
struct etna_bo_cache bo_cache;
84
struct list_head zombie_list;
87
struct util_vma_heap address_space;
89
int closefd; /* call close(fd) upon destruction */
92
/* Internal bo lifetime helpers (definitions live in etnaviv_bo.c). */
void etna_bo_free(struct etna_bo *bo);
void etna_bo_kill_zombies(struct etna_device *dev);

/* Buffer-object cache: recycles freed bos bucketed by size. */
void etna_bo_cache_init(struct etna_bo_cache *cache);
void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time);
struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache,
      uint32_t *size, uint32_t flags);
int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo);

/* for where @etna_drm_table_lock is already held: */
void etna_device_del_locked(struct etna_device *dev);
104
/* a GEM buffer object allocated from the DRM device */
106
struct etna_device *dev;
107
void *map; /* userspace mmap'ing (if there is one) */
111
uint32_t name; /* flink global handle (DRI2 name) */
112
uint64_t offset; /* offset to mmap() */
113
uint32_t va; /* GPU virtual address */
117
* To avoid excess hashtable lookups, cache the stream this bo was
118
* last emitted on (since that will probably also be the next ring
121
struct etna_cmd_stream *current_stream;
125
struct list_head list; /* bucket-list entry */
126
time_t free_time; /* time when added to bucket-list */
130
struct etna_device *dev;
137
enum etna_pipe_id id;
138
struct etna_gpu *gpu;
141
struct etna_cmd_stream_priv {
142
struct etna_cmd_stream base;
143
struct etna_pipe *pipe;
145
uint32_t last_timestamp;
147
/* submit ioctl related tables: */
150
struct drm_etnaviv_gem_submit_bo *bos;
151
uint32_t nr_bos, max_bos;
154
struct drm_etnaviv_gem_submit_reloc *relocs;
155
uint32_t nr_relocs, max_relocs;
158
struct drm_etnaviv_gem_submit_pmr *pmrs;
159
uint32_t nr_pmrs, max_pmrs;
162
/* should have matching entries in submit.bos: */
163
struct etna_bo **bos;
164
uint32_t nr_bos, max_bos;
166
/* notify callback if buffer reset happened */
167
void (*force_flush)(struct etna_cmd_stream *stream, void *priv);
168
void *force_flush_priv;
173
struct etna_perfmon {
174
struct list_head domains;
175
struct etna_pipe *pipe;
178
struct etna_perfmon_domain
180
struct list_head head;
181
struct list_head signals;
186
struct etna_perfmon_signal
188
struct list_head head;
189
struct etna_perfmon_domain *domain;
194
/* Round v up to the next multiple of a; a must be a power of two. */
#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))
196
/* Bit in etna_mesa_debug that enables DEBUG_MSG output. */
#define ETNA_DRM_MSGS 0x40
extern int etna_mesa_debug;
199
/* Unconditional info-level logging, prefixed with source location. */
#define INFO_MSG(fmt, ...) \
      do { mesa_logi("%s:%d: " fmt, \
            __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
202
/* Debug logging, emitted only when ETNA_DRM_MSGS is set in etna_mesa_debug. */
#define DEBUG_MSG(fmt, ...) \
      do if (etna_mesa_debug & ETNA_DRM_MSGS) { \
         mesa_logd("%s:%d: " fmt, \
            __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
206
/* Unconditional warning-level logging, prefixed with source location. */
#define WARN_MSG(fmt, ...) \
      do { mesa_logw("%s:%d: " fmt, \
            __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
209
/* Unconditional error-level logging, prefixed with source location. */
#define ERROR_MSG(fmt, ...) \
      do { mesa_loge("%s:%d: " fmt, \
            __FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)
213
/* Dump the interesting fields of a bo at debug level.
 * NOTE: the expansion already ends in a semicolon, so don't add another
 * at the call site inside an unbraced if/else. */
#define DEBUG_BO(msg, bo) \
      DEBUG_MSG("%s %p, va: 0x%.8x, size: 0x%.8x, flags: 0x%.8x, " \
            "refcnt: %d, handle: 0x%.8x, name: 0x%.8x", \
            msg, bo, bo->va, bo->size, bo->flags, bo->refcnt, bo->handle, bo->name);
218
/* Widen a pointer to the 64-bit integer the DRM uapi structs expect;
 * the intermediate (unsigned long) cast avoids sign-extension on 32-bit. */
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
220
static inline void get_abs_timeout(struct drm_etnaviv_timespec *tv, uint64_t ns)
223
clock_gettime(CLOCK_MONOTONIC, &t);
224
tv->tv_sec = t.tv_sec + ns / NSEC_PER_SEC;
225
tv->tv_nsec = t.tv_nsec + ns % NSEC_PER_SEC;
226
if (tv->tv_nsec >= NSEC_PER_SEC) {
227
tv->tv_nsec -= NSEC_PER_SEC;
233
# include <memcheck.h>
236
/*
 * For tracking the backing memory (if valgrind enabled, we force a mmap
 * for the purposes of tracking)
 */
239
static inline void VG_BO_ALLOC(struct etna_bo *bo)
241
if (bo && RUNNING_ON_VALGRIND) {
242
VALGRIND_MALLOCLIKE_BLOCK(etna_bo_map(bo), bo->size, 0, 1);
246
static inline void VG_BO_FREE(struct etna_bo *bo)
248
VALGRIND_FREELIKE_BLOCK(bo->map, 0);
252
/*
 * For tracking bo structs that are in the buffer-cache, so that valgrind
 * doesn't attribute ownership to the first one to allocate the recycled
 * chunk.
 *
 * Note that the list_head in etna_bo is used to track the buffers in cache
 * so disable error reporting on the range while they are in cache so
 * valgrind doesn't squawk about list traversal.
 */
261
static inline void VG_BO_RELEASE(struct etna_bo *bo)
263
if (RUNNING_ON_VALGRIND) {
264
VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
265
VALGRIND_MAKE_MEM_NOACCESS(bo, sizeof(*bo));
266
VALGRIND_FREELIKE_BLOCK(bo->map, 0);
269
static inline void VG_BO_OBTAIN(struct etna_bo *bo)
271
if (RUNNING_ON_VALGRIND) {
272
VALGRIND_MAKE_MEM_DEFINED(bo, sizeof(*bo));
273
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
274
VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
278
/* No-op stubs used when valgrind support is compiled out. */
static inline void VG_BO_ALLOC(struct etna_bo *bo) {}
static inline void VG_BO_FREE(struct etna_bo *bo) {}
static inline void VG_BO_RELEASE(struct etna_bo *bo) {}
static inline void VG_BO_OBTAIN(struct etna_bo *bo) {}
284
#endif /* ETNAVIV_PRIV_H_ */