@@ -255 +256 @@
 	VG_CLEAR(mmap_arg);
 	mmap_arg.handle = bo->handle;
 	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg)) {
-		ErrorF("%s: failed to achieve GTT offset for handle=%d: %d\n",
+		ErrorF("%s: failed to retrieve GTT offset for handle=%d: %d\n",
		       __FUNCTION__, bo->handle, errno);
		(void)__kgem_throttle_retire(kgem, 0);
		if (kgem_expire_cache(kgem))
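For context on the error path being reworded here: DRM_IOCTL_I915_GEM_MMAP_GTT hands back a fake mmap offset which is then passed to mmap() on the DRM fd. A minimal standalone sketch of that sequence (the helper name and error handling are illustrative, not the driver's; the driver clears the arg with its VG_CLEAR macro where this uses memset):

	#include <string.h>
	#include <sys/mman.h>
	#include <xf86drm.h>
	#include <i915_drm.h>

	/* Illustrative helper: map a GEM object through the GTT.
	 * The ioctl fills in a fake offset that mmap() on the DRM fd
	 * translates into a write-combining view of the object. */
	static void *map_gtt(int fd, uint32_t handle, size_t size)
	{
		struct drm_i915_gem_mmap_gtt mmap_arg;
		void *ptr;

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = handle;
		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg))
			return NULL;	/* e.g. aperture exhausted: retire and retry */

		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, mmap_arg.offset);
		return ptr == MAP_FAILED ? NULL : ptr;
	}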
@@ -874 +875 @@
 	kgem->next_request = __kgem_request_alloc();

 	DBG(("%s: cpu bo enabled %d: llc? %d, set-cache-level? %d, userptr? %d\n", __FUNCTION__,
-	     kgem->has_llc | kgem->has_userptr | kgem->has_cacheing,
+	     !DBG_NO_CPU && (kgem->has_llc | kgem->has_userptr | kgem->has_cacheing),
 	     kgem->has_llc, kgem->has_cacheing, kgem->has_userptr));

 	VG_CLEAR(aperture);
@@ +987 @@
+inline static uint32_t kgem_pitch_alignment(struct kgem *kgem, unsigned flags)
+{
+	if (flags & CREATE_PRIME)
+		...
+	if (flags & CREATE_SCANOUT)
+		...
+	return kgem->min_alignment;
+}
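The capture dropped the two return values of the new helper. A plausible reconstruction follows; the 256- and 64-byte constants are assumptions (256 bytes is a common pitch requirement for buffers shared across devices, 64 bytes for display scanout), not confirmed by the captured hunk:

	inline static uint32_t kgem_pitch_alignment(struct kgem *kgem, unsigned flags)
	{
		if (flags & CREATE_PRIME)
			return 256;	/* assumed: stricter pitch for shared dma-bufs */
		if (flags & CREATE_SCANOUT)
			return 64;	/* assumed: display engine requirement */
		return kgem->min_alignment;
	}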
@@ -984 +996 @@
 static uint32_t kgem_untiled_pitch(struct kgem *kgem,
 				   uint32_t width, uint32_t bpp,
 	...
 	width = ALIGN(width, 2) * bpp >> 3;
-	return ALIGN(width, scanout ? 64 : kgem->min_alignment);
+	return ALIGN(width, kgem_pitch_alignment(kgem, flags));
 }

 void kgem_get_tile_size(struct kgem *kgem, int tiling,
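The elided parameter line is where the trailing bool scanout of kgem_untiled_pitch() becomes the unsigned flags word, so callers now pass their CREATE_* flags through unmodified. As a worked example of the arithmetic (values chosen for illustration): for width=1366 at 32 bpp, ALIGN(1366, 2) * 32 >> 3 = 5464 bytes; an assumed 256-byte PRIME alignment rounds that up to 5632, whereas a 64-byte scanout alignment gives 5504.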
@@ -1056 +1068 @@
 			tile_width = 2 * bpp >> 3;
 			tile_width = ALIGN(tile_width,
-					   scanout ? 64 : kgem->min_alignment);
+					   kgem_pitch_alignment(kgem, flags));
 			tile_height = 2;
 	...
 	} else switch (tiling) {
 	...
 	case I915_TILING_NONE:
 		tile_width = 2 * bpp >> 3;
 		tile_width = ALIGN(tile_width,
-				   scanout ? 64 : kgem->min_alignment);
+				   kgem_pitch_alignment(kgem, flags));
 		tile_height = 2;
 	...
 	case I915_TILING_X:
@@ -1328 +1340 @@
 	DBG(("%s: handle=%d, fb=%d (reusable=%d)\n",
 	     __FUNCTION__, bo->handle, bo->delta, bo->reusable));
 	if (bo->delta) {
+		/* XXX will leak if we are not DRM_MASTER. *shrug* */
 		drmModeRmFB(kgem->fd, bo->delta);
@@ -1376 +1389 @@
 	DBG(("%s: num_pages=%d, flags=%x\n", __FUNCTION__, num_pages, flags));

+	if ((kgem->has_cacheing | kgem->has_userptr) == 0)
+		...
+
 	if (list_is_empty(&kgem->snoop)) {
 		DBG(("%s: inactive and cache empty\n", __FUNCTION__));
 		if (!__kgem_throttle_retire(kgem, flags)) {
@@ +2787 @@
+struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size)
+{
+#ifdef DRM_IOCTL_PRIME_FD_TO_HANDLE
+	struct drm_prime_handle args;
+	struct drm_i915_gem_get_tiling tiling;
+	...
+	DBG(("%s(name=%d)\n", __FUNCTION__, name));
+	...
+	if (drmIoctl(kgem->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
+		...
+	tiling.handle = args.handle;
+	if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &tiling)) {
+		gem_close(kgem->fd, args.handle);
+		...
+	DBG(("%s: new handle=%d, tiling=%d\n", __FUNCTION__,
+	     args.handle, tiling.tiling_mode));
+	bo = __kgem_bo_alloc(args.handle, NUM_PAGES(size));
+	...
+		gem_close(kgem->fd, args.handle);
+	...
+	bo->tiling = tiling.tiling_mode;
+	bo->reusable = false;
+	...
+	debug_alloc__bo(kgem, bo);
+	...
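Many lines of the new import function were not captured. A hedged reconstruction under stated assumptions: the standard struct drm_prime_handle layout (fd/handle/flags), NULL-returning error paths, and the arg setup before the first ioctl; the driver's exact body may differ:

	struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size)
	{
	#ifdef DRM_IOCTL_PRIME_FD_TO_HANDLE
		struct drm_prime_handle args;
		struct drm_i915_gem_get_tiling tiling;
		struct kgem_bo *bo;

		DBG(("%s(name=%d)\n", __FUNCTION__, name));

		VG_CLEAR(args);
		args.fd = name;	/* the prime "name" is a dma-buf file descriptor */
		args.flags = 0;
		if (drmIoctl(kgem->fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
			return NULL;

		VG_CLEAR(tiling);
		tiling.handle = args.handle;
		if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &tiling)) {
			gem_close(kgem->fd, args.handle);
			return NULL;
		}

		DBG(("%s: new handle=%d, tiling=%d\n", __FUNCTION__,
		     args.handle, tiling.tiling_mode));
		bo = __kgem_bo_alloc(args.handle, NUM_PAGES(size));
		if (bo == NULL) {
			gem_close(kgem->fd, args.handle);
			return NULL;
		}

		/* An imported buffer must never be recycled into the bo caches. */
		bo->tiling = tiling.tiling_mode;
		bo->reusable = false;

		debug_alloc__bo(kgem, bo);
		return bo;
	#else
		return NULL;
	#endif
	}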
@@ +2827 @@
+int kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo)
+{
+#ifdef DRM_IOCTL_PRIME_HANDLE_TO_FD
+	struct drm_prime_handle args;
+	...
+	args.handle = bo->handle;
+	args.flags = DRM_CLOEXEC;
+
+	if (drmIoctl(kgem->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
+		...
+	bo->reusable = false;
+	...
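The matching export path, with the uncaptured lines filled in under the same assumptions (the return value is presumably the dma-buf fd, or -1 on failure):

	int kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo)
	{
	#ifdef DRM_IOCTL_PRIME_HANDLE_TO_FD
		struct drm_prime_handle args;

		VG_CLEAR(args);
		args.handle = bo->handle;
		args.flags = DRM_CLOEXEC;

		if (drmIoctl(kgem->fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
			return -1;

		/* Once exported, another device may hold a reference;
		 * drop the bo from the reuse pools. */
		bo->reusable = false;
		return args.fd;
	#else
		return -1;
	#endif
	}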
@@ -2771 +2846 @@
 struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags)
 {
 	struct kgem_bo *bo;
@@ -2928 +3003 @@
-	size = kgem_surface_size(kgem, false, false,
+	size = kgem_surface_size(kgem, false, 0,
 				 width, height, bpp,
 				 I915_TILING_NONE, &pitch);
 	if (size > 0 && size <= kgem->max_cpu_size)
@@ -2943 +3018 @@
-	size = kgem_surface_size(kgem, false, false,
+	size = kgem_surface_size(kgem, false, 0,
 				 width, height, bpp,
 				 kgem_choose_tiling(kgem, I915_TILING_X,
 						    width, height, bpp),
@@ -2993 +3068 @@
 	if (tiling < 0)
 		tiling = -tiling, flags |= CREATE_EXACT;

-	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, temp?=%d)\n", __FUNCTION__,
+	DBG(("%s(%dx%d, bpp=%d, tiling=%d, exact=%d, inactive=%d, cpu-mapping=%d, gtt-mapping=%d, scanout?=%d, prime?=%d, temp?=%d)\n", __FUNCTION__,
 	     width, height, bpp, tiling,
 	     !!(flags & CREATE_EXACT),
 	     !!(flags & CREATE_INACTIVE),
 	     !!(flags & CREATE_CPU_MAP),
 	     !!(flags & CREATE_GTT_MAP),
 	     !!(flags & CREATE_SCANOUT),
+	     !!(flags & CREATE_PRIME),
 	     !!(flags & CREATE_TEMPORARY)));
@@ -3005 +3081 @@
-	size = kgem_surface_size(kgem,
-				 kgem->has_relaxed_fencing,
-				 flags & CREATE_SCANOUT,
+	size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
 				 width, height, bpp, tiling, &pitch);
 	assert(size && size <= kgem->max_object_size);
 	size /= PAGE_SIZE;
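These call-site hunks all make the same mechanical change: kgem_surface_size() now takes the whole flags word where it previously took a lone scanout bool (hence the false→0 at sites that passed no flags), letting it derive the pitch alignment from CREATE_PRIME/CREATE_SCANOUT itself. A simplified standalone model of the untiled-size computation, illustrative only and not the driver's actual function:

	#include <stdint.h>

	#define MODEL_PAGE_SIZE 4096
	#define MODEL_ALIGN(v, a) (((v) + (a) - 1) & ~((uint32_t)(a) - 1))

	/* Simplified model: pitch is the byte width rounded up to the
	 * flag-derived alignment; size is pitch*height rounded to pages. */
	static uint32_t surface_size_linear(uint32_t width, uint32_t height,
					    uint32_t bpp, uint32_t pitch_align,
					    uint32_t *pitch)
	{
		*pitch = MODEL_ALIGN(width * bpp >> 3, pitch_align);
		return MODEL_ALIGN(*pitch * height, MODEL_PAGE_SIZE);
	}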
@@ -3020 +3094 @@
 		tiled_height = kgem_aligned_height(kgem, height, I915_TILING_Y);
-		untiled_pitch = kgem_untiled_pitch(kgem,
-						   width, bpp,
-						   flags & CREATE_SCANOUT);
+		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);

 		list_for_each_entry(bo, &kgem->large, list) {
 			assert(!bo->purged);
@@ -3225 +3297 @@
 	if ((flags & CREATE_EXACT) == 0) { /* allow an active near-miss? */
-		untiled_pitch = kgem_untiled_pitch(kgem,
-						   width, bpp,
-						   flags & CREATE_SCANOUT);
+		untiled_pitch = kgem_untiled_pitch(kgem, width, bpp, flags);
 	...
 		while (--i >= 0) {
-			tiled_height = kgem_surface_size(kgem,
-							 kgem->has_relaxed_fencing,
-							 flags & CREATE_SCANOUT,
+			tiled_height = kgem_surface_size(kgem, kgem->has_relaxed_fencing, flags,
 							 width, height, bpp, tiling, &pitch);
 			cache = active(kgem, tiled_height / PAGE_SIZE, i);
 			tiled_height = kgem_aligned_height(kgem, height, i);
@@ +3823 @@
+void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo)
+{
+	...
+	DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__,
+	     bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain));
+
+	assert(!bo->purged);
+	assert(bo->proxy == NULL);
+	assert(list_is_empty(&bo->list));
+
+	if (bo->tiling == I915_TILING_NONE && !bo->scanout && kgem->has_llc) {
+		DBG(("%s: converting request for GTT map into CPU map\n",
+		...
+		return kgem_bo_map__cpu(kgem, bo);
+	...
+	if (IS_CPU_MAP(bo->map))
+		kgem_bo_release_map(kgem, bo);
+	...
+	assert(kgem_bo_size(bo) <= kgem->aperture_mappable / 2);
+
+	kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo));
+
+	ptr = __kgem_bo_map__gtt(kgem, bo);
+	...
+	/* Cache this mapping to avoid the overhead of an
+	 * excruciatingly slow GTT pagefault. This is more an
+	 * issue with compositing managers which need to frequently
+	 * flush CPU damage to their GPU bo.
+	...
+	DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle));
@@ -3749 +3865 @@
 void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo)
 	...
 	assert(!bo->purged);
 	assert(bo->proxy == NULL);
+	assert(list_is_empty(&bo->list));
 	assert(bo->exec == NULL);
-	assert(list_is_empty(&bo->list));

 	if (bo->tiling == I915_TILING_NONE && !bo->scanout &&
 	    (kgem->has_llc || bo->domain == DOMAIN_CPU)) {