/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer manager using the old texture memory manager.
 *
 * \author Jose Fonseca <jfonseca@vmware.com>
 */
#include "pipe/p_defines.h"
37
#include "util/u_debug.h"
38
#include "os/os_thread.h"
39
#include "util/u_memory.h"
40
#include "util/list.h"
41
#include "util/u_mm.h"
42
#include "pb_buffer.h"
43
#include "pb_bufmgr.h"
47
* Convenience macro (type safe).
49
#define SUPER(__derived) (&(__derived)->base)
54
struct pb_manager base;
59
struct mem_block *heap;
63
struct pb_buffer *buffer;
68
static inline struct mm_pb_manager *
69
mm_pb_manager(struct pb_manager *mgr)
72
return (struct mm_pb_manager *)mgr;
78
struct pb_buffer base;
80
struct mm_pb_manager *mgr;
82
struct mem_block *block;
86
static inline struct mm_buffer *
87
mm_buffer(struct pb_buffer *buf)
90
return (struct mm_buffer *)buf;
95
mm_buffer_destroy(void *winsys, struct pb_buffer *buf)
97
struct mm_buffer *mm_buf = mm_buffer(buf);
98
struct mm_pb_manager *mm = mm_buf->mgr;
100
assert(!pipe_is_referenced(&mm_buf->base.reference));
102
mtx_lock(&mm->mutex);
103
u_mmFreeMem(mm_buf->block);
105
mtx_unlock(&mm->mutex);
110
mm_buffer_map(struct pb_buffer *buf,
111
enum pb_usage_flags flags,
114
struct mm_buffer *mm_buf = mm_buffer(buf);
115
struct mm_pb_manager *mm = mm_buf->mgr;
117
/* XXX: it will be necessary to remap here to propagate flush_ctx */
119
return (unsigned char *) mm->map + mm_buf->block->ofs;
124
mm_buffer_unmap(struct pb_buffer *buf)
130
static enum pipe_error
131
mm_buffer_validate(struct pb_buffer *buf,
132
struct pb_validate *vl,
133
enum pb_usage_flags flags)
135
struct mm_buffer *mm_buf = mm_buffer(buf);
136
struct mm_pb_manager *mm = mm_buf->mgr;
137
return pb_validate(mm->buffer, vl, flags);
142
mm_buffer_fence(struct pb_buffer *buf,
143
struct pipe_fence_handle *fence)
145
struct mm_buffer *mm_buf = mm_buffer(buf);
146
struct mm_pb_manager *mm = mm_buf->mgr;
147
pb_fence(mm->buffer, fence);
152
mm_buffer_get_base_buffer(struct pb_buffer *buf,
153
struct pb_buffer **base_buf,
156
struct mm_buffer *mm_buf = mm_buffer(buf);
157
struct mm_pb_manager *mm = mm_buf->mgr;
158
pb_get_base_buffer(mm->buffer, base_buf, offset);
159
*offset += mm_buf->block->ofs;
163
static const struct pb_vtbl
170
mm_buffer_get_base_buffer
174
static struct pb_buffer *
175
mm_bufmgr_create_buffer(struct pb_manager *mgr,
177
const struct pb_desc *desc)
179
struct mm_pb_manager *mm = mm_pb_manager(mgr);
180
struct mm_buffer *mm_buf;
182
/* We don't handle alignments larger then the one initially setup */
183
assert(pb_check_alignment(desc->alignment, 1u << mm->align2));
184
if(!pb_check_alignment(desc->alignment, 1u << mm->align2))
187
mtx_lock(&mm->mutex);
189
mm_buf = CALLOC_STRUCT(mm_buffer);
191
mtx_unlock(&mm->mutex);
195
pipe_reference_init(&mm_buf->base.reference, 1);
196
mm_buf->base.alignment_log2 = util_logbase2(desc->alignment);
197
mm_buf->base.usage = desc->usage;
198
mm_buf->base.size = size;
200
mm_buf->base.vtbl = &mm_buffer_vtbl;
204
mm_buf->block = u_mmAllocMem(mm->heap, (int)size, (int)mm->align2, 0);
207
debug_printf("warning: heap full\n");
208
mmDumpMemInfo(mm->heap);
211
mtx_unlock(&mm->mutex);
215
/* Some sanity checks */
216
assert(0 <= (pb_size)mm_buf->block->ofs && (pb_size)mm_buf->block->ofs < mm->size);
217
assert(size <= (pb_size)mm_buf->block->size && (pb_size)mm_buf->block->ofs + (pb_size)mm_buf->block->size <= mm->size);
219
mtx_unlock(&mm->mutex);
220
return SUPER(mm_buf);
225
mm_bufmgr_flush(struct pb_manager *mgr)
232
mm_bufmgr_destroy(struct pb_manager *mgr)
234
struct mm_pb_manager *mm = mm_pb_manager(mgr);
236
mtx_lock(&mm->mutex);
238
u_mmDestroy(mm->heap);
240
pb_unmap(mm->buffer);
241
pb_reference(&mm->buffer, NULL);
243
mtx_unlock(&mm->mutex);
250
mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
251
pb_size size, pb_size align2)
253
struct mm_pb_manager *mm;
258
mm = CALLOC_STRUCT(mm_pb_manager);
262
mm->base.destroy = mm_bufmgr_destroy;
263
mm->base.create_buffer = mm_bufmgr_create_buffer;
264
mm->base.flush = mm_bufmgr_flush;
267
mm->align2 = align2; /* 64-byte alignment */
269
(void) mtx_init(&mm->mutex, mtx_plain);
273
mm->map = pb_map(mm->buffer,
275
PB_USAGE_CPU_WRITE, NULL);
279
mm->heap = u_mmInit(0, (int)size);
287
u_mmDestroy(mm->heap);
289
pb_unmap(mm->buffer);
296
mm_bufmgr_create(struct pb_manager *provider,
297
pb_size size, pb_size align2)
299
struct pb_buffer *buffer;
300
struct pb_manager *mgr;
306
memset(&desc, 0, sizeof(desc));
307
desc.alignment = 1 << align2;
309
buffer = provider->create_buffer(provider, size, &desc);
313
mgr = mm_bufmgr_create_from_buffer(buffer, size, align2);
315
pb_reference(&buffer, NULL);