/*
 * Copyright (c) 2012 Rob Clark <robdclark@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * Helper lib to track gpu buffers contents/address, and map between gpu and
 * host address while decoding cmdstream/crashdumps
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#include "util/rb_tree.h"
41
/* for 'once' mode, for buffers containing cmdstream keep track per offset
42
* into buffer of which modes it has already been dumped;
51
static struct rb_tree buffers;
54
buffer_insert_cmp(const struct rb_node *n1, const struct rb_node *n2)
56
const struct buffer *buf1 = (const struct buffer *)n1;
57
const struct buffer *buf2 = (const struct buffer *)n2;
58
/* Note that gpuaddr comparisions can overflow an int: */
59
if (buf1->gpuaddr > buf2->gpuaddr)
61
else if (buf1->gpuaddr < buf2->gpuaddr)
67
buffer_search_cmp(const struct rb_node *node, const void *addrptr)
69
const struct buffer *buf = (const struct buffer *)node;
70
uint64_t gpuaddr = *(uint64_t *)addrptr;
71
if (buf->gpuaddr + buf->len <= gpuaddr)
73
else if (buf->gpuaddr > gpuaddr)
78
static struct buffer *
79
get_buffer(uint64_t gpuaddr)
83
return (struct buffer *)rb_tree_search(&buffers, &gpuaddr,
88
buffer_contains_hostptr(struct buffer *buf, void *hostptr)
90
return (buf->hostptr <= hostptr) && (hostptr < (buf->hostptr + buf->len));
94
gpuaddr(void *hostptr)
96
rb_tree_foreach (struct buffer, buf, &buffers, node) {
97
if (buffer_contains_hostptr(buf, hostptr))
98
return buf->gpuaddr + (hostptr - buf->hostptr);
104
gpubaseaddr(uint64_t gpuaddr)
106
struct buffer *buf = get_buffer(gpuaddr);
114
hostptr(uint64_t gpuaddr)
116
struct buffer *buf = get_buffer(gpuaddr);
118
return buf->hostptr + (gpuaddr - buf->gpuaddr);
124
hostlen(uint64_t gpuaddr)
126
struct buffer *buf = get_buffer(gpuaddr);
128
return buf->len + buf->gpuaddr - gpuaddr;
134
has_dumped(uint64_t gpuaddr, unsigned enable_mask)
139
struct buffer *b = get_buffer(gpuaddr);
143
assert(gpuaddr >= b->gpuaddr);
144
unsigned offset = gpuaddr - b->gpuaddr;
147
while (n < b->noffsets) {
148
if (offset == b->offsets[n].offset)
153
/* if needed, allocate a new offset entry: */
154
if (n == b->noffsets) {
156
assert(b->noffsets < ARRAY_SIZE(b->offsets));
157
b->offsets[n].dumped_mask = 0;
158
b->offsets[n].offset = offset;
161
if ((b->offsets[n].dumped_mask & enable_mask) == enable_mask)
164
b->offsets[n].dumped_mask |= enable_mask;
172
rb_tree_foreach_safe (struct buffer, buf, &buffers, node) {
173
rb_tree_remove(&buffers, &buf->node);
180
* Record buffer contents, takes ownership of hostptr (freed in
184
add_buffer(uint64_t gpuaddr, unsigned int len, void *hostptr)
186
struct buffer *buf = get_buffer(gpuaddr);
189
buf = calloc(sizeof(struct buffer), 1);
190
buf->gpuaddr = gpuaddr;
191
rb_tree_insert(&buffers, &buf->node, buffer_insert_cmp);
194
assert(buf->gpuaddr == gpuaddr);
196
buf->hostptr = hostptr;