~mmach/netext73/mesa-haswell


Viewing changes to src/vulkan/wsi/wsi_common_x11.c

  • Committer: mmach
  • Date: 2022-09-22 19:56:13 UTC
  • Revision ID: netbit73@gmail.com-20220922195613-wtik9mmy20tmor0i

/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>
#include <xcb/shm.h>

#include "util/macros.h"
#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <xf86drm.h>
#include "drm-uapi/drm_fourcc.h"
#include "util/hash_table.h"
#include "util/os_time.h"
#include "util/u_debug.h"
#include "util/u_thread.h"
#include "util/xmlconfig.h"

#include "vk_instance.h"
#include "vk_physical_device.h"
#include "vk_util.h"
#include "vk_enum_to_str.h"
#include "wsi_common_entrypoints.h"
#include "wsi_common_private.h"
#include "wsi_common_queue.h"

#ifdef HAVE_SYS_SHM_H
#include <sys/ipc.h>
#include <sys/shm.h>
#endif

struct wsi_x11_connection {
   bool has_dri3;
   bool has_dri3_modifiers;
   bool has_present;
   bool is_proprietary_x11;
   bool is_xwayland;
   bool has_mit_shm;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t                              mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};


/**
 * Wrapper around xcb_dri3_open. Returns the opened fd or -1 on error.
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t       cookie;
   xcb_dri3_open_reply_t        *reply;
   int                          fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   /* According to the DRI3 extension, nfd must equal one. */
   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}

/**
 * Checks compatibility of the device wsi_dev with the device the X server
 * provides via DRI3.
 *
 * This returns true when no device could be retrieved from the X server, or
 * when the information for the X server's device indicates that it is the
 * same device.
 */
static bool
wsi_x11_check_dri3_compatible(const struct wsi_device *wsi_dev,
                              xcb_connection_t *conn)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   /* Open the DRI3 device from the X server. If we do not retrieve one we
    * assume our local device is compatible.
    */
   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd == -1)
      return true;

   bool match = wsi_device_matches_drm_fd(wsi_dev, dri3_fd);

   close(dri3_fd);

   return match;
}
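
/* Xwayland names its RandR outputs "XWAYLAND0", "XWAYLAND1", and so on, so
 * checking the first output's name below is a reasonable heuristic for
 * detecting it. Note this keys off an Xwayland implementation detail rather
 * than any protocol guarantee.
 */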

static bool
wsi_x11_detect_xwayland(xcb_connection_t *conn)
{
   xcb_randr_query_version_cookie_t ver_cookie =
      xcb_randr_query_version_unchecked(conn, 1, 3);
   xcb_randr_query_version_reply_t *ver_reply =
      xcb_randr_query_version_reply(conn, ver_cookie, NULL);
   bool has_randr_v1_3 = ver_reply && (ver_reply->major_version > 1 ||
                                       ver_reply->minor_version >= 3);
   free(ver_reply);

   if (!has_randr_v1_3)
      return false;

   const xcb_setup_t *setup = xcb_get_setup(conn);
   xcb_screen_iterator_t iter = xcb_setup_roots_iterator(setup);

   xcb_randr_get_screen_resources_current_cookie_t gsr_cookie =
      xcb_randr_get_screen_resources_current_unchecked(conn, iter.data->root);
   xcb_randr_get_screen_resources_current_reply_t *gsr_reply =
      xcb_randr_get_screen_resources_current_reply(conn, gsr_cookie, NULL);

   if (!gsr_reply || gsr_reply->num_outputs == 0) {
      free(gsr_reply);
      return false;
   }

   xcb_randr_output_t *randr_outputs =
      xcb_randr_get_screen_resources_current_outputs(gsr_reply);
   xcb_randr_get_output_info_cookie_t goi_cookie =
      xcb_randr_get_output_info(conn, randr_outputs[0], gsr_reply->config_timestamp);
   free(gsr_reply);

   xcb_randr_get_output_info_reply_t *goi_reply =
      xcb_randr_get_output_info_reply(conn, goi_cookie, NULL);
   if (!goi_reply) {
      return false;
   }

   char *output_name = (char*)xcb_randr_get_output_info_name(goi_reply);
   bool is_xwayland = output_name && strncmp(output_name, "XWAYLAND", 8) == 0;
   free(goi_reply);

   return is_xwayland;
}
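
/* All of the xcb_query_extension() requests below are issued first and their
 * replies collected afterwards, so probing the connection should cost roughly
 * one round-trip to the X server rather than one per extension.
 */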

static struct wsi_x11_connection *
wsi_x11_connection_create(struct wsi_device *wsi_dev,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, randr_cookie, amd_cookie, nv_cookie, shm_cookie, sync_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *randr_reply, *amd_reply, *nv_reply, *shm_reply = NULL;
   bool has_dri3_v1_2 = false;
   bool has_present_v1_2 = false;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(&wsi_dev->instance_alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   sync_cookie = xcb_query_extension(conn, 4, "SYNC");
   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "Present");
   randr_cookie = xcb_query_extension(conn, 5, "RANDR");

   if (wsi_dev->sw)
      shm_cookie = xcb_query_extension(conn, 7, "MIT-SHM");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this ends
    * up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and is
    * running on the discrete card with the proprietary DDX.  In this case, we
    * really don't want to print the warning because it just confuses users.
    * As a heuristic to detect this case, we check for a couple of proprietary
    * X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   xcb_discard_reply(conn, sync_cookie.sequence);
   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   randr_reply = xcb_query_extension_reply(conn, randr_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   if (wsi_dev->sw)
      shm_reply = xcb_query_extension_reply(conn, shm_cookie, NULL);
   if (!dri3_reply || !pres_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(randr_reply);
      free(amd_reply);
      free(nv_reply);
      if (wsi_dev->sw)
         free(shm_reply);
      vk_free(&wsi_dev->instance_alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_dri3) {
      xcb_dri3_query_version_cookie_t ver_cookie;
      xcb_dri3_query_version_reply_t *ver_reply;

      ver_cookie = xcb_dri3_query_version(conn, 1, 2);
      ver_reply = xcb_dri3_query_version_reply(conn, ver_cookie, NULL);
      has_dri3_v1_2 = ver_reply != NULL &&
         (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   wsi_conn->has_present = pres_reply->present != 0;
#ifdef HAVE_DRI3_MODIFIERS
   if (wsi_conn->has_present) {
      xcb_present_query_version_cookie_t ver_cookie;
      xcb_present_query_version_reply_t *ver_reply;

      ver_cookie = xcb_present_query_version(conn, 1, 2);
      ver_reply = xcb_present_query_version_reply(conn, ver_cookie, NULL);
      has_present_v1_2 =
        (ver_reply->major_version > 1 || ver_reply->minor_version >= 2);
      free(ver_reply);
   }
#endif

   if (randr_reply && randr_reply->present != 0)
      wsi_conn->is_xwayland = wsi_x11_detect_xwayland(conn);
   else
      wsi_conn->is_xwayland = false;

   wsi_conn->has_dri3_modifiers = has_dri3_v1_2 && has_present_v1_2;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   wsi_conn->has_mit_shm = false;
   if (wsi_conn->has_dri3 && wsi_conn->has_present && wsi_dev->sw) {
      bool has_mit_shm = shm_reply->present != 0;

      xcb_shm_query_version_cookie_t ver_cookie;
      xcb_shm_query_version_reply_t *ver_reply;

      ver_cookie = xcb_shm_query_version(conn);
      ver_reply = xcb_shm_query_version_reply(conn, ver_cookie, NULL);

      has_mit_shm = ver_reply->shared_pixmaps;
      free(ver_reply);
      xcb_void_cookie_t cookie;
      xcb_generic_error_t *error;

      if (has_mit_shm) {
         cookie = xcb_shm_detach_checked(conn, 0);
         if ((error = xcb_request_check(conn, cookie))) {
            if (error->error_code != BadRequest)
               wsi_conn->has_mit_shm = true;
            free(error);
         }
      }
      free(shm_reply);
   }

   free(dri3_reply);
   free(pres_reply);
   free(randr_reply);
   free(amd_reply);
   free(nv_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(struct wsi_device *wsi_dev,
                           struct wsi_x11_connection *conn)
{
   vk_free(&wsi_dev->instance_alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
  if (wsi_conn->has_dri3)
    return true;
  if (!wsi_conn->is_proprietary_x11) {
    fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                    "Note: you can probably enable DRI3 in your Xorg config\n");
  }
  return false;
}

/**
 * Get the internal struct representing an xcb_connection_t.
 *
 * This can allocate the struct, but the caller does not own it: the struct is
 * freed in wsi_x11_finish_wsi via the hash table it is inserted into.
 *
 * If the allocation fails NULL is returned.
 */
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(wsi_dev, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(wsi_dev, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

struct surface_format {
   VkFormat format;
   unsigned bits_per_rgb;
};

static const struct surface_format formats[] = {
   { VK_FORMAT_B8G8R8A8_SRGB,             8 },
   { VK_FORMAT_B8G8R8A8_UNORM,            8 },
   { VK_FORMAT_A2R10G10B10_UNORM_PACK32, 10 },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
   VK_PRESENT_MODE_FIFO_RELAXED_KHR,
};
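
/* The tables above are in order of preference. get_sorted_vk_formats() keeps
 * only the entries whose bits_per_rgb match the window's visual, so e.g. a
 * depth-30 (10 bits per channel) visual exposes only
 * VK_FORMAT_A2R10G10B10_UNORM_PACK32.
 */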

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator (depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, NULL);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}
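
/* Worked example for visual_has_alpha() below: a depth-32 TrueColor visual
 * with 8-bit channels has rgb_mask covering bits 0-23 while all_mask is
 * 0xffffffff, leaving 8 bits over for alpha, so it returns true. At depth 24
 * no bits are left over and it returns false.
 */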

static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

static bool
visual_supported(xcb_visualtype_t *visual)
{
   if (!visual)
      return false;

   return visual->bits_per_rgb_value == 8 || visual->bits_per_rgb_value == 10;
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                               uint32_t queueFamilyIndex,
                                               xcb_connection_t *connection,
                                               xcb_visualid_t visual_id)
{
   VK_FROM_HANDLE(vk_physical_device, pdevice, physicalDevice);
   struct wsi_device *wsi_device = pdevice->wsi_device;
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_device->sw) {
      if (!wsi_x11_check_for_dri3(wsi_conn))
         return false;
   }

   if (!visual_supported(connection_get_visualtype(connection, visual_id)))
      return false;

   return true;
}

VKAPI_ATTR VkBool32 VKAPI_CALL
wsi_GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
                                                uint32_t queueFamilyIndex,
                                                Display *dpy,
                                                VisualID visualID)
{
   return wsi_GetPhysicalDeviceXcbPresentationSupportKHR(physicalDevice,
                                                         queueFamilyIndex,
                                                         XGetXCBConnection(dpy),
                                                         visualID);
}

static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_device->sw) {
      if (!wsi_x11_check_for_dri3(wsi_conn)) {
         *pSupported = false;
         return VK_SUCCESS;
      }
   }

   if (!visual_supported(get_visualtype_for_window(conn, window, NULL))) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static uint32_t
x11_get_min_image_count(const struct wsi_device *wsi_device)
{
   if (wsi_device->x11.override_minImageCount)
      return wsi_device->x11.override_minImageCount;

   /* For IMMEDIATE and FIFO, most games work in a pipelined manner where they
    * can produce frames at a rate of 1/MAX(CPU duration, GPU duration), but
    * the render latency is CPU duration + GPU duration.
    *
    * This means that with scanout from pageflipping we need 3 frames to run
    * at full speed:
    * 1) CPU rendering work
    * 2) GPU rendering work
    * 3) scanout
    *
    * Once we have a nonblocking acquire that returns a semaphore we can merge
    * 1 and 3. Hence the ideal implementation needs only 2 images, but games
    * cannot tell that we currently do not have an ideal implementation and
    * that they hence need to allocate 3 images. So let us do it for them.
    *
    * This is a tradeoff as it uses more memory than needed for non-fullscreen
    * and non-performance intensive applications.
    */
   return 3;
}

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             struct wsi_device *wsi_device,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   }
   free(err);
   free(geom);
   if (!geom)
      return VK_ERROR_SURFACE_LOST_KHR;

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   caps->minImageCount = x11_get_min_image_count(wsi_device);
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_STORAGE_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
      VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              struct wsi_device *wsi_device,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   VkResult result =
      x11_surface_get_capabilities(icd_surface, wsi_device,
                                   &caps->surfaceCapabilities);

   if (result != VK_SUCCESS)
      return result;

   vk_foreach_struct(ext, caps->pNext) {
      switch (ext->sType) {
      case VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR: {
         VkSurfaceProtectedCapabilitiesKHR *protected = (void *)ext;
         protected->supportsProtected = VK_FALSE;
         break;
      }

      default:
         /* Ignored */
         break;
      }
   }

   return result;
}

static bool
get_sorted_vk_formats(VkIcdSurfaceBase *surface, struct wsi_device *wsi_device,
                      VkFormat *sorted_formats, unsigned *count)
{
   xcb_connection_t *conn = x11_surface_get_connection(surface);
   xcb_window_t window = x11_surface_get_window(surface);
   xcb_visualtype_t *visual = get_visualtype_for_window(conn, window, NULL);
   if (!visual)
      return false;

   *count = 0;
   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      if (formats[i].bits_per_rgb == visual->bits_per_rgb_value)
         sorted_formats[(*count)++] = formats[i].format;
   }

   if (wsi_device->force_bgra8_unorm_first) {
      for (unsigned i = 0; i < *count; i++) {
         if (sorted_formats[i] == VK_FORMAT_B8G8R8A8_UNORM) {
            sorted_formats[i] = sorted_formats[0];
            sorted_formats[0] = VK_FORMAT_B8G8R8A8_UNORM;
            break;
         }
      }
   }

   return true;
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormatKHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   unsigned count;
   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
      return VK_ERROR_SURFACE_LOST_KHR;

   for (unsigned i = 0; i < count; i++) {
      vk_outarray_append_typed(VkSurfaceFormatKHR, &out, f) {
         f->format = sorted_formats[i];
         f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                         struct wsi_device *wsi_device,
                         const void *info_next,
                         uint32_t *pSurfaceFormatCount,
                         VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE_TYPED(VkSurfaceFormat2KHR, out,
                          pSurfaceFormats, pSurfaceFormatCount);

   unsigned count;
   VkFormat sorted_formats[ARRAY_SIZE(formats)];
   if (!get_sorted_vk_formats(surface, wsi_device, sorted_formats, &count))
      return VK_ERROR_SURFACE_LOST_KHR;

   for (unsigned i = 0; i < count; i++) {
      vk_outarray_append_typed(VkSurfaceFormat2KHR, &out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = sorted_formats[i];
         f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

static VkResult
x11_surface_get_present_rectangles(VkIcdSurfaceBase *icd_surface,
                                   struct wsi_device *wsi_device,
                                   uint32_t* pRectCount,
                                   VkRect2D* pRects)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   VK_OUTARRAY_MAKE_TYPED(VkRect2D, out, pRects, pRectCount);

   vk_outarray_append_typed(VkRect2D, &out, rect) {
      xcb_generic_error_t *err = NULL;
      xcb_get_geometry_cookie_t geom_cookie = xcb_get_geometry(conn, window);
      xcb_get_geometry_reply_t *geom =
         xcb_get_geometry_reply(conn, geom_cookie, &err);
      free(err);
      if (geom) {
         *rect = (VkRect2D) {
            .offset = { 0, 0 },
            .extent = { geom->width, geom->height },
         };
      }
      free(geom);
      if (!geom)
         return VK_ERROR_SURFACE_LOST_KHR;
   }

   return vk_outarray_status(&out);
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateXcbSurfaceKHR(VkInstance _instance,
                        const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                        const VkAllocationCallbacks *pAllocator,
                        VkSurfaceKHR *pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   VkIcdSurfaceXcb *surface;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR);

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

VKAPI_ATTR VkResult VKAPI_CALL
wsi_CreateXlibSurfaceKHR(VkInstance _instance,
                         const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                         const VkAllocationCallbacks *pAllocator,
                         VkSurfaceKHR *pSurface)
{
   VK_FROM_HANDLE(vk_instance, instance, _instance);
   VkIcdSurfaceXlib *surface;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR);

   surface = vk_alloc2(&instance->alloc, pAllocator, sizeof *surface, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

struct x11_image {
   struct wsi_image                          base;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   bool                                      present_queued;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
   uint32_t                                  serial;
   xcb_shm_seg_t                             shmseg;
   int                                       shmid;
   uint8_t *                                 shmaddr;
};

struct x11_swapchain {
   struct wsi_swapchain                         base;

   bool                                         has_dri3_modifiers;
   bool                                         has_mit_shm;

   xcb_connection_t *                           conn;
   xcb_window_t                                 window;
   xcb_gc_t                                     gc;
   uint32_t                                     depth;
   VkExtent2D                                   extent;

   xcb_present_event_t                          event_id;
   xcb_special_event_t *                        special_event;
   uint64_t                                     send_sbc;
   uint64_t                                     last_present_msc;
   uint32_t                                     stamp;
   atomic_int                                   sent_image_count;

   bool                                         has_present_queue;
   bool                                         has_acquire_queue;
   VkResult                                     status;
   bool                                         copy_is_suboptimal;
   struct wsi_queue                             present_queue;
   struct wsi_queue                             acquire_queue;
   pthread_t                                    queue_manager;

   struct x11_image                             images[0];
};
VK_DEFINE_NONDISP_HANDLE_CASTS(x11_swapchain, base.base, VkSwapchainKHR,
                               VK_OBJECT_TYPE_SWAPCHAIN_KHR)
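
/* Threading model: in fifo (and mailbox) modes a dedicated queue_manager
 * thread pulls images from present_queue, submits them via Present, and hands
 * them back through acquire_queue once the server signals IDLE_NOTIFY. The
 * VkResult in `status` is the cross-thread error/suboptimal latch documented
 * below.
 */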

/**
 * Update the swapchain status with the result of an operation, and return
 * the combined status. The chain status will eventually be returned from
 * AcquireNextImage and QueuePresent.
 *
 * We make sure to 'stick' more pessimistic statuses: an out-of-date error
 * is permanent once seen, and every subsequent call will return this. If
 * this has not been seen, success will be returned.
 */
static VkResult
_x11_swapchain_result(struct x11_swapchain *chain, VkResult result,
                      const char *file, int line)
{
   /* Prioritise returning existing errors for consistency. */
   if (chain->status < 0)
      return chain->status;

   /* If we have a new error, mark it as permanent on the chain and return. */
   if (result < 0) {
#ifndef NDEBUG
      fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
              file, line, vk_Result_to_str(result));
#endif
      chain->status = result;
      return result;
   }

   /* Return temporary errors, but don't persist them. */
   if (result == VK_TIMEOUT || result == VK_NOT_READY)
      return result;

   /* Suboptimal isn't an error, but is a status which sticks to the swapchain
    * and is always returned rather than success.
    */
   if (result == VK_SUBOPTIMAL_KHR) {
#ifndef NDEBUG
      if (chain->status != VK_SUBOPTIMAL_KHR) {
         fprintf(stderr, "%s:%d: Swapchain status changed to %s\n",
                 file, line, vk_Result_to_str(result));
      }
#endif
      chain->status = result;
      return result;
   }

   /* No changes, so return the last status. */
   return chain->status;
}
#define x11_swapchain_result(chain, result) \
   _x11_swapchain_result(chain, result, __FILE__, __LINE__)
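
/* Note: the `result < 0` checks above rely on the VkResult convention that
 * all VK_ERROR_* codes are negative while success statuses (VK_SUCCESS,
 * VK_SUBOPTIMAL_KHR, VK_TIMEOUT, VK_NOT_READY, ...) are non-negative.
 */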

static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}

/**
 * Process an X11 Present event. Does not update chain->status.
 */
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_SUBOPTIMAL_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            chain->sent_image_count--;
            assert(chain->sent_image_count >= 0);
            if (chain->has_acquire_queue)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP) {
         unsigned i;
         for (i = 0; i < chain->base.image_count; i++) {
            struct x11_image *image = &chain->images[i];
            if (image->present_queued && image->serial == complete->serial)
               image->present_queued = false;
         }
         chain->last_present_msc = complete->msc;
      }

      VkResult result = VK_SUCCESS;
      switch (complete->mode) {
      case XCB_PRESENT_COMPLETE_MODE_COPY:
         if (chain->copy_is_suboptimal)
            result = VK_SUBOPTIMAL_KHR;
         break;
      case XCB_PRESENT_COMPLETE_MODE_FLIP:
         /* If we ever go from flipping to copying, the odds are very likely
          * that we could reallocate in a more optimal way if we didn't have
          * to care about scanout, so we always do this.
          */
         chain->copy_is_suboptimal = true;
         break;
#ifdef HAVE_DRI3_MODIFIERS
      case XCB_PRESENT_COMPLETE_MODE_SUBOPTIMAL_COPY:
         /* The winsys is now trying to flip directly and cannot due to our
          * configuration. Request the user reallocate.
          */
         result = VK_SUBOPTIMAL_KHR;
         break;
#endif
      default:
         break;
      }

      return result;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}
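
/* Convert a relative timeout into an absolute deadline, clamping so that a
 * caller passing UINT64_MAX ("wait forever") does not overflow the addition.
 */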
static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = os_time_get_nano();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}

/**
 * Acquire a ready-to-use image directly from our swapchain. If all images are
 * busy, wait until one becomes free or the timeout expires.
 */
static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;
   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return x11_swapchain_result(chain, VK_SUCCESS);
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return x11_swapchain_result(chain, VK_ERROR_SURFACE_LOST_KHR);
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return x11_swapchain_result(chain, VK_NOT_READY);

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
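            /* poll() takes milliseconds; the remaining timeout is in
             * nanoseconds.
             */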
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return x11_swapchain_result(chain, VK_TIMEOUT);
            if (ret == -1)
               return x11_swapchain_result(chain, VK_ERROR_OUT_OF_DATE_KHR);

            /* If a non-special event arrives, the fd will still poll as
             * readable. So recalculate the timeout now just in case.
             */
            uint64_t current_time = os_time_get_nano();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      /* Update the swapchain status here. We may catch non-fatal errors here,
       * in which case we need to update the status and continue.
       */
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
      result = x11_swapchain_result(chain, result);
      free(event);
      if (result < 0)
         return result;
   }
}

/**
 * Acquire a ready-to-use image from the acquire-queue. Only relevant in fifo
 * presentation mode.
 */
static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->has_acquire_queue);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result < 0 || result == VK_TIMEOUT) {
      /* On error, the thread has shut down, so safe to update chain->status.
       * Calling x11_swapchain_result with VK_TIMEOUT won't modify
       * chain->status so that is also safe.
       */
      return x11_swapchain_result(chain, result);
   } else if (chain->status < 0) {
      return chain->status;
   }

   assert(image_index < chain->base.image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return chain->status;
}

/**
 * Send image to X server via Present extension.
 */
static VkResult
x11_present_to_x11_dri3(struct x11_swapchain *chain, uint32_t image_index,
                        uint64_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR ||
       (chain->base.present_mode == VK_PRESENT_MODE_MAILBOX_KHR &&
        wsi_conn->is_xwayland) ||
       chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

#ifdef HAVE_DRI3_MODIFIERS
   if (chain->has_dri3_modifiers)
      options |= XCB_PRESENT_OPTION_SUBOPTIMAL;
#endif

   /* Poll for any available event and update the swapchain status. This could
    * update the status of the swapchain to SUBOPTIMAL or OUT_OF_DATE if the
    * associated X11 surface has been resized.
    */
   xcb_generic_event_t *event;
   while ((event = xcb_poll_for_special_event(chain->conn, chain->special_event))) {
      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
      result = x11_swapchain_result(chain, result);
      free(event);
      if (result < 0)
         return result;
   }

   xshmfence_reset(image->shm_fence);

   ++chain->sent_image_count;
   assert(chain->sent_image_count <= chain->base.image_count);
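
   /* The Present serial is the low 32 bits of send_sbc; the server echoes it
    * back in COMPLETE_NOTIFY events, which is how
    * x11_handle_dri3_present_event() matches completions to images.
    */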
   ++chain->send_sbc;
   image->present_queued = true;
   image->serial = (uint32_t) chain->send_sbc;

   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         image->serial,
                         0,                                    /* valid */
                         0,                                    /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         XCB_NONE,                             /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);

   xcb_flush(chain->conn);

   return x11_swapchain_result(chain, VK_SUCCESS);
}

/**
 * Send image to X server unaccelerated (software drivers).
 */
static VkResult
x11_present_to_x11_sw(struct x11_swapchain *chain, uint32_t image_index,
                      uint64_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   xcb_void_cookie_t cookie;
   void *myptr;
   size_t hdr_len = sizeof(xcb_put_image_request_t);
   int stride_b = image->base.row_pitches[0];
   size_t size = (hdr_len + stride_b * chain->extent.height) >> 2;
   uint64_t max_req_len = xcb_get_maximum_request_length(chain->conn);

   chain->base.wsi->MapMemory(chain->base.device,
                              image->base.memory,
                              0, 0, 0, &myptr);

   if (size < max_req_len) {
      cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                             chain->window,
                             chain->gc,
                             image->base.row_pitches[0] / 4,
                             chain->extent.height,
                             0,0,0,24,
                             image->base.row_pitches[0] * chain->extent.height,
                             myptr);
      xcb_discard_reply(chain->conn, cookie.sequence);
   } else {
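      /* xcb_get_maximum_request_length() is in units of 4 bytes, so the
       * payload budget per PutImage request is (max_req_len << 2) minus the
       * request header. Dividing by the row pitch gives how many complete
       * rows fit in one request; the image is uploaded in slices of that
       * height.
       */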
      int num_lines = ((max_req_len << 2) - hdr_len) / stride_b;
      int y_start = 0;
      int y_todo = chain->extent.height;
      while (y_todo) {
         int this_lines = MIN2(num_lines, y_todo);
         cookie = xcb_put_image(chain->conn, XCB_IMAGE_FORMAT_Z_PIXMAP,
                                chain->window,
                                chain->gc,
                                image->base.row_pitches[0] / 4,
                                this_lines,
                                0,y_start,0,24,
                                this_lines * stride_b,
                                (const uint8_t *)myptr + (y_start * stride_b));
         xcb_discard_reply(chain->conn, cookie.sequence);
         y_start += this_lines;
         y_todo -= this_lines;
      }
   }

   chain->base.wsi->UnmapMemory(chain->base.device, image->base.memory);
   xcb_flush(chain->conn);
   return x11_swapchain_result(chain, VK_SUCCESS);
}

/**
 * Send image to the X server for presentation at target_msc.
 */
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint64_t target_msc)
{
   if (chain->base.wsi->sw && !chain->has_mit_shm)
      return x11_present_to_x11_sw(chain, image_index, target_msc);
   return x11_present_to_x11_dri3(chain, image_index, target_msc);
}

/**
 * Acquire a ready-to-use image from the swapchain.
 *
 * Usually this means that the image is not waiting on presentation and that it
 * has been released by the X server to be used again by the consumer.
 */
static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       const VkAcquireNextImageInfoKHR *info,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint64_t timeout = info->timeout;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   /* For software drivers without shared memory we only render to a single image. */
   if (chain->base.wsi->sw && !chain->has_mit_shm) {
      *image_index = 0;
      return VK_SUCCESS;
   }

   if (chain->has_acquire_queue) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}

/**
 * Queue a new presentation of an image that was previously acquired by the
 * consumer.
 *
 * Note that in immediate presentation mode this does not really queue the
 * presentation but directly asks the X server to show it.
 */
static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   /* If the swapchain is in an error state, don't go any further. */
   if (chain->status < 0)
      return chain->status;

   chain->images[image_index].busy = true;
   if (chain->has_present_queue) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      /* No present queue means immediate mode, so we present immediately. */
      return x11_present_to_x11(chain, image_index, 0);
   }
}

/**
 * Decides whether an early wait on buffer fences before buffer submission is
 * required. That is the case for:
 *   - Mailbox mode, as otherwise the latest image in the queue might not be
 *     fully rendered at present time, which could lead to missing a frame.
 *   - Immediate mode under Xwayland, as it works practically the same as
 *     mailbox mode, using the mailbox mechanism of Wayland. Sending a buffer
 *     with fences not yet signalled can make the compositor miss a frame when
 *     compositing the final image with this buffer.
 *
 * Note though that early waits can be disabled in general on Xwayland by
 * setting the 'vk_xwayland_wait_ready' DRIConf option to false.
 */
static bool
x11_needs_wait_for_fences(const struct wsi_device *wsi_device,
                          struct wsi_x11_connection *wsi_conn,
                          VkPresentModeKHR present_mode)
{
   if (wsi_conn->is_xwayland && !wsi_device->x11.xwaylandWaitReady) {
      return false;
   }

   switch (present_mode) {
   case VK_PRESENT_MODE_MAILBOX_KHR:
      return true;
   case VK_PRESENT_MODE_IMMEDIATE_KHR:
      return wsi_conn->is_xwayland;
   default:
      return false;
   }
}

/**
 * The number of images that are not owned by X11:
 *  (1) in the ownership of the app, or
 *  (2) waiting for the app to take ownership through an acquire, or
 *  (3) in the present queue waiting for the FIFO thread to present to X11.
 */
static unsigned x11_driver_owned_images(const struct x11_swapchain *chain)
{
   return chain->base.image_count - chain->sent_image_count;
}

/**
 * Our queue manager. Although called x11_manage_fifo_queues, it only directly
 * manages the present-queue, and does so in both fifo and mailbox presentation
 * modes (there is no present-queue in immediate mode, with the exception of
 * Xwayland).
 *
 * Runs in a separate thread that blocks on and reacts to images queued on the
 * present-queue.
 *
 * In mailbox mode the queue management is simplified, since we only need to
 * pull new images from the present-queue and can present them directly.
 *
 * In fifo mode images can only be presented one after the other. To that end,
 * after sending an image to the X server we wait until it has either been
 * presented or released, and only then pull a new image from the present-queue.
 */
static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection((struct wsi_device*)chain->base.wsi, chain->conn);
   VkResult result = VK_SUCCESS;

   assert(chain->has_present_queue);

   u_thread_setname("WSI swapchain queue");

   while (chain->status >= 0) {
      /* We can block here unconditionally because after an image was sent to
       * the server (later on in this loop) we ensure at least one image is
       * acquirable by the consumer or wait there on such an event.
       */
      uint32_t image_index = 0;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      assert(result != VK_TIMEOUT);

      if (result < 0) {
         goto fail;
      } else if (chain->status < 0) {
         /* The status can change underneath us if the swapchain is destroyed
          * from another thread.
          */
         return NULL;
      }

      /* Waiting for the GPU work to finish at this point in time is required
       * in certain usage scenarios. Otherwise we wait as usual in
       * wsi_common_queue_present.
       */
      if (x11_needs_wait_for_fences(chain->base.wsi, wsi_conn,
                                    chain->base.present_mode)) {
         result = chain->base.wsi->WaitForFences(chain->base.device, 1,
                                        &chain->base.fences[image_index],
                                        true, UINT64_MAX);
         if (result != VK_SUCCESS) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }
      }

      uint64_t target_msc = 0;
      if (chain->has_acquire_queue)
         target_msc = chain->last_present_msc + 1;

      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result < 0)
         goto fail;

      if (chain->has_acquire_queue) {
         /* Assume this isn't a swapchain where we force 5 images, because those
          * don't end up with an acquire queue at the moment.
          */
         unsigned min_image_count = x11_get_min_image_count(chain->base.wsi);

         /* With drirc overrides some games have swapchains with fewer than the
          * minimum number of images. */
         min_image_count = MIN2(min_image_count, chain->base.image_count);

         /* We always need to ensure that the app can have this number of images
          * acquired concurrently in between presents:
          * "VUID-vkAcquireNextImageKHR-swapchain-01802
          *  If the number of currently acquired images is greater than the difference
1509
 
          *  between the number of images in swapchain and the value of
1510
 
          *  VkSurfaceCapabilitiesKHR::minImageCount as returned by a call to
1511
 
          *  vkGetPhysicalDeviceSurfaceCapabilities2KHR with the surface used to
1512
 
          *  create swapchain, timeout must not be UINT64_MAX"
1513
 
          */
1514
 
         unsigned forward_progress_guaranteed_acquired_images =
1515
 
            chain->base.image_count - min_image_count + 1;
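
         /* Worked example (numbers are illustrative): with image_count == 3
          * and min_image_count == 2 the app may validly end up holding
          * 3 - 2 + 1 == 2 acquired images, so the loop below keeps draining
          * Present events until at least 2 images are driver-owned again.
          */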

         /* Wait for our presentation to occur and ensure we have at least one
          * image that can be acquired by the client afterwards. This ensures we
          * can pull on the present-queue on the next loop iteration.
          */
         while (chain->images[image_index].present_queued ||
                /* If we have images in the present queue the outer loop won't
                 * block and a break here would only end up back at this loop;
                 * otherwise a break here satisfies
                 * VUID-vkAcquireNextImageKHR-swapchain-01802 */
                x11_driver_owned_images(chain) < forward_progress_guaranteed_acquired_images) {

            xcb_generic_event_t *event =
               xcb_wait_for_special_event(chain->conn, chain->special_event);
            if (!event) {
               result = VK_ERROR_SURFACE_LOST_KHR;
               goto fail;
            }

            result = x11_handle_dri3_present_event(chain, (void *)event);
            /* Ensure that VK_SUBOPTIMAL_KHR is reported to the application */
            result = x11_swapchain_result(chain, result);
            free(event);
            if (result < 0)
               goto fail;
         }
      }
   }

fail:
   x11_swapchain_result(chain, result);
   if (chain->has_acquire_queue)
      wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}
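
/* Illustrative FIFO timeline (MSC numbers are made up): if the last completed
 * present happened at MSC 100 (chain->last_present_msc == 100), the next image
 * pulled from the present-queue is submitted with target_msc == 101, i.e. the
 * very next vblank. The wait loop above then blocks on Present events until
 * that image is complete or idle, which is what paces the application to the
 * display's refresh rate.
 */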

static uint8_t *
alloc_shm(struct wsi_image *imagew, unsigned size)
{
#ifdef HAVE_SYS_SHM_H
   struct x11_image *image = (struct x11_image *)imagew;
   image->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
   if (image->shmid < 0)
      return NULL;

   uint8_t *addr = (uint8_t *)shmat(image->shmid, 0, 0);
   /* mark the segment immediately for deletion to avoid leaks */
   shmctl(image->shmid, IPC_RMID, 0);

   if (addr == (uint8_t *) -1)
      return NULL;

   image->shmaddr = addr;
   return addr;
#else
   return NULL;
#endif
}
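
/* alloc_shm is passed to wsi_configure_native_image() during swapchain
 * creation below so that, for software (MIT-SHM) swapchains, the CPU-side
 * image storage and the X server's pixmap can share one SysV shm segment.
 */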

static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t bpp = 32;
   int fence_fd;

   result = wsi_create_image(&chain->base, &chain->base.image_info,
                             &image->base);
   if (result != VK_SUCCESS)
      return result;

   if (chain->base.wsi->sw) {
      if (!chain->has_mit_shm) {
         image->busy = false;
         return VK_SUCCESS;
      }

      image->shmseg = xcb_generate_id(chain->conn);

      xcb_shm_attach(chain->conn,
                     image->shmseg,
                     image->shmid,
                     0);
      image->pixmap = xcb_generate_id(chain->conn);
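      /* The pixmap width is given in pixels: row_pitches[0] is in bytes and
       * with bpp == 32 each pixel occupies 4 bytes, hence the division by 4.
       */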
      cookie = xcb_shm_create_pixmap_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.row_pitches[0] / 4,
                                             pCreateInfo->imageExtent.height,
                                             chain->depth,
                                             image->shmseg, 0);
      xcb_discard_reply(chain->conn, cookie.sequence);
      goto out_fence;
   }
   image->pixmap = xcb_generate_id(chain->conn);

#ifdef HAVE_DRI3_MODIFIERS
   if (image->base.drm_modifier != DRM_FORMAT_MOD_INVALID) {
      /* If the image has a modifier, we must have DRI3 v1.2. */
      assert(chain->has_dri3_modifiers);

      cookie =
         xcb_dri3_pixmap_from_buffers_checked(chain->conn,
                                              image->pixmap,
                                              chain->window,
                                              image->base.num_planes,
                                              pCreateInfo->imageExtent.width,
                                              pCreateInfo->imageExtent.height,
                                              image->base.row_pitches[0],
                                              image->base.offsets[0],
                                              image->base.row_pitches[1],
                                              image->base.offsets[1],
                                              image->base.row_pitches[2],
                                              image->base.offsets[2],
                                              image->base.row_pitches[3],
                                              image->base.offsets[3],
                                              chain->depth, bpp,
                                              image->base.drm_modifier,
                                              image->base.fds);
   } else
#endif
   {
      /* Without passing modifiers, we can't have multi-plane RGB images. */
      assert(image->base.num_planes == 1);

      cookie =
         xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                             image->pixmap,
                                             chain->window,
                                             image->base.sizes[0],
                                             pCreateInfo->imageExtent.width,
                                             pCreateInfo->imageExtent.height,
                                             image->base.row_pitches[0],
                                             chain->depth, bpp,
                                             image->base.fds[0]);
   }

   xcb_discard_reply(chain->conn, cookie.sequence);

   /* XCB has now taken ownership of the FDs. */
   for (int i = 0; i < image->base.num_planes; i++)
      image->base.fds[i] = -1;

out_fence:
   fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0)
      goto fail_pixmap;

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL)
      goto fail_shmfence_alloc;

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);

   return VK_ERROR_INITIALIZATION_FAILED;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   if (!chain->base.wsi->sw || chain->has_mit_shm) {
      cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
      xcb_discard_reply(chain->conn, cookie.sequence);
      xshmfence_unmap_shm(image->shm_fence);

      cookie = xcb_free_pixmap(chain->conn, image->pixmap);
      xcb_discard_reply(chain->conn, cookie.sequence);
   }

   wsi_destroy_image(&chain->base, &image->base);
#ifdef HAVE_SYS_SHM_H
   if (image->shmaddr)
      shmdt(image->shmaddr);
#endif
}
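
/* Gathers the DRM format modifiers advertised by the X server via DRI3 for
 * this window/screen. Window-preferred modifiers, when present, are returned
 * as the first tranche, screen-wide modifiers as the second.
 */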
static void
wsi_x11_get_dri3_modifiers(struct wsi_x11_connection *wsi_conn,
                           xcb_connection_t *conn, xcb_window_t window,
                           uint8_t depth, uint8_t bpp,
                           VkCompositeAlphaFlagsKHR vk_alpha,
                           uint64_t **modifiers_in, uint32_t *num_modifiers_in,
                           uint32_t *num_tranches_in,
                           const VkAllocationCallbacks *pAllocator)
{
   if (!wsi_conn->has_dri3_modifiers)
      goto out;

#ifdef HAVE_DRI3_MODIFIERS
   xcb_generic_error_t *error = NULL;
   xcb_dri3_get_supported_modifiers_cookie_t mod_cookie =
      xcb_dri3_get_supported_modifiers(conn, window, depth, bpp);
   xcb_dri3_get_supported_modifiers_reply_t *mod_reply =
      xcb_dri3_get_supported_modifiers_reply(conn, mod_cookie, &error);
   free(error);

   if (!mod_reply || (mod_reply->num_window_modifiers == 0 &&
                      mod_reply->num_screen_modifiers == 0)) {
      free(mod_reply);
      goto out;
   }

   uint32_t n = 0;
   uint32_t counts[2];
   uint64_t *modifiers[2];

   if (mod_reply->num_window_modifiers) {
      counts[n] = mod_reply->num_window_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_window_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   if (mod_reply->num_screen_modifiers) {
      counts[n] = mod_reply->num_screen_modifiers;
      modifiers[n] = vk_alloc(pAllocator,
                              counts[n] * sizeof(uint64_t),
                              8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (!modifiers[n]) {
         if (n > 0)
            vk_free(pAllocator, modifiers[0]);
         free(mod_reply);
         goto out;
      }

      memcpy(modifiers[n],
             xcb_dri3_get_supported_modifiers_screen_modifiers(mod_reply),
             counts[n] * sizeof(uint64_t));
      n++;
   }

   for (int i = 0; i < n; i++) {
      modifiers_in[i] = modifiers[i];
      num_modifiers_in[i] = counts[i];
   }
   *num_tranches_in = n;

   free(mod_reply);
   return;
#endif
out:
   *num_tranches_in = 0;
}

static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   if (chain->has_present_queue) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);

      if (chain->has_acquire_queue)
         wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);
   wsi_destroy_image_info(&chain->base, &chain->base.image_info);

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static void
wsi_x11_set_adaptive_sync_property(xcb_connection_t *conn,
                                   xcb_drawable_t drawable,
                                   uint32_t state)
{
   static char const name[] = "_VARIABLE_REFRESH";
   xcb_intern_atom_cookie_t cookie;
   xcb_intern_atom_reply_t* reply;
   xcb_void_cookie_t check;

   cookie = xcb_intern_atom(conn, 0, strlen(name), name);
   reply = xcb_intern_atom_reply(conn, cookie, NULL);
   if (reply == NULL)
      return;

   if (state)
      check = xcb_change_property_checked(conn, XCB_PROP_MODE_REPLACE,
                                          drawable, reply->atom,
                                          XCB_ATOM_CARDINAL, 32, 1, &state);
   else
      check = xcb_delete_property_checked(conn, drawable, reply->atom);

   xcb_discard_reply(conn, check.sequence);
   free(reply);
}
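
/* The _VARIABLE_REFRESH window property is a hint consumed outside of Mesa;
 * X drivers that support variable refresh rate (xf86-video-amdgpu, for
 * instance) watch it to decide whether adaptive sync may be engaged for the
 * window.
 */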

/**
 * Create the swapchain.
 *
 * Supports immediate, fifo and mailbox presentation modes.
 */
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;
   VkPresentModeKHR present_mode = wsi_swapchain_get_present_mode(wsi_device, pCreateInfo);

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   /* Get the xcb connection from the icd_surface and, from that, our internal
    * struct representing it.
    */
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* Get the number of images in our swapchain. This count depends on:
    * - the requested minimal image count
    * - device characteristics
    * - the presentation mode.
    */
   unsigned num_images = pCreateInfo->minImageCount;
   if (wsi_device->x11.strict_imageCount)
      num_images = pCreateInfo->minImageCount;
   else if (x11_needs_wait_for_fences(wsi_device, wsi_conn, present_mode))
      num_images = MAX2(num_images, 5);
   else if (wsi_device->x11.ensure_minImageCount)
      num_images = MAX2(num_images, x11_get_min_image_count(wsi_device));
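
   /* Example (illustrative): an app requesting minImageCount == 2 in mailbox
    * mode gets 5 images here, since x11_needs_wait_for_fences() returns true
    * for mailbox and we want enough slack to keep rendering while earlier
    * presents are still in flight.
    */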

   /* Check that we have a window up-front. It is an error not to have one. */
   xcb_window_t window = x11_surface_get_window(icd_surface);

   /* Get the geometry of that window. The swapchain's bit depth is taken from
    * it, and the chain's image extents should match it so that X can use
    * performance-optimizing flips for presentation.
    */
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
   if (geometry == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;
   const uint32_t bit_depth = geometry->depth;
   const uint16_t cur_width = geometry->width;
   const uint16_t cur_height = geometry->height;
   free(geometry);

   /* Allocate the actual swapchain. The size depends on image count. */
   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_zalloc(pAllocator, size, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   /* When our local device is not compatible with the DRI3 device provided by
    * the X server we assume this is a PRIME system.
    */
   bool use_buffer_blit = false;
   if (!wsi_device->sw)
      if (!wsi_x11_check_dri3_compatible(wsi_device, conn))
         use_buffer_blit = true;

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, pAllocator, use_buffer_blit);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.present_mode = present_mode;
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->sent_image_count = 0;
   chain->last_present_msc = 0;
   chain->has_acquire_queue = false;
   chain->has_present_queue = false;
   chain->status = VK_SUCCESS;
   chain->has_dri3_modifiers = wsi_conn->has_dri3_modifiers;
   chain->has_mit_shm = wsi_conn->has_mit_shm;

   /* When images in the swapchain don't fit the window, X can still present
    * them, but only by copy, not by flip. Report this as suboptimal, because
    * if the client changed the chain extents X might be able to flip again.
    */
   if (chain->extent.width != cur_width || chain->extent.height != cur_height)
       chain->status = VK_SUBOPTIMAL_KHR;

   /* On a new swapchain this helper variable is set to false. It only has an
    * impact once we have done at least one flip and then fall back to copying
    * afterwards. In that case it is presumed there is a high likelihood that X
    * could flip again if the client reallocated a new swapchain.
    *
    * Note that we used to inherit this property from
    * 'pCreateInfo->oldSwapchain'. But when it was true, and when the next
    * present was completed with copying, we would return VK_SUBOPTIMAL_KHR and
    * hint the app to reallocate again for no good reason. If all following
    * presents on the surface were completed with copying because of some
    * surface state change, we would always return VK_SUBOPTIMAL_KHR no matter
    * how many times the app had reallocated.
    *
    * Note also that it is questionable in general whether that mechanism is
    * really useful. It is not clear why, on a change from flipping to copying,
    * we can assume a reallocation has a high chance of making flips work again
    * per se. In other words it is not clear why there is a need for another
    * way to inform clients about suboptimal copies besides forwarding the
    * 'PresentOptionSuboptimal' complete mode.
    */
   chain->copy_is_suboptimal = false;

   /* For our swapchain we need to listen to the following Present extension events:
    * - Configure: Window dimensions changed. Images in the swapchain might need
    *              to be reallocated.
    * - Complete: An image from our swapchain was presented on the output.
    * - Idle: An image from our swapchain is no longer accessed by the X
    *         server and can be reused.
    */
   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue.
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   /* Create the graphics context. */
   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint64_t *modifiers[2] = {NULL, NULL};
   uint32_t num_modifiers[2] = {0, 0};
   uint32_t num_tranches = 0;
   if (wsi_device->supports_modifiers)
      wsi_x11_get_dri3_modifiers(wsi_conn, conn, window, chain->depth, 32,
                                 pCreateInfo->compositeAlpha,
                                 modifiers, num_modifiers, &num_tranches,
                                 pAllocator);

   if (chain->base.use_buffer_blit) {
      bool use_modifier = num_tranches > 0;
      result = wsi_configure_prime_image(&chain->base, pCreateInfo,
                                         use_modifier,
                                         &chain->base.image_info);
   } else {
      result = wsi_configure_native_image(&chain->base, pCreateInfo,
                                          num_tranches, num_modifiers,
                                          (const uint64_t *const *)modifiers,
                                          chain->has_mit_shm ? &alloc_shm : NULL,
                                          &chain->base.image_info);
   }
   if (result != VK_SUCCESS)
      goto fail_modifiers;

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   /* Initialize queues for images in our swapchain. Possible queues are:
    * - Present queue: for images sent to the X server but not yet presented.
    * - Acquire queue: for images already presented but not yet released by the
    *                  X server.
    *
    * In general queues are not used on software drivers; otherwise, which
    * queues are used depends on our presentation mode:
    * - Fifo: present and acquire
    * - Mailbox: present only
    * - Immediate: present when we wait on fences before buffer submission (Xwayland)
    */
   if ((chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
        chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR ||
        x11_needs_wait_for_fences(wsi_device, wsi_conn,
                                  chain->base.present_mode)) &&
       !chain->base.wsi->sw) {
      chain->has_present_queue = true;

      /* The queues have a length of base.image_count + 1 because we will
       * occasionally use UINT32_MAX to signal the other thread that an error
       * has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
      if (ret) {
         goto fail_init_images;
      }

      if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR ||
          chain->base.present_mode == VK_PRESENT_MODE_FIFO_RELAXED_KHR) {
         chain->has_acquire_queue = true;

         ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
         if (ret) {
            wsi_queue_destroy(&chain->present_queue);
            goto fail_init_images;
         }
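
         /* Right after creation the X server owns none of the images, so every
          * index is immediately acquirable; pre-fill the acquire queue with all
          * of them.
          */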
         for (unsigned i = 0; i < chain->base.image_count; i++)
            wsi_queue_push(&chain->acquire_queue, i);
      }

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         if (chain->has_acquire_queue)
            wsi_queue_destroy(&chain->acquire_queue);

         goto fail_init_images;
      }
   }

   assert(chain->has_present_queue || !chain->has_acquire_queue);

   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

   /* It is safe to set it here as only one swapchain can be associated with
    * the window, and swapchain creation does the association. At this point
    * we know the creation is going to succeed. */
   wsi_x11_set_adaptive_sync_property(conn, window,
                                      wsi_device->enable_adaptive_sync);

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

   wsi_destroy_image_info(&chain->base, &chain->base.image_info);

fail_modifiers:
   for (int i = 0; i < ARRAY_SIZE(modifiers); i++)
      vk_free(pAllocator, modifiers[i]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}

VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc,
                 const struct driOptionCache *dri_options)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                   VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   if (dri_options) {
      if (driCheckOption(dri_options, "vk_x11_override_min_image_count", DRI_INT)) {
         wsi_device->x11.override_minImageCount =
            driQueryOptioni(dri_options, "vk_x11_override_min_image_count");
      }
      if (driCheckOption(dri_options, "vk_x11_strict_image_count", DRI_BOOL)) {
         wsi_device->x11.strict_imageCount =
            driQueryOptionb(dri_options, "vk_x11_strict_image_count");
      }
      if (driCheckOption(dri_options, "vk_x11_ensure_min_image_count", DRI_BOOL)) {
         wsi_device->x11.ensure_minImageCount =
            driQueryOptionb(dri_options, "vk_x11_ensure_min_image_count");
      }
      wsi_device->x11.xwaylandWaitReady = true;
      if (driCheckOption(dri_options, "vk_xwayland_wait_ready", DRI_BOOL)) {
         wsi_device->x11.xwaylandWaitReady =
            driQueryOptionb(dri_options, "vk_xwayland_wait_ready");
      }
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_formats2 = x11_surface_get_formats2;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.get_present_rectangles = x11_surface_get_present_rectangles;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}

void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(wsi_device, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}