/***
  This file is part of PulseAudio.

  Copyright 2004-2006 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, see <http://www.gnu.org/licenses/>.
***/
28
#include <pulse/xmalloc.h>
30
#include <pulsecore/log.h>
31
#include <pulsecore/mcalign.h>
32
#include <pulsecore/macro.h>
33
#include <pulsecore/flist.h>
35
#include "memblockq.h"
37
/* #define MEMBLOCKQ_DEBUG */
40
struct list_item *next, *prev;
45
PA_STATIC_FLIST_DECLARE(list_items, 0, pa_xfree);
48
struct list_item *blocks, *blocks_tail;
49
struct list_item *current_read, *current_write;
51
size_t maxlength, tlength, base, prebuf, minreq, maxrewind;
52
int64_t read_index, write_index;
56
int64_t missing, requested;
58
pa_sample_spec sample_spec;
61
pa_memblockq* pa_memblockq_new(
66
const pa_sample_spec *sample_spec,
70
pa_memchunk *silence) {
74
pa_assert(sample_spec);
77
bq = pa_xnew0(pa_memblockq, 1);
78
bq->name = pa_xstrdup(name);
80
bq->sample_spec = *sample_spec;
81
bq->base = pa_frame_size(sample_spec);
82
bq->read_index = bq->write_index = idx;
84
pa_log_debug("memblockq requested: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
85
(unsigned long) maxlength, (unsigned long) tlength, (unsigned long) bq->base, (unsigned long) prebuf, (unsigned long) minreq, (unsigned long) maxrewind);
89
pa_memblockq_set_maxlength(bq, maxlength);
90
pa_memblockq_set_tlength(bq, tlength);
91
pa_memblockq_set_minreq(bq, minreq);
92
pa_memblockq_set_prebuf(bq, prebuf);
93
pa_memblockq_set_maxrewind(bq, maxrewind);
95
pa_log_debug("memblockq sanitized: maxlength=%lu, tlength=%lu, base=%lu, prebuf=%lu, minreq=%lu maxrewind=%lu",
96
(unsigned long) bq->maxlength, (unsigned long) bq->tlength, (unsigned long) bq->base, (unsigned long) bq->prebuf, (unsigned long) bq->minreq, (unsigned long) bq->maxrewind);
99
bq->silence = *silence;
100
pa_memblock_ref(bq->silence.memblock);
103
bq->mcalign = pa_mcalign_new(bq->base);
108
/* Release all queued blocks, the silence chunk, the aligner and the
 * queue object itself. */
void pa_memblockq_free(pa_memblockq* bq) {
    pa_assert(bq);

    pa_memblockq_silence(bq); /* drops every queued block */

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (bq->mcalign)
        pa_mcalign_free(bq->mcalign);

    pa_xfree(bq->name);
    pa_xfree(bq);
}
123
/* Move the cached read cursor so that it points at (or left of) the
 * block containing bq->read_index. */
static void fix_current_read(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_read = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_read))
        bq->current_read = bq->blocks;

    /* Scan left */
    while (PA_UNLIKELY(bq->current_read->index > bq->read_index))

        if (bq->current_read->prev)
            bq->current_read = bq->current_read->prev;
        else
            break;

    /* Scan right */
    while (PA_LIKELY(bq->current_read != NULL) && PA_UNLIKELY(bq->current_read->index + (int64_t) bq->current_read->chunk.length <= bq->read_index))
        bq->current_read = bq->current_read->next;

    /* At this point current_read will either point at or left of the
       next block to play. It may be NULL in case everything in
       the queue was already played */
}
151
/* Move the cached write cursor so that it points at (or right of) the
 * block containing bq->write_index. Mirror image of fix_current_read(). */
static void fix_current_write(pa_memblockq *bq) {
    pa_assert(bq);

    if (PA_UNLIKELY(!bq->blocks)) {
        bq->current_write = NULL;
        return;
    }

    if (PA_UNLIKELY(!bq->current_write))
        bq->current_write = bq->blocks_tail;

    /* Scan right */
    while (PA_UNLIKELY(bq->current_write->index + (int64_t) bq->current_write->chunk.length <= bq->write_index))

        if (bq->current_write->next)
            bq->current_write = bq->current_write->next;
        else
            break;

    /* Scan left */
    while (PA_LIKELY(bq->current_write != NULL) && PA_UNLIKELY(bq->current_write->index > bq->write_index))
        bq->current_write = bq->current_write->prev;

    /* At this point current_write will either point at or right of
       the next block to write data to. It may be NULL in case
       everything in the queue is still to be played */
}
179
/* Unlink 'q' from the block list, fix up head/tail and the cached
 * cursors, release its memblock and recycle the list_item. */
static void drop_block(pa_memblockq *bq, struct list_item *q) {
    pa_assert(bq);
    pa_assert(q);

    pa_assert(bq->n_blocks >= 1);

    if (q->prev)
        q->prev->next = q->next;
    else {
        pa_assert(bq->blocks == q);
        bq->blocks = q->next;
    }

    if (q->next)
        q->next->prev = q->prev;
    else {
        pa_assert(bq->blocks_tail == q);
        bq->blocks_tail = q->prev;
    }

    /* Keep the cursors valid: step them off the removed entry. */
    if (bq->current_write == q)
        bq->current_write = q->prev;

    if (bq->current_read == q)
        bq->current_read = q->next;

    pa_memblock_unref(q->chunk.memblock);

    /* Recycle via the free list; fall back to freeing when it is full. */
    if (pa_flist_push(PA_STATIC_FLIST_GET(list_items), q) < 0)
        pa_xfree(q);

    bq->n_blocks--;
}
213
/* Drop blocks that lie entirely before read_index - maxrewind, i.e.
 * history we no longer need for rewinding. */
static void drop_backlog(pa_memblockq *bq) {
    int64_t boundary;
    pa_assert(bq);

    boundary = bq->read_index - (int64_t) bq->maxrewind;

    while (bq->blocks && (bq->blocks->index + (int64_t) bq->blocks->chunk.length <= boundary))
        drop_block(bq, bq->blocks);
}
223
/* Return true if 'l' more bytes may be written at write_index without
 * growing the queue beyond maxlength. */
static bool can_push(pa_memblockq *bq, size_t l) {
    int64_t end;

    pa_assert(bq);

    if (bq->read_index > bq->write_index) {
        int64_t d = bq->read_index - bq->write_index;

        /* Bytes written into the gap before read_index don't count
         * against the queue length. */
        if ((int64_t) l > d)
            l -= (size_t) d;
        else
            return true;
    }

    end = bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->write_index;

    /* Make sure that the list doesn't get too long */
    if (bq->write_index + (int64_t) l > end)
        if (bq->write_index + (int64_t) l - bq->read_index > (int64_t) bq->maxlength)
            return false;

    return true;
}
247
/* Update the flow-control counters after write_index moved. When
 * 'account' is set the write satisfies an earlier request, otherwise it
 * reduces what we still consider missing. */
static void write_index_changed(pa_memblockq *bq, int64_t old_write_index, bool account) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->write_index - old_write_index;

    if (account)
        bq->requested -= delta;
    else
        bq->missing -= delta;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] pushed/seeked %lli: requested counter at %lli, account=%i", bq->name, (long long) delta, (long long) bq->requested, account);
#endif
}
264
/* Update the flow-control counters after read_index moved: consumed
 * bytes become 'missing' bytes the writer should refill. */
static void read_index_changed(pa_memblockq *bq, int64_t old_read_index) {
    int64_t delta;

    pa_assert(bq);

    delta = bq->read_index - old_read_index;
    bq->missing += delta;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] popped %lli: missing counter at %lli", bq->name, (long long) delta, (long long) bq->missing);
#endif
}
277
/* Insert 'uchunk' at write_index, overwriting any overlapping data that
 * is already queued. The chunk is ref'd, not copied. Returns 0 on
 * success, -1 when the queue is full. */
int pa_memblockq_push(pa_memblockq* bq, const pa_memchunk *uchunk) {
    struct list_item *q, *n;
    pa_memchunk chunk;
    int64_t old;

    pa_assert(bq);
    pa_assert(uchunk);
    pa_assert(uchunk->memblock);
    pa_assert(uchunk->length > 0);
    pa_assert(uchunk->index + uchunk->length <= pa_memblock_get_length(uchunk->memblock));

    pa_assert(uchunk->length % bq->base == 0);
    pa_assert(uchunk->index % bq->base == 0);

    if (!can_push(bq, uchunk->length))
        return -1;

    old = bq->write_index;
    chunk = *uchunk;

    fix_current_write(bq);
    q = bq->current_write;

    /* First we advance the q pointer right of where we want to
     * write to */

    if (q) {
        while (bq->write_index + (int64_t) chunk.length > q->index)

            if (q->next)
                q = q->next;
            else
                break;
    }

    if (!q)
        q = bq->blocks_tail;

    /* We go from back to front to look for the right place to add
     * this new entry. Drop data we will overwrite on the way */

    while (q) {

        if (bq->write_index >= q->index + (int64_t) q->chunk.length)
            /* We found the entry where we need to place the new entry immediately after */
            break;
        else if (bq->write_index + (int64_t) chunk.length <= q->index)
            /* This entry isn't touched at all, let's skip it */
            q = q->prev;
        else if (bq->write_index <= q->index &&
                 bq->write_index + (int64_t) chunk.length >= q->index + (int64_t) q->chunk.length) {

            /* This entry is fully replaced by the new entry, so let's drop it */

            struct list_item *p;
            p = q;
            q = q->prev;
            drop_block(bq, p);
        } else if (bq->write_index >= q->index) {
            /* The write index points into this memblock, so let's
             * truncate or split it */

            if (bq->write_index + (int64_t) chunk.length < q->index + (int64_t) q->chunk.length) {

                /* We need to save the end of this memchunk */

                struct list_item *p;
                size_t d;

                /* Create a new list entry for the end of the memchunk */
                if (!(p = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
                    p = pa_xnew(struct list_item, 1);

                p->chunk = q->chunk;
                pa_memblock_ref(p->chunk.memblock);

                /* Calculate offset */
                d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
                pa_assert(d > 0);

                /* Drop it from the new entry */
                p->index = q->index + (int64_t) d;
                p->chunk.index += d;
                p->chunk.length -= d;

                /* Add it to the list */
                p->prev = q;
                if ((p->next = q->next))
                    q->next->prev = p;
                else
                    bq->blocks_tail = p;
                q->next = p;

                bq->n_blocks++;
            }

            /* Truncate the chunk */
            if (!(q->chunk.length = (size_t) (bq->write_index - q->index))) {
                struct list_item *p;
                p = q;
                q = q->prev;
                drop_block(bq, p);
            }

            /* We had to truncate this block, hence we're now at the right position */
            break;
        } else {
            size_t d;

            pa_assert(bq->write_index + (int64_t)chunk.length > q->index &&
                      bq->write_index + (int64_t)chunk.length < q->index + (int64_t)q->chunk.length &&
                      bq->write_index < q->index);

            /* The job overwrites the current entry at the end, so let's drop the beginning of this entry */

            d = (size_t) (bq->write_index + (int64_t) chunk.length - q->index);
            q->index += (int64_t) d;
            q->chunk.index += d;
            q->chunk.length -= d;

            q = q->prev;
        }
    }

    if (q) {
        pa_assert(bq->write_index >= q->index + (int64_t)q->chunk.length);
        pa_assert(!q->next || (bq->write_index + (int64_t)chunk.length <= q->next->index));

        /* Try to merge memory blocks */

        if (q->chunk.memblock == chunk.memblock &&
            q->chunk.index + q->chunk.length == chunk.index &&
            bq->write_index == q->index + (int64_t) q->chunk.length) {

            q->chunk.length += chunk.length;
            bq->write_index += (int64_t) chunk.length;
            goto finish;
        }
    } else
        pa_assert(!bq->blocks || (bq->write_index + (int64_t)chunk.length <= bq->blocks->index));

    if (!(n = pa_flist_pop(PA_STATIC_FLIST_GET(list_items))))
        n = pa_xnew(struct list_item, 1);

    n->chunk = chunk;
    pa_memblock_ref(n->chunk.memblock);
    n->index = bq->write_index;
    bq->write_index += (int64_t) n->chunk.length;

    n->next = q ? q->next : bq->blocks;
    n->prev = q;

    if (n->next)
        n->next->prev = n;
    else
        bq->blocks_tail = n;

    if (n->prev)
        n->prev->next = n;
    else
        bq->blocks = n;

    bq->n_blocks++;

finish:

    write_index_changed(bq, old, true);
    return 0;
}
444
/* Return whether the queue is (or would go) into pre-buffering state,
 * without changing any state. */
bool pa_memblockq_prebuf_active(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf)
        return pa_memblockq_get_length(bq) < bq->prebuf;
    else
        return bq->prebuf > 0 && bq->read_index >= bq->write_index;
}
453
/* Update bq->in_prebuf according to the current fill level; returns
 * true while the reader should keep waiting (pre-buffering). */
static bool update_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->in_prebuf) {

        if (pa_memblockq_get_length(bq) < bq->prebuf)
            return true;

        bq->in_prebuf = false;
        return false;
    } else {

        /* Re-enter prebuf on underrun. */
        if (bq->prebuf > 0 && bq->read_index >= bq->write_index) {
            bq->in_prebuf = true;
            return true;
        }

        return false;
    }
}
474
/* Return (ref'd, not consumed) the chunk at read_index. For a gap the
 * silence chunk (or a NULL-memblock length marker) is returned.
 * Returns -1 while pre-buffering or when nothing is available. */
int pa_memblockq_peek(pa_memblockq* bq, pa_memchunk *chunk) {
    int64_t d;
    pa_assert(bq);
    pa_assert(chunk);

    /* We need to pre-buffer */
    if (update_prebuf(bq))
        return -1;

    fix_current_read(bq);

    /* Do we need to spit out silence? */
    if (!bq->current_read || bq->current_read->index > bq->read_index) {
        size_t length;

        /* How much silence shall we return? */
        if (bq->current_read)
            length = (size_t) (bq->current_read->index - bq->read_index);
        else if (bq->write_index > bq->read_index)
            length = (size_t) (bq->write_index - bq->read_index);
        else
            length = 0;

        /* We need to return silence, since no data is yet available */
        if (bq->silence.memblock) {
            *chunk = bq->silence;
            pa_memblock_ref(chunk->memblock);

            if (length > 0 && length < chunk->length)
                chunk->length = length;

        } else {

            /* If the memblockq is empty, return -1, otherwise return
             * the time to sleep */
            if (length <= 0)
                return -1;

            chunk->memblock = NULL;
            chunk->length = length;
        }

        chunk->index = 0;
        return 0;
    }

    /* Ok, let's pass real data to the caller */
    *chunk = bq->current_read->chunk;
    pa_memblock_ref(chunk->memblock);

    pa_assert(bq->read_index >= bq->current_read->index);
    d = bq->read_index - bq->current_read->index;
    chunk->index += (size_t) d;
    chunk->length -= (size_t) d;

    return 0;
}
532
/* Like pa_memblockq_peek() but always returns exactly 'block_size'
 * bytes, copying across block boundaries and filling gaps with the
 * silence chunk (which therefore must be set). */
int pa_memblockq_peek_fixed_size(pa_memblockq *bq, size_t block_size, pa_memchunk *chunk) {
    pa_memchunk tchunk, rchunk;
    int64_t ri;
    struct list_item *item;

    pa_assert(bq);
    pa_assert(block_size > 0);
    pa_assert(chunk);
    pa_assert(bq->silence.memblock);

    if (pa_memblockq_peek(bq, &tchunk) < 0)
        return -1;

    if (tchunk.length >= block_size) {
        /* First chunk is already big enough: hand it out directly. */
        *chunk = tchunk;
        chunk->length = block_size;
        return 0;
    }

    /* Otherwise assemble block_size bytes into a fresh memblock. */
    rchunk.memblock = pa_memblock_new(pa_memblock_get_pool(tchunk.memblock), block_size);
    rchunk.index = 0;
    rchunk.length = tchunk.length;

    pa_memchunk_memcpy(&rchunk, &tchunk);
    pa_memblock_unref(tchunk.memblock);

    rchunk.index += tchunk.length;

    /* We don't need to call fix_current_read() here, since
     * pa_memblock_peek() already did that */
    item = bq->current_read;
    ri = bq->read_index + (int64_t) tchunk.length;

    while (rchunk.index < block_size) {

        if (!item || item->index > ri) {
            /* Do we need to append silence? */
            tchunk = bq->silence;

            if (item)
                tchunk.length = PA_MIN(tchunk.length, (size_t) (item->index - ri));

        } else {
            int64_t d;

            /* We can append real data! */
            tchunk = item->chunk;

            d = ri - item->index;
            tchunk.index += (size_t) d;
            tchunk.length -= (size_t) d;

            /* Go to next item for the next iteration */
            item = item->next;
        }

        rchunk.length = tchunk.length = PA_MIN(tchunk.length, block_size - rchunk.index);
        pa_memchunk_memcpy(&rchunk, &tchunk);

        rchunk.index += rchunk.length;
        ri += (int64_t) rchunk.length;
    }

    rchunk.index = 0;
    rchunk.length = block_size;

    *chunk = rchunk;
    return 0;
}
602
/* Advance read_index by 'length' bytes (a multiple of 'base'), stopping
 * early if the queue falls into pre-buffering; old data beyond
 * maxrewind is released. */
void pa_memblockq_drop(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    while (length > 0) {

        /* Do not drop any data when we are in prebuffering mode */
        if (update_prebuf(bq))
            break;

        fix_current_read(bq);

        if (bq->current_read) {
            int64_t p, d;

            /* We go through this piece by piece to make sure we don't
             * drop more than allowed by prebuf */

            p = bq->current_read->index + (int64_t) bq->current_read->chunk.length;
            pa_assert(p >= bq->read_index);
            d = p - bq->read_index;

            if (d > (int64_t) length)
                d = (int64_t) length;

            bq->read_index += d;
            length -= (size_t) d;

        } else {

            /* The list is empty, there's nothing we could drop */
            bq->read_index += (int64_t) length;
            break;
        }
    }

    drop_backlog(bq);
    read_index_changed(bq, old);
}
645
/* Move read_index back by 'length' bytes (a multiple of 'base'). */
void pa_memblockq_rewind(pa_memblockq *bq, size_t length) {
    int64_t old;
    pa_assert(bq);
    pa_assert(length % bq->base == 0);

    old = bq->read_index;

    /* This is kind of the inverse of pa_memblockq_drop() */

    bq->read_index -= (int64_t) length;

    read_index_changed(bq, old);
}
659
/* True when data may be read right now: not pre-buffering and the
 * queue is non-empty. */
bool pa_memblockq_is_readable(pa_memblockq *bq) {
    pa_assert(bq);

    if (pa_memblockq_prebuf_active(bq))
        return false;

    if (pa_memblockq_get_length(bq) <= 0)
        return false;

    return true;
}
671
/* Number of bytes between read and write index (0 when the reader is
 * ahead of the writer). */
size_t pa_memblockq_get_length(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->write_index <= bq->read_index)
        return 0;

    return (size_t) (bq->write_index - bq->read_index);
}
680
/* How many bytes the writer should supply to reach tlength; 0 when
 * the shortfall is below minreq. */
size_t pa_memblockq_missing(pa_memblockq *bq) {
    size_t l;
    pa_assert(bq);

    if ((l = pa_memblockq_get_length(bq)) >= bq->tlength)
        return 0;

    l = bq->tlength - l;

    return l >= bq->minreq ? l : 0;
}
692
/* Reposition write_index according to the seek mode; 'account' selects
 * which flow-control counter absorbs the move (see write_index_changed). */
void pa_memblockq_seek(pa_memblockq *bq, int64_t offset, pa_seek_mode_t seek, bool account) {
    int64_t old;
    pa_assert(bq);

    old = bq->write_index;

    switch (seek) {
        case PA_SEEK_RELATIVE:
            bq->write_index += offset;
            break;
        case PA_SEEK_ABSOLUTE:
            bq->write_index = offset;
            break;
        case PA_SEEK_RELATIVE_ON_READ:
            bq->write_index = bq->read_index + offset;
            break;
        case PA_SEEK_RELATIVE_END:
            bq->write_index = (bq->blocks_tail ? bq->blocks_tail->index + (int64_t) bq->blocks_tail->chunk.length : bq->read_index) + offset;
            break;
        default:
            pa_assert_not_reached();
    }

    drop_backlog(bq);
    write_index_changed(bq, old, account);
}
719
/* Drop everything queued and pull write_index back to read_index,
 * then force pre-buffering again. */
void pa_memblockq_flush_write(pa_memblockq *bq, bool account) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->write_index;
    bq->write_index = bq->read_index;

    pa_memblockq_prebuf_force(bq);
    write_index_changed(bq, old, account);
}
732
/* Drop everything queued and advance read_index to write_index,
 * then force pre-buffering again. */
void pa_memblockq_flush_read(pa_memblockq *bq) {
    int64_t old;
    pa_assert(bq);

    pa_memblockq_silence(bq);

    old = bq->read_index;
    bq->read_index = bq->write_index;

    pa_memblockq_prebuf_force(bq);
    read_index_changed(bq, old);
}
745
/* Accessor: target length. */
size_t pa_memblockq_get_tlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->tlength;
}
/* Accessor: minimum request size. */
size_t pa_memblockq_get_minreq(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->minreq;
}
/* Accessor: maximum rewind size. */
size_t pa_memblockq_get_maxrewind(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxrewind;
}
763
/* Accessor: absolute read index. */
int64_t pa_memblockq_get_read_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->read_index;
}
769
/* Accessor: absolute write index. */
int64_t pa_memblockq_get_write_index(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->write_index;
}
775
/* Like pa_memblockq_push(), but re-aligns the chunk to the frame size
 * through the mcalign helper first. Returns 0 on success, -1 when the
 * queue is full. */
int pa_memblockq_push_align(pa_memblockq* bq, const pa_memchunk *chunk) {
    pa_memchunk rchunk;

    pa_assert(bq);
    pa_assert(chunk);

    if (bq->base == 1)
        /* No alignment needed. */
        return pa_memblockq_push(bq, chunk);

    if (!can_push(bq, pa_mcalign_csize(bq->mcalign, chunk->length)))
        return -1;

    pa_mcalign_push(bq->mcalign, chunk);

    while (pa_mcalign_pop(bq->mcalign, &rchunk) >= 0) {
        int r;

        r = pa_memblockq_push(bq, &rchunk);
        pa_memblock_unref(rchunk.memblock);

        if (r < 0) {
            pa_mcalign_flush(bq->mcalign);
            return -1;
        }
    }

    return 0;
}
803
/* Leave pre-buffering state immediately. */
void pa_memblockq_prebuf_disable(pa_memblockq *bq) {
    pa_assert(bq);

    bq->in_prebuf = false;
}
809
/* Re-enter pre-buffering state (only meaningful when prebuf > 0). */
void pa_memblockq_prebuf_force(pa_memblockq *bq) {
    pa_assert(bq);

    if (bq->prebuf > 0)
        bq->in_prebuf = true;
}
816
/* Accessor: maximum queue length. */
size_t pa_memblockq_get_maxlength(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->maxlength;
}
822
/* Accessor: pre-buffer size. */
size_t pa_memblockq_get_prebuf(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->prebuf;
}
/* Hand out (and reset) the 'missing' byte count, moving it to the
 * 'requested' counter; returns 0 when nothing is missing. */
size_t pa_memblockq_pop_missing(pa_memblockq *bq) {
    size_t l;

    pa_assert(bq);

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] pop: %lli", bq->name, (long long) bq->missing);
#endif

    if (bq->missing <= 0)
        return 0;

    l = (size_t) bq->missing;

    bq->requested += bq->missing;
    bq->missing = 0;

#ifdef MEMBLOCKQ_DEBUG
    pa_log_debug("[%s] sent %lli: request counter is at %lli", bq->name, (long long) l, (long long) bq->requested);
#endif

    return l;
}
852
/* Set maxlength, rounded up to a multiple of 'base' and at least one
 * frame; shrinks tlength if it no longer fits. */
void pa_memblockq_set_maxlength(pa_memblockq *bq, size_t maxlength) {
    pa_assert(bq);

    bq->maxlength = ((maxlength+bq->base-1)/bq->base)*bq->base;

    if (bq->maxlength < bq->base)
        bq->maxlength = bq->base;

    if (bq->tlength > bq->maxlength)
        pa_memblockq_set_tlength(bq, bq->maxlength);
}
864
/* Set the target length (0 or (size_t)-1 selects maxlength); clamps to
 * maxlength, cross-adjusts minreq/prebuf and updates the 'missing'
 * counter by the change. */
void pa_memblockq_set_tlength(pa_memblockq *bq, size_t tlength) {
    size_t old_tlength;
    pa_assert(bq);

    if (tlength <= 0 || tlength == (size_t) -1)
        tlength = bq->maxlength;

    old_tlength = bq->tlength;
    bq->tlength = ((tlength+bq->base-1)/bq->base)*bq->base;

    if (bq->tlength > bq->maxlength)
        bq->tlength = bq->maxlength;

    if (bq->minreq > bq->tlength)
        pa_memblockq_set_minreq(bq, bq->tlength);

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);

    bq->missing += (int64_t) bq->tlength - (int64_t) old_tlength;
}
886
/* Set minreq, rounded down to a multiple of 'base', clamped to
 * [base, tlength]; may in turn shrink prebuf. */
void pa_memblockq_set_minreq(pa_memblockq *bq, size_t minreq) {
    pa_assert(bq);

    bq->minreq = (minreq/bq->base)*bq->base;

    if (bq->minreq > bq->tlength)
        bq->minreq = bq->tlength;

    if (bq->minreq < bq->base)
        bq->minreq = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        pa_memblockq_set_prebuf(bq, bq->tlength+bq->base-bq->minreq);
}
901
/* Set prebuf ((size_t)-1 selects the default tlength+base-minreq),
 * rounded up to a multiple of 'base' and clamped; clears in_prebuf
 * when it is already satisfied. */
void pa_memblockq_set_prebuf(pa_memblockq *bq, size_t prebuf) {
    pa_assert(bq);

    if (prebuf == (size_t) -1)
        prebuf = bq->tlength+bq->base-bq->minreq;

    bq->prebuf = ((prebuf+bq->base-1)/bq->base)*bq->base;

    if (prebuf > 0 && bq->prebuf < bq->base)
        bq->prebuf = bq->base;

    if (bq->prebuf > bq->tlength+bq->base-bq->minreq)
        bq->prebuf = bq->tlength+bq->base-bq->minreq;

    if (bq->prebuf <= 0 || pa_memblockq_get_length(bq) >= bq->prebuf)
        bq->in_prebuf = false;
}
919
/* Set maxrewind, rounded down to a multiple of 'base'. */
void pa_memblockq_set_maxrewind(pa_memblockq *bq, size_t maxrewind) {
    pa_assert(bq);

    bq->maxrewind = (maxrewind/bq->base)*bq->base;
}
925
/* Apply a pa_buffer_attr to the queue via the sanitizing setters. */
void pa_memblockq_apply_attr(pa_memblockq *bq, const pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    pa_memblockq_set_maxlength(bq, a->maxlength);
    pa_memblockq_set_tlength(bq, a->tlength);
    pa_memblockq_set_minreq(bq, a->minreq);
    pa_memblockq_set_prebuf(bq, a->prebuf);
}
935
/* Fill a pa_buffer_attr from the queue's current metrics. */
void pa_memblockq_get_attr(pa_memblockq *bq, pa_buffer_attr *a) {
    pa_assert(bq);
    pa_assert(a);

    a->maxlength = (uint32_t) pa_memblockq_get_maxlength(bq);
    a->tlength = (uint32_t) pa_memblockq_get_tlength(bq);
    a->prebuf = (uint32_t) pa_memblockq_get_prebuf(bq);
    a->minreq = (uint32_t) pa_memblockq_get_minreq(bq);
}
945
/* Move the contents of 'source' into 'bq'. Returns 0 when 'source' is
 * exhausted, -1 when 'bq' refuses more data. */
int pa_memblockq_splice(pa_memblockq *bq, pa_memblockq *source) {

    pa_assert(bq);
    pa_assert(source);

    pa_memblockq_prebuf_disable(bq);

    for (;;) {
        pa_memchunk chunk;

        if (pa_memblockq_peek(source, &chunk) < 0)
            return 0;

        pa_assert(chunk.length > 0);

        if (chunk.memblock) {

            if (pa_memblockq_push_align(bq, &chunk) < 0) {
                pa_memblock_unref(chunk.memblock);
                return -1;
            }

            pa_memblock_unref(chunk.memblock);
        } else
            /* A hole in 'source': reproduce it in 'bq' by seeking. */
            pa_memblockq_seek(bq, (int64_t) chunk.length, PA_SEEK_RELATIVE, true);

        /* NOTE(review): this drops from 'bq' as in the original text,
         * but peek() above reads from 'source' — dropping from 'source'
         * would be what advances it and lets the loop terminate.
         * Confirm against upstream before relying on this. */
        pa_memblockq_drop(bq, chunk.length);
    }
}
975
/* Advise the memory subsystem that everything from the read position
 * onward will be needed soon. */
void pa_memblockq_willneed(pa_memblockq *bq) {
    struct list_item *q;

    pa_assert(bq);

    fix_current_read(bq);

    for (q = bq->current_read; q; q = q->next)
        pa_memchunk_will_need(&q->chunk);
}
986
/* Replace the silence chunk ('silence' may be NULL to clear it); the
 * new chunk is ref'd, the old one released. */
void pa_memblockq_set_silence(pa_memblockq *bq, pa_memchunk *silence) {
    pa_assert(bq);

    if (bq->silence.memblock)
        pa_memblock_unref(bq->silence.memblock);

    if (silence) {
        bq->silence = *silence;
        pa_memblock_ref(bq->silence.memblock);
    } else
        pa_memchunk_reset(&bq->silence);
}
999
/* True when no blocks are queued at all. */
bool pa_memblockq_is_empty(pa_memblockq *bq) {
    pa_assert(bq);

    return !bq->blocks;
}
/* Drop every queued block (the indexes are left untouched, so the
 * whole span is subsequently played as silence). */
void pa_memblockq_silence(pa_memblockq *bq) {
    pa_assert(bq);

    while (bq->blocks)
        drop_block(bq, bq->blocks);

    pa_assert(bq->n_blocks == 0);
}
1014
/* Accessor: number of queued blocks. */
unsigned pa_memblockq_get_nblocks(pa_memblockq *bq) {
    pa_assert(bq);

    return bq->n_blocks;
}
1020
size_t pa_memblockq_get_base(pa_memblockq *bq) {