~alan-griffiths/mir/workaround-lp1602199

« back to all changes in this revision

Viewing changes to src/server/compositor/stream.cpp

  • Committer: Tarmac
  • Author(s): Kevin DuBois
  • Date: 2016-05-03 09:49:54 UTC
  • mfrom: (3369.2.31 server-side-alloc)
  • Revision ID: tarmac-20160503094954-6l9jtr20mydxs3l7
server: Change point of frontend-facing buffer allocation from the mf::BufferStream to the msc::Session to prepare for NBS connection-allocated buffers.

We were previously allocating from the streams, but to accommodate mir_connection_allocate_buffer(), we will have to allocate via msc::Session.

This exposes mf::ClientBuffers(), as the streams need to reflect that buffers came from the same Session/connection. (cross-connection/cross-process mirclient-allocated-buffers are not supported). render_surfaces has an example of a 'floating' shell buffer stream that didn't originate from IPC.

Approved by mir-ci-bot, Cemil Azizoglu, Chris Halse Rogers.

Show diffs side-by-side

added added

removed removed

Lines of Context:
60
60
 
61
61
mc::Stream::Stream(
62
62
    mc::FrameDroppingPolicyFactory const& policy_factory,
63
 
    std::unique_ptr<frontend::ClientBuffers> map, geom::Size size, MirPixelFormat pf) :
 
63
    std::shared_ptr<frontend::ClientBuffers> map, geom::Size size, MirPixelFormat pf) :
64
64
    drop_policy(policy_factory.create_policy(std::make_unique<DroppingCallback>(this))),
65
65
    schedule_mode(ScheduleMode::Queueing),
66
66
    schedule(std::make_shared<mc::QueueingSchedule>()),
67
 
    buffers(std::move(map)),
 
67
    buffers(map),
68
68
    arbiter(std::make_shared<mc::MultiMonitorArbiter>(
69
69
        mc::MultiMonitorMode::multi_monitor_sync, buffers, schedule)),
70
70
    size(size),
78
78
    auto server_count = schedule->num_scheduled();
79
79
    if (arbiter->has_buffer())
80
80
        server_count++;
81
 
    return total_buffer_count - server_count;
 
81
    return associated_buffers.size() - server_count;
82
82
}
83
83
 
84
84
void mc::Stream::swap_buffers(mg::Buffer* buffer, std::function<void(mg::Buffer* new_buffer)> fn)
90
90
            first_frame_posted = true;
91
91
            buffers->receive_buffer(buffer->id());
92
92
            schedule->schedule((*buffers)[buffer->id()]);
93
 
            if (client_owned_buffer_count(lk) == 0)
 
93
            if (!associated_buffers.empty() && (client_owned_buffer_count(lk) == 0))
94
94
                drop_policy->swap_now_blocking();
95
95
        }
96
96
        observers.frame_posted(1, buffer->size());
208
208
    return first_frame_posted;
209
209
}
210
210
 
211
 
mg::BufferID mc::Stream::allocate_buffer(mg::BufferProperties const& properties)
{
    // Allocate a new client-facing buffer in the shared ClientBuffers map and
    // account for it so client_owned_buffer_count() stays accurate.
    //
    // Allocate first: if add_buffer() throws, total_buffer_count must remain
    // untouched, otherwise the count would permanently disagree with the
    // number of buffers actually present in the map.
    auto const id = buffers->add_buffer(properties);
    {
        std::lock_guard<decltype(mutex)> lk(mutex);
        total_buffer_count++;
    }
    return id;
}
219
 
 
220
 
void mc::Stream::remove_buffer(mg::BufferID id)
221
 
{
222
 
    {
223
 
        std::lock_guard<decltype(mutex)> lk(mutex); 
224
 
        total_buffer_count--;
225
 
    }
226
 
    buffers->remove_buffer(id);
227
 
}
228
 
 
229
 
void mc::Stream::with_buffer(mg::BufferID id, std::function<void(mg::Buffer&)> const& fn)
230
 
{
231
 
    auto buffer = (*buffers)[id];
232
 
    fn(*buffer);
 
211
void mc::Stream::associate_buffer(mg::BufferID id)
 
212
{
 
213
    std::lock_guard<decltype(mutex)> lk(mutex);
 
214
    associated_buffers.insert(id);
 
215
}
 
216
 
 
217
void mc::Stream::disassociate_buffer(mg::BufferID id)
 
218
{
 
219
    std::lock_guard<decltype(mutex)> lk(mutex);
 
220
    auto it = associated_buffers.find(id);
 
221
    if (it != associated_buffers.end())
 
222
        associated_buffers.erase(it);
233
223
}
234
224
 
235
225
void mc::Stream::set_scale(float)