// libTorrent - BitTorrent library
// Copyright (C) 2005-2007, Jari Sundell
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations
// including the two.
//
// You must obey the GNU General Public License in all respects for
// all of the code used other than OpenSSL.  If you modify file(s)
// with this exception, you may extend this exception to your version
// of the file(s), but you are not obligated to do so.  If you do not
// wish to do so, delete this exception statement from your version.
// If you delete this exception statement from all source files in the
// program, then also delete it here.
//
// Contact:  Jari Sundell <jaris@ifi.uio.no>
//
//           Skomakerveien 33
//           3185 Skoppum, NORWAY
#include "config.h"

#include <algorithm>
#include <functional>

#include "torrent/exceptions.h"
#include "torrent/chunk_manager.h"

#include "chunk.h"
#include "chunk_list.h"
#include "globals.h"

namespace torrent {
// Functor that records the earliest non-zero modification time seen
// while iterating over chunk nodes.
struct chunk_list_earliest_modified {
  chunk_list_earliest_modified() : m_time(cachedTime) {}

  void operator () (ChunkListNode* node) {
    if (node->time_modified() < m_time && node->time_modified() != rak::timer())
      m_time = node->time_modified();
  }

  rak::timer m_time;
};
// Comparator for ordering chunk nodes by their index in the torrent.
struct chunk_list_sort_index {
  bool operator () (ChunkListNode* node1, ChunkListNode* node2) {
    return node1->index() < node2->index();
  }
};
inline bool
ChunkList::is_queued(ChunkListNode* node) {
  return std::find(m_queue.begin(), m_queue.end(), node) != m_queue.end();
}
bool
ChunkList::has_chunk(size_type index, int prot) const {
  return base_type::at(index).is_valid() && base_type::at(index).chunk()->has_permissions(prot);
}
void
ChunkList::resize(size_type s) {
  if (!empty())
    throw internal_error("ChunkList::resize(...) called on a non-empty object.");

  base_type::resize(s);

  uint32_t index = 0;

  for (iterator itr = begin(), last = end(); itr != last; ++itr, ++index)
    itr->set_index(index);
}
void
ChunkList::clear() {
  // Don't do any sync'ing as whoever decided to shut down really
  // doesn't care, so just de-reference all chunks in queue.
  for (Queue::iterator itr = m_queue.begin(), last = m_queue.end(); itr != last; ++itr) {
    if ((*itr)->references() != 1 || (*itr)->writable() != 1)
      throw internal_error("ChunkList::clear() called but a node in the queue is still referenced.");

    (*itr)->dec_rw();
    clear_chunk(*itr);
  }

  m_queue.clear();

  if (std::find_if(begin(), end(), std::mem_fun_ref(&ChunkListNode::chunk)) != end() ||
      std::find_if(begin(), end(), std::mem_fun_ref(&ChunkListNode::references)) != end() ||
      std::find_if(begin(), end(), std::mem_fun_ref(&ChunkListNode::writable)) != end())
    throw internal_error("ChunkList::clear() called but a valid node was found.");

  base_type::clear();
}
ChunkHandle
ChunkList::get(size_type index, bool writable) {
  rak::error_number::clear_global();

  ChunkListNode* node = &base_type::at(index);

  if (!node->is_valid()) {
    Chunk* chunk = m_slotCreateChunk(index, MemoryChunk::prot_read | (writable ? MemoryChunk::prot_write : 0));

    if (chunk == NULL)
      return ChunkHandle::from_error(rak::error_number::current().is_valid() ? rak::error_number::current() : rak::error_number::e_noent);

    // Would be cleaner to do this before creating the chunk.
    if (!m_manager->allocate(chunk->chunk_size())) {
      delete chunk;
      return ChunkHandle::from_error(rak::error_number::e_nomem);
    }

    node->set_chunk(chunk);
    node->set_time_modified(rak::timer());

  } else if (writable && !node->chunk()->is_writable()) {
    // Re-open the already mapped chunk with write permissions added.
    Chunk* chunk = m_slotCreateChunk(index, MemoryChunk::prot_read | (writable ? MemoryChunk::prot_write : 0));

    if (chunk == NULL)
      return ChunkHandle::from_error(rak::error_number::current().is_valid() ? rak::error_number::current() : rak::error_number::e_noent);

    delete node->chunk();

    node->set_chunk(chunk);
    node->set_time_modified(rak::timer());
  }

  node->inc_references();

  if (writable) {
    node->inc_writable();

    // Make sure that periodic syncing uses async on any subsequent
    // changes even if it was triggered before this get.
    node->set_sync_triggered(false);
  }

  return ChunkHandle(node, writable);
}
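
// Illustrative caller pattern, not part of this file: a handle
// obtained from get() must eventually be passed back to release().
//
//   ChunkHandle handle = chunk_list->get(index, true);
//
//   if (handle.is_valid()) {
//     // ... write block data through handle.object()->chunk() ...
//     chunk_list->release(&handle);
//   }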
// The chunks in 'm_queue' have been modified and need to be synced
// when appropriate. Hopefully keeping the chunks mmap'ed for a while
// will allow us to schedule writes at more reasonable intervals.
void
ChunkList::release(ChunkHandle* handle) {
  if (!handle->is_valid())
    throw internal_error("ChunkList::release(...) received an invalid handle.");

  if (handle->object() < &*begin() || handle->object() >= &*end())
    throw internal_error("ChunkList::release(...) received an unknown handle.");

  if (handle->object()->references() <= 0 || (handle->is_writable() && handle->object()->writable() <= 0))
    throw internal_error("ChunkList::release(...) received a node with bad reference count.");

  if (handle->is_writable()) {

    if (handle->object()->writable() == 1) {
      if (is_queued(handle->object()))
        throw internal_error("ChunkList::release(...) tried to queue an already queued chunk.");

      // Only add those that have a modification time set?
      //
      // Only chunks that are not already in the queue will execute
      // this branch.
      m_queue.push_back(handle->object());

    } else {
      handle->object()->dec_rw();
    }

  } else {
    if (handle->object()->dec_references() == 0) {
      if (is_queued(handle->object()))
        throw internal_error("ChunkList::release(...) tried to unmap a queued chunk.");

      clear_chunk(handle->object());
    }
  }

  handle->clear();
}
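
// Note on the counters: a chunk that enters 'm_queue' above keeps its
// last read and write reference. sync_chunk() drops both via dec_rw()
// once the chunk has been synced and released, which is also the state
// clear() asserts for queued nodes.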
inline void
ChunkList::clear_chunk(ChunkListNode* node) {
  if (!node->is_valid())
    throw internal_error("ChunkList::clear_chunk(...) !node->is_valid().");

  uint32_t size = node->chunk()->chunk_size();

  delete node->chunk();
  node->set_chunk(NULL);

  m_manager->deallocate(size);
}
inline bool
ChunkList::sync_chunk(ChunkListNode* node, std::pair<int,bool> options) {
  if (node->references() <= 0 || node->writable() <= 0)
    throw internal_error("ChunkList::sync_chunk(...) got a node with invalid reference count.");

  if (!node->chunk()->sync(options.first))
    return false;

  node->set_sync_triggered(true);

  // If options.second is false the chunk is not released, so keep the
  // reference counts and leave it in the queue for a later sync.
  if (!options.second)
    return true;

  node->dec_rw();

  if (node->references() == 0)
    clear_chunk(node);

  return true;
}
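
// The options pair comes from sync_options(): 'first' selects the
// MemoryChunk sync mode, 'second' decides whether the chunk should
// also be released from the queue after a successful sync.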
uint32_t
ChunkList::sync_chunks(int flags) {
  Queue::iterator split;

  if (flags & sync_all)
    split = m_queue.begin();
  else
    split = std::stable_partition(m_queue.begin(), m_queue.end(), rak::not_equal(1, std::mem_fun(&ChunkListNode::writable)));

  // Allow a flag that does more culling, so that we only get large
  // continuous sections.
  //
  // How does this interact with timers, should we make it so that
  // only areas with timers are (preferably) synced?

  std::sort(split, m_queue.end());

  // If we got enough diskspace and have not requested safe syncing,
  // then sync all chunks with MS_ASYNC.
  if (!(flags & (sync_safe | sync_sloppy))) {
    if (m_manager->safe_sync() || m_slotFreeDiskspace() <= m_manager->safe_free_diskspace())
      flags |= sync_safe;
    else
      flags |= sync_force;
  }

  // TODO: This won't trigger for default sync_force.
  if ((flags & sync_use_timeout) && !(flags & sync_force))
    split = partition_optimize(split, m_queue.end(), 50, 5, false);

  uint32_t failed = 0;

  for (Queue::iterator itr = split, last = m_queue.end(); itr != last; ++itr) {

    // We can easily skip pieces by swap_iter, so there should be no
    // problem being selective about the ranges we sync.
    //
    // Use a function for checking the next few chunks and see how far
    // we want to sync. When we want to sync everything use end. Call
    // before the loop, or add a check.

    // If we don't want to sync, swap and break.

    std::pair<int,bool> options = sync_options(*itr, flags);

    if (!sync_chunk(*itr, options)) {
      // The sync failed; keep the node in the queue and count it.
      std::iter_swap(itr, split++);
      failed++;

    } else if (!options.second) {
      // Synced but not released; keep the node in the queue.
      (*itr)->set_sync_triggered(true);
      std::iter_swap(itr, split++);
    }
  }

  m_queue.erase(split, m_queue.end());

  // The caller must either make sure that it is safe to close the
  // download or set the sync_ignore_error flag.
  if (failed && !(flags & sync_ignore_error))
    m_slotStorageError("Could not sync chunk: " + std::string(rak::error_number::current().c_str()));

  return failed;
}
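
// Illustrative only, assuming the sync_* values are flags defined by
// ChunkList: a periodic timer might call sync_chunks(sync_use_timeout),
// while shutdown would use sync_chunks(sync_all | sync_safe) and check
// the returned failure count.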
std::pair<int,bool>
ChunkList::sync_options(ChunkListNode* node, int flags) {
  // Using if statements since some linkers have problems with static
  // const int members inside the ?: operators. The compiler should
  // be optimizing this anyway.

  if (flags & sync_force) {

    if (flags & sync_safe)
      return std::make_pair(MemoryChunk::sync_sync, true);
    else
      return std::make_pair(MemoryChunk::sync_async, true);

  } else if (flags & sync_safe) {

    if (node->sync_triggered())
      return std::make_pair(MemoryChunk::sync_sync, true);
    else
      return std::make_pair(MemoryChunk::sync_async, false);

  } else {
    return std::make_pair(MemoryChunk::sync_async, true);
  }
}
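
// Summary of the mapping above:
//
//   flags                      sync mode    release chunk?
//   sync_force | sync_safe     sync_sync    yes
//   sync_force                 sync_async   yes
//   sync_safe, triggered       sync_sync    yes
//   sync_safe, not triggered   sync_async   no
//   (neither)                  sync_async   yes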
// Using a rather simple algorithm for now. This should really be more
// robust against holes within otherwise compact ranges and take into
// consideration chunk size.
inline ChunkList::Queue::iterator
ChunkList::seek_range(Queue::iterator first, Queue::iterator last) {
  uint32_t prevIndex = (*first)->index();

  while (++first != last) {
    if ((*first)->index() - prevIndex > 5)
      break;

    prevIndex = (*first)->index();
  }

  return first;
}
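
// Example: for queued indices 1, 3, 6, 14, 15 the gap between 6 and
// 14 exceeds 5, so seek_range() stops after 6 and the queue is split
// into the ranges [1, 3, 6] and [14, 15].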
inline bool
ChunkList::check_node(ChunkListNode* node) {
  return
    node->time_modified() != rak::timer() &&
    node->time_modified() + rak::timer::from_seconds(m_manager->timeout_sync()) < cachedTime;
}
// Optimize the selection of chunks to sync. Continuous regions are
// preferred, while if too fragmented or if too few chunks are
// available it skips syncing of all chunks.
ChunkList::Queue::iterator
ChunkList::partition_optimize(Queue::iterator first, Queue::iterator last, int weight, int maxDistance, bool dontSkip) {
  for (Queue::iterator itr = first; itr != last;) {
    Queue::iterator range = seek_range(itr, last);

    bool required = std::find_if(itr, range, std::bind1st(std::mem_fun(&ChunkList::check_node), this)) != range;
    dontSkip = dontSkip || required;

    if (!required && std::distance(itr, range) < maxDistance) {
      // Don't sync this range; move it to the front of the queue.
      unsigned int l = std::min(range - itr, itr - first);
      std::swap_ranges(first, first + l, range - l);

      first += l;

    } else {
      // This probably increases too fast.
      weight -= std::distance(itr, range) * std::distance(itr, range);
    }

    itr = range;
  }

  // These values are all arbitrary...
  if (!dontSkip && weight > 0)
    return last;

  return first;
}

}