1
// libTorrent - BitTorrent library
// Copyright (C) 2005-2007, Jari Sundell
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations
// including the two.
//
// You must obey the GNU General Public License in all respects for
// all of the code used other than OpenSSL. If you modify file(s)
// with this exception, you may extend this exception to your version
// of the file(s), but you are not obligated to do so. If you do not
// wish to do so, delete this exception statement from your version.
// If you delete this exception statement from all source files in the
// program, then also delete it here.
//
// Contact: Jari Sundell <jaris@ifi.uio.no>
//
// 3185 Skoppum, NORWAY
41
#include "torrent/exceptions.h"
43
#include "hash_queue.h"
44
#include "hash_chunk.h"
46
#include "chunk_list_node.h"
51
struct HashQueueEqual {
52
HashQueueEqual(HashQueueNode::id_type id, uint32_t index) : m_id(id), m_index(index) {}
54
bool operator () (const HashQueueNode& q) const { return m_id == q.id() && m_index == q.get_index(); }
56
HashQueueNode::id_type m_id;
60
struct HashQueueWillneed {
61
HashQueueWillneed(int bytes) : m_bytes(bytes) {}
63
bool operator () (HashQueueNode& q) { return (m_bytes -= q.call_willneed()) <= 0; }
68
// If madvise is not available it will always mark the pages as being
69
// in memory, thus we don't need to modify m_maxTries to have full
70
// disk usage. But this may cause too much blocking as it will think
71
// everything is in memory, thus we need to throttle.
73
// Constructor. Only part of the initializer list survived extraction;
// the remaining initializers (and the body's framing) are missing here.
// m_readAhead defaults to 10 MiB of willneed() read-ahead.
HashQueue::HashQueue() :
74
m_readAhead(10 << 20),
78
// Bind this object's work() as the callback invoked when the scheduled
// task fires.
m_taskWork.set_slot(rak::mem_fn(this, &HashQueue::work));
83
HashQueue::willneed(int bytes) {
84
std::find_if(begin(), end(), HashQueueWillneed(bytes));
87
// If we're done immediately, move the chunk to the front of the list so
88
// the next work cycle gets stuff done.
90
// Queue a chunk for hash checking. `d` is invoked with the chunk and its
// hash once checking completes (or with NULL on removal, per remove()).
HashQueue::push_back(ChunkHandle handle, slot_done_type d) {
91
if (!handle.is_valid())
92
// NOTE(review): the message says "add(...)" while the method is
// push_back; looks like a stale name from a rename.
throw internal_error("HashQueue::add(...) received an invalid chunk");
94
// The HashChunk wraps the handle; ownership passes into the node below.
HashChunk* hc = new HashChunk(handle);
97
// NOTE(review): the throw message implies these lines run only when the
// queue is empty; the guarding empty() check appears to have been lost
// in extraction — verify against the full source.
if (m_taskWork.is_queued())
98
throw internal_error("Empty HashQueue is still in task schedule");
101
// First element: schedule a work cycle almost immediately.
priority_queue_insert(&taskScheduler, &m_taskWork, cachedTime + 1);
104
base_type::push_back(HashQueueNode(hc, d));
105
// Warm up the page cache for the configured read-ahead window.
willneed(m_readAhead);
109
HashQueue::has(HashQueueNode::id_type id) {
110
return std::find_if(begin(), end(), rak::equal(id, std::mem_fun_ref(&HashQueueNode::id))) != end();
114
HashQueue::has(HashQueueNode::id_type id, uint32_t index) {
115
return std::find_if(begin(), end(), HashQueueEqual(id, index)) != end();
119
// Remove every queued node owned by `id`, invoking each node's done slot
// so the owner learns the chunk was dropped rather than hashed.
HashQueue::remove(HashQueueNode::id_type id) {
120
iterator itr = begin();
122
// Repeatedly locate the next node with a matching owner id.
while ((itr = std::find_if(itr, end(), rak::equal(id, std::mem_fun_ref(&HashQueueNode::id)))) != end()) {
123
// NULL hash signals to the owner that no hash was computed.
itr->slot_done()(*itr->get_chunk()->chunk(), NULL);
130
// NOTE(review): the node cleanup/erase lines between the callback and
// this point are missing from the extraction. Deschedule the work task
// (presumably once the queue has emptied — verify against full source).
priority_queue_erase(&taskScheduler, &m_taskWork);
136
// NOTE(review): fragment of HashQueue::clear(); its signature and the
// surrounding lines are missing from this extraction. Clearing a queue
// that still holds valid nodes is treated as an internal error.
throw internal_error("HashQueue::clear() called but valid nodes were found.");
138
// Replace with a dtor check to ensure it is empty?
139
// std::for_each(begin(), end(), std::mem_fun_ref(&HashQueueNode::clear));
140
// base_type::clear();
141
// priority_queue_erase(&taskScheduler, &m_taskWork);
149
if (!check(++m_tries >= m_maxTries))
150
return priority_queue_insert(&taskScheduler, &m_taskWork, cachedTime + m_interval);
152
if (!empty() && !m_taskWork.is_queued())
153
priority_queue_insert(&taskScheduler, &m_taskWork, cachedTime + 1);
155
m_tries = std::min(0, m_tries - 2);
159
// Attempt to hash-check the chunk at the front of the queue. `force`
// tells perform() to proceed even if the chunk's pages may not be
// resident. The return statements and closing braces of this function
// are missing from the extraction.
HashQueue::check(bool force) {
160
// perform() declining (presumably because pages are not yet in memory
// and we are not forcing): keep the read-ahead window warm and retry on
// a later work cycle.
if (!base_type::front().perform(force)) {
161
willneed(m_readAhead);
165
HashChunk* chunk = base_type::front().get_chunk();
166
HashQueueNode::slot_done_type slotDone = base_type::front().slot_done();
168
// The node is popped before the hash is finalized and the callback run.
base_type::pop_front();
171
// NOTE(review): `buffer` is declared on a line lost in extraction —
// presumably a local digest buffer filled by hash_c(); verify against
// the full source.
chunk->hash_c(buffer);
173
// Hand the chunk and its computed hash back to the owner.
slotDone(*chunk->chunk(), buffer);
176
// This should be a few chunks ahead.
178
willneed(m_readAhead);