// libTorrent - BitTorrent library
// Copyright (C) 2005-2007, Jari Sundell
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
// In addition, as a special exception, the copyright holders give
// permission to link the code of portions of this program with the
// OpenSSL library under certain conditions as described in each
// individual source file, and distribute linked combinations
// including the two.
//
// You must obey the GNU General Public License in all respects for
// all of the code used other than OpenSSL. If you modify file(s)
// with this exception, you may extend this exception to your version
// of the file(s), but you are not obligated to do so. If you do not
// wish to do so, delete this exception statement from your version.
// If you delete this exception statement from all source files in the
// program, then also delete it here.
//
// Contact: Jari Sundell <jaris@ifi.uio.no>
//
//          3185 Skoppum, NORWAY
41
#include <rak/functional.h>
43
#include "protocol/peer_chunks.h"
44
#include "torrent/exceptions.h"
46
#include "chunk_selector.h"
47
#include "chunk_statistics.h"
51
// Consider making statistics a part of selector.
53
// Initialize the selector's state from the download's bitfield: m_bitfield is
// filled with the bitwise inverse of 'bf', so a set bit marks a chunk we still
// want. The shared (seeder) candidate queue is enabled with capacity 32.
//
// NOTE(review): this copy is garbled by extraction — stray numeric tokens sit
// between statements, and lines are missing (the return type, presumably a
// statement storing 'cs' into m_statistics, which later code reads, and the
// closing brace). Restore from the upstream chunk_selector.cc before building.
ChunkSelector::initialize(Bitfield* bf, ChunkStatistics* cs) {
54
// No current position yet; one is chosen lazily (see update_priorities()).
m_position = invalid_chunk;
57
m_bitfield.set_size_bits(bf->size_bits());
58
m_bitfield.allocate();
59
// Invert the completed-chunks bitfield: set bits = chunks still to download.
std::transform(bf->begin(), bf->end(), m_bitfield.begin(), rak::invert<Bitfield::value_type>());
62
m_sharedQueue.enable(32);
63
m_sharedQueue.clear();
67
ChunkSelector::cleanup() {
72
// Consider if ChunkSelector::not_using_index(...) needs to be called here.
// (NOTE: the rest of this sentence is missing in this copy.)
75
// Rebuild selection state after the priority ranges changed: drop the shared
// queue's cached candidates and make sure we have a valid current position.
// NOTE(review): garbled copy — stray numeric tokens; the return type, any
// leading guard, a trailing call (the commented-out advance logic at the
// bottom of the file suggests advance_position()) and the closing brace are
// missing. Restore from upstream before building.
ChunkSelector::update_priorities() {
79
m_sharedQueue.clear();
81
// Pick a random starting position if we do not have one yet; randomizing
// spreads different peers across the torrent rather than clustering them.
if (m_position == invalid_chunk)
82
m_position = random() % size();
88
// Pick a chunk index to request from peer 'pc'. First drains the peer's
// cached partial_queue of candidates; on a miss it refills the queue with a
// linear search of the high-priority ranges (wrapping around m_position),
// then falls back to the normal-priority ranges. Returns invalid_chunk when
// nothing suitable is found.
//
// NOTE(review): garbled copy — stray numeric tokens throughout, and several
// statements are missing (the early-return body, the 'continue'/'return'
// inside the while loop, the else branches, the final return and closing
// brace). Restore from upstream before building.
ChunkSelector::find(PeerChunks* pc, __UNUSED bool highPriority) {
89
// This needs to be re-enabled.
90
if (m_position == invalid_chunk)
93
// When we're a seeder, 'm_sharedQueue' is used. Since the peer's
94
// bitfield is guaranteed to be filled we can use the same code as
95
// for non-seeders. This generalization does incur a slight
96
// performance hit as it compares against a bitfield we know has all
// bits set.
98
rak::partial_queue* queue = pc->is_seeder() ? &m_sharedQueue : pc->download_cache();
100
// Occasionally randomize the position to prevent inefficient
101
// distribution with a slow seed and fast peers all arriving at the
102
// same position. NOTE(review): the '& 63' mask fires ~1 in 64 calls,
// but an older comment here said "every 16 chunks" — confirm which
// rate is intended.
103
if ((random() & 63) == 0) {
104
m_position = random() % size();
108
if (queue->is_enabled()) {
110
// First check the cached queue.
111
while (queue->prepare_pop()) {
112
uint32_t pos = queue->pop();
114
// Skip cached entries for chunks we no longer want (bit cleared by
// using_index()); the accept path for valid entries is missing here.
if (!m_bitfield.get(pos))
121
// Only non-seeders can reach this branch, as m_sharedQueue is
// always enabled.
128
// Refill: scan high-priority ranges from m_position to the end, then
// wrap to the start; '&&' short-circuits once the queue is full.
(search_linear(pc->bitfield(), queue, &m_highPriority, m_position, size()) &&
129
search_linear(pc->bitfield(), queue, &m_highPriority, 0, m_position));
131
if (queue->prepare_pop()) {
132
// Set that the peer has high priority pieces cached.
135
// Set that the peer has normal priority pieces cached.
140
(search_linear(pc->bitfield(), queue, &m_normalPriority, m_position, size()) &&
141
search_linear(pc->bitfield(), queue, &m_normalPriority, 0, m_position));
143
if (!queue->prepare_pop())
144
return invalid_chunk;
147
uint32_t pos = queue->pop();
149
// Sanity check: everything in the queue must still be a wanted chunk.
if (!m_bitfield.get(pos))
150
throw internal_error("ChunkSelector::find(...) bad index.");
156
// True when chunk 'index' is still missing (its bit is set in the inverted
// bitfield) and lies inside a normal- or high-priority download range.
// NOTE(review): the return type (presumably bool) and closing brace are
// missing from this copy.
ChunkSelector::is_wanted(uint32_t index) const {
157
return m_bitfield.get(index) && (m_normalPriority.has(index) || m_highPriority.has(index));
161
// Mark chunk 'index' as being downloaded: clear its bit so it is no longer
// offered by find(), and advance m_position if it pointed at this chunk.
//
// NOTE(review): garbled copy — the range-check condition guarding the first
// throw (presumably 'if (index >= size())'), the statement under the final
// 'if', the return type and the closing brace are missing. Also note the
// internal_error messages still say 'select_index' — likely a stale name
// from before a rename; left byte-identical here.
ChunkSelector::using_index(uint32_t index) {
163
throw internal_error("ChunkSelector::select_index(...) index out of range.");
165
if (!m_bitfield.get(index))
166
throw internal_error("ChunkSelector::select_index(...) index already set.");
168
m_bitfield.unset(index);
170
// We always know 'm_position' points to a wanted chunk. If it
171
// changes, we need to move m_position to the next one.
172
if (index == m_position)
177
// Mark chunk 'index' as no longer being downloaded: set its bit again so it
// becomes a candidate for find() once more.
//
// NOTE(review): garbled copy — the range-check condition guarding the first
// throw, the statement under the final 'if', the return type and the closing
// brace are missing. The internal_error messages still say 'deselect_index',
// likely a stale name from before a rename; left byte-identical here.
ChunkSelector::not_using_index(uint32_t index) {
179
throw internal_error("ChunkSelector::deselect_index(...) index out of range.");
181
if (m_bitfield.get(index))
182
throw internal_error("ChunkSelector::deselect_index(...) index already unset.");
184
m_bitfield.set(index);
186
// This will make sure that if we enable new chunks, it will start
187
// downloading them even when 'index == invalid_chunk'.
188
if (m_position == invalid_chunk)
192
// This could probably be split into two functions: one for checking
// whether it should insert into the download_queue(), and the other
// for whether we are interested in the new piece.
196
// Since this gets called whenever a new piece arrives, we can skip
197
// the rarity-first/linear etc searches through bitfields and just
198
// start downloading.
200
// Called when peer 'pc' announces it has chunk 'index'. If the chunk is
// wanted and prioritized, push it straight into the peer's candidate cache
// (rarity-weighted) instead of waiting for the next full bitfield search.
//
// NOTE(review): garbled copy — the bodies of the first two guards (presumably
// early returns), the return type and the closing brace are missing.
ChunkSelector::received_have_chunk(PeerChunks* pc, uint32_t index) {
201
// Ignore chunks we already have / no longer want.
if (!m_bitfield.get(index))
204
// Also check if the peer only has high-priority chunks.
205
// Ignore chunks outside every priority range.
if (!m_highPriority.has(index) && !m_normalPriority.has(index))
208
if (pc->download_cache()->is_enabled())
209
pc->download_cache()->insert(m_statistics->rarity(index), index);
215
// Walk the priority ranges intersecting [first, last) and feed candidate
// chunk indices from each intersection into 'pq' via search_linear_range().
// The short-circuit on a false return presumably stops once the queue fills.
//
// NOTE(review): garbled copy — the loop body's statement(s) after the 'if',
// the iterator increment, the return, the return type and the closing brace
// are missing.
ChunkSelector::search_linear(const Bitfield* bf, rak::partial_queue* pq, priority_ranges* ranges, uint32_t first, uint32_t last) {
216
priority_ranges::iterator itr = ranges->find(first);
218
while (itr != ranges->end() && itr->first < last) {
220
// Clamp the range to [first, last) before scanning it.
if (!search_linear_range(bf, pq, std::max(first, itr->first), std::min(last, itr->second)))
229
// Could probably add another argument for max seen or something; this
// would be used to find better chunks to request.
232
// Scan [first, last) byte-by-byte, AND-ing the peer's bitfield with our
// wanted-bitfield, and push each set bit (a chunk the peer has and we want)
// into 'pq' via search_linear_byte(). Edge bytes are masked so bits outside
// [first, last) are ignored.
//
// NOTE(review): garbled copy — the statement under the 'if' inside the while
// loop (presumably 'return false' when the queue fills), the return type and
// the closing brace are missing.
ChunkSelector::search_linear_range(const Bitfield* bf, rak::partial_queue* pq, uint32_t first, uint32_t last) {
233
if (first >= last || last > size())
234
throw internal_error("ChunkSelector::search_linear_range(...) received an invalid range.");
236
// Byte iterators into both bitfields, starting at the byte containing 'first'.
Bitfield::const_iterator local = m_bitfield.begin() + first / 8;
237
Bitfield::const_iterator source = bf->begin() + first / 8;
239
// Unset any bits before 'first'.
240
Bitfield::value_type wanted = (*source & *local) & Bitfield::mask_from(first % 8);
242
// Process whole bytes while the next byte boundary is still below 'last'.
while (m_bitfield.position(local + 1) < last) {
243
if (wanted && !search_linear_byte(pq, m_bitfield.position(local), wanted))
246
wanted = (*++source & *++local);
249
// Unset any bits from 'last'.
250
wanted &= Bitfield::mask_before(last - m_bitfield.position(local));
253
return search_linear_byte(pq, m_bitfield.position(local), wanted);
258
// Take pointer to partial_queue
260
// Insert every set bit of 'wanted' (chunk indices index..index+7) into 'pq',
// keyed by the chunk's rarity from m_statistics. The combined insert-failed/
// queue-full test presumably aborts the scan once the queue can accept no
// rarer entries.
//
// NOTE(review): garbled copy — the 'continue' under the first 'if', the
// statement under the second 'if' (presumably 'return false'), the final
// return, the return type and the closing brace are missing.
ChunkSelector::search_linear_byte(rak::partial_queue* pq, uint32_t index, Bitfield::value_type wanted) {
261
for (int i = 0; i < 8; ++i) {
262
if (!(wanted & Bitfield::mask_at(i)))
265
if (!pq->insert(m_statistics->rarity(index + i), index + i) && pq->is_full())
273
// Move m_position to the next wanted chunk after the current one was taken.
// The commented-out code sketches the intended search order: high-priority
// ranges from the old position wrapping around, then normal-priority ranges.
//
// NOTE(review): garbled copy — the active body, return type and closing
// brace are missing; only the disabled sketch below survives. The sketch
// also passes arguments that do not match search_linear()'s visible
// signature (no partial_queue), so it predates or postdates that signature.
ChunkSelector::advance_position() {
275
// Need to replace with a special-purpose function for finding the
// next wanted chunk. (NOTE: rest of this comment is missing in this copy.)
278
// int position = m_position;
280
// ((m_position = search_linear(&m_bitfield, &m_highPriority, position, size())) == invalid_chunk &&
281
// (m_position = search_linear(&m_bitfield, &m_highPriority, 0, position)) == invalid_chunk &&
282
// (m_position = search_linear(&m_bitfield, &m_normalPriority, position, size())) == invalid_chunk &&
283
// (m_position = search_linear(&m_bitfield, &m_normalPriority, 0, position)) == invalid_chunk);