/* Map SG page in kernel virtual address space and copy */
static inline void sg_map_copy(u8 *dest, struct scatterlist *sg,
			       int len, int offset)
{
	u8 *mapped_addr;

	/*
	 * The page here may be user-space memory pinned with
	 * get_user_pages(), so it must be kmapped before use and
	 * kunmapped afterwards.
	 */
	mapped_addr = kmap_atomic(sg_page(sg));
	memcpy(dest, mapped_addr + offset, len);
	kunmap_atomic(mapped_addr);
}
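/*
 * Illustrative usage sketch (not part of the original file): copy a single
 * scatterlist entry into a caller-supplied buffer. Since kmap_atomic() maps
 * one page, the entry's offset + length is assumed not to run past the end
 * of the page referenced by the entry. The function name is hypothetical.
 */
static inline void example_copy_one_entry(u8 *dest, struct scatterlist *sg)
{
	sg_map_copy(dest, sg, sg->length, sg->offset);
}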
/* Copy len bytes of sg data to dest, starting from the beginning of sg */
static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
{
	struct scatterlist *current_sg = sg;
	int cpy_index = 0, next_cpy_index = current_sg->length;

	/* Copy whole entries while they fit entirely within len */
	while (next_cpy_index < len) {
		sg_map_copy(dest + cpy_index, current_sg, current_sg->length,
			    current_sg->offset);
		current_sg = scatterwalk_sg_next(current_sg);
		cpy_index = next_cpy_index;
		next_cpy_index += current_sg->length;
	}
	/* Copy the remaining partial entry */
	sg_map_copy(dest + cpy_index, current_sg, len - cpy_index,
		    current_sg->offset);
}
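/*
 * Illustrative usage sketch (not part of the original file): linearize the
 * first nbytes of a scatterlist into a flat buffer. The function name is
 * hypothetical; buf is assumed to be at least nbytes bytes long.
 */
static inline void example_linearize(u8 *buf, struct scatterlist *src,
				     unsigned int nbytes)
{
	sg_copy(buf, src, nbytes);
}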
/* Copy sg data, from byte offset to_skip up to end, to dest */
static inline void sg_copy_part(u8 *dest, struct scatterlist *sg,
				int to_skip, unsigned int end)
{
	struct scatterlist *current_sg = sg;
	int sg_index, cpy_index, offset;

	/* Skip the entries that lie entirely before to_skip */
	sg_index = current_sg->length;
	while (sg_index <= to_skip) {
		current_sg = scatterwalk_sg_next(current_sg);
		sg_index += current_sg->length;
	}
	/* Copy the tail of the entry that contains byte to_skip */
	cpy_index = sg_index - to_skip;
	offset = current_sg->offset + current_sg->length - cpy_index;
	sg_map_copy(dest, current_sg, cpy_index, offset);
	/* Copy the remaining entries, if any, up to end */
	if (end - sg_index) {
		current_sg = scatterwalk_sg_next(current_sg);
		sg_copy(dest + cpy_index, current_sg, end - sg_index);
	}
}
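/*
 * Illustrative usage sketch (not part of the original file): copy the
 * unprocessed tail of a scatterlist, i.e. bytes [processed, total), into a
 * freshly allocated bounce buffer. Names are hypothetical and <linux/slab.h>
 * is assumed to be available for kmalloc().
 */
static inline u8 *example_save_tail(struct scatterlist *src,
				    unsigned int processed, unsigned int total)
{
	u8 *tail_buf = kmalloc(total - processed, GFP_KERNEL);

	if (!tail_buf)
		return NULL;
	sg_copy_part(tail_buf, src, processed, total);
	return tail_buf;
}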