// SPDX-License-Identifier: LGPL-2.1
/*
* Copyright (C) SUSE 2024
* Author(s): Enzo Matsumiya <ematsumiya@suse.de>
*
* Splice support for cifs.ko
*/
#include "splice.h"
ssize_t cifs_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe,
			 size_t len, unsigned int flags)
{
	pr_err("%s: start pos %lld, len %zu, flags 0x%x\n",
	       __func__, ppos ? *ppos : 0, len, flags);

	return in->f_op->splice_read(in, ppos, pipe, len, flags);
#if 0
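	/*
	 * Reference copy of the generic filemap_splice_read() loop, compiled
	 * out for now.
	 */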
	struct folio_batch fbatch;
	struct kiocb iocb;
	size_t total_spliced = 0, used, npages;
	loff_t isize, end_offset;
	bool writably_mapped;
	int i, error = 0;

	if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))
		return 0;
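
	/* Synchronous kiocb at *ppos drives filemap_get_pages() below. */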
	init_sync_kiocb(&iocb, in);
	iocb.ki_pos = *ppos;

	/* Work out how much data we can actually add into the pipe */
	used = pipe_occupancy(pipe->head, pipe->tail);
	npages = max_t(ssize_t, pipe->max_usage - used, 0);
	len = min_t(size_t, len, npages * PAGE_SIZE);

	folio_batch_init(&fbatch);
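
	/*
	 * Pull uptodate folios from the page cache and splice them into the
	 * pipe until the requested length is consumed, EOF is reached, or the
	 * pipe fills up.
	 */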
	do {
		cond_resched();

		if (*ppos >= i_size_read(in->f_mapping->host))
			break;

		iocb.ki_pos = *ppos;
		error = filemap_get_pages(&iocb, len, &fbatch, true);
		if (error < 0)
			break;

		/*
		 * i_size must be checked after we know the pages are Uptodate.
		 *
		 * Checking i_size after the check allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */
		isize = i_size_read(in->f_mapping->host);
		if (unlikely(*ppos >= isize))
			break;
		end_offset = min_t(loff_t, isize, *ppos + len);

		/*
		 * Once we start copying data, we don't want to be touching any
		 * cachelines that might be contended:
		 */
		writably_mapped = mapping_writably_mapped(in->f_mapping);

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			size_t n;

			if (folio_pos(folio) >= end_offset)
				goto out;
			folio_mark_accessed(folio);

			/*
			 * If users can be writing to this folio using arbitrary
			 * virtual addresses, take care of potential aliasing
			 * before reading the folio on the kernel side.
			 */
			if (writably_mapped)
				flush_dcache_folio(folio);

			n = min_t(loff_t, len, isize - *ppos);
			n = splice_folio_into_pipe(pipe, folio, *ppos, n);
			if (!n)
				goto out;
			len -= n;
			total_spliced += n;
			*ppos += n;
			in->f_ra.prev_pos = *ppos;

			if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
				goto out;
		}

		folio_batch_release(&fbatch);
	} while (len);

out:
	folio_batch_release(&fbatch);
	file_accessed(in);

	return total_spliced ? total_spliced : error;
#endif
}
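
/*
 * Splice-write path: log the request, then call the file's ->splice_write
 * operation.  The "#if 0" block below keeps a reference copy of the generic
 * iter_file_splice_write() loop from fs/splice.c.
 */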
ssize_t
iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
		       loff_t *ppos, size_t len, unsigned int flags)
{
	pr_err("%s: start fname %s, pos %lld, len %zu, flags 0x%x\n",
	       __func__, out->f_path.dentry->d_name.name,
	       ppos ? *ppos : 0, len, flags);

	return out->f_op->splice_write(pipe, out, ppos, len, flags);
#if 0
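	/*
	 * Reference copy of the generic iter_file_splice_write() loop,
	 * compiled out for now.
	 */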
	struct splice_desc sd = {
		.total_len = len,
		.flags = flags,
		.pos = *ppos,
		.u.file = out,
	};
	int nbufs = pipe->max_usage;
	struct bio_vec *array;
	ssize_t ret;

	if (!out->f_op->write_iter)
		return -EINVAL;

	array = kcalloc(nbufs, sizeof(struct bio_vec), GFP_KERNEL);
	if (unlikely(!array))
		return -ENOMEM;

	pipe_lock(pipe);

	splice_from_pipe_begin(&sd);
	while (sd.total_len) {
		struct kiocb kiocb;
		struct iov_iter from;
		unsigned int head, tail, mask;
		size_t left;
		int n;

		ret = splice_from_pipe_next(pipe, &sd);
		if (ret <= 0)
			break;
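
		/*
		 * The pipe may have been resized since the bvec array was
		 * allocated; grow the array to match pipe->max_usage.
		 */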
		if (unlikely(nbufs < pipe->max_usage)) {
			kfree(array);
			nbufs = pipe->max_usage;
			array = kcalloc(nbufs, sizeof(struct bio_vec),
					GFP_KERNEL);
			if (!array) {
				ret = -ENOMEM;
				break;
			}
		}

		head = pipe->head;
		tail = pipe->tail;
		mask = pipe->ring_size - 1;

		/* build the vector */
		left = sd.total_len;
		for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			size_t this_len = buf->len;

			/* zero-length bvecs are not supported, skip them */
			if (!this_len)
				continue;

			this_len = min(this_len, left);
			ret = pipe_buf_confirm(pipe, buf);
			if (unlikely(ret)) {
				if (ret == -ENODATA)
					ret = 0;
				goto done;
			}

			bvec_set_page(&array[n], buf->page, this_len,
				      buf->offset);
			left -= this_len;
			n++;
		}
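
		/*
		 * Send the collected pipe buffers to the file as a single
		 * synchronous ->write_iter() call at the current position.
		 */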
		iov_iter_bvec(&from, ITER_SOURCE, array, n, sd.total_len - left);
		init_sync_kiocb(&kiocb, out);
		kiocb.ki_pos = sd.pos;
		ret = call_write_iter(out, &kiocb, &from);
		sd.pos = kiocb.ki_pos;
		if (ret <= 0)
			break;

		sd.num_spliced += ret;
		sd.total_len -= ret;
		*ppos = sd.pos;

		/* dismiss the fully eaten buffers, adjust the partial one */
		tail = pipe->tail;
		while (ret) {
			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
			if (ret >= buf->len) {
				ret -= buf->len;
				buf->len = 0;
				pipe_buf_release(pipe, buf);
				tail++;
				pipe->tail = tail;
				if (pipe->files)
					sd.need_wakeup = true;
			} else {
				buf->offset += ret;
				buf->len -= ret;
				ret = 0;
			}
		}
	}
done:
	kfree(array);
	splice_from_pipe_end(pipe, &sd);
	pipe_unlock(pipe);

	if (sd.num_spliced)
		ret = sd.num_spliced;

	return ret;
#endif
}