Diffstat (limited to 'fs/smb/client/splice.c')
-rw-r--r--	fs/smb/client/splice.c	232
1 file changed, 232 insertions(+), 0 deletions(-)
diff --git a/fs/smb/client/splice.c b/fs/smb/client/splice.c
new file mode 100644
index 000000000000..1125fe2868d5
--- /dev/null
+++ b/fs/smb/client/splice.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * Copyright (C) SUSE 2024
+ * Author(s): Enzo Matsumiya <ematsumiya@suse.de>
+ *
+ * Splice support for cifs.ko
+ */
+#include "splice.h"
+
+ssize_t cifs_splice_read(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe,
+			 size_t len, unsigned int flags)
+{
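+	/* Debug tracing: log the request, then call the file's ->splice_read(). */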
+ pr_err("%s: start pos %lld, len %zu, flags 0x%x\n", __func__, ppos ? *ppos : 0, len, flags);
+ return in->f_op->splice_read(in, ppos, pipe, len, flags);
+#if 0
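+	/* Disabled reference copy of filemap_splice_read() from mm/filemap.c. */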
+	struct folio_batch fbatch;
+	struct kiocb iocb;
+	size_t total_spliced = 0, used, npages;
+	loff_t isize, end_offset;
+	bool writably_mapped;
+	int i, error = 0;
+
+	if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))
+		return 0;
+
+	init_sync_kiocb(&iocb, in);
+	iocb.ki_pos = *ppos;
+
+	/* Work out how much data we can actually add into the pipe */
+	used = pipe_occupancy(pipe->head, pipe->tail);
+	npages = max_t(ssize_t, pipe->max_usage - used, 0);
+	len = min_t(size_t, len, npages * PAGE_SIZE);
+
+	folio_batch_init(&fbatch);
+
+	do {
+		cond_resched();
+
+		if (*ppos >= i_size_read(in->f_mapping->host))
+			break;
+
+		iocb.ki_pos = *ppos;
+		error = filemap_get_pages(&iocb, len, &fbatch, true);
+		if (error < 0)
+			break;
+
+		/*
+		 * i_size must be checked after we know the pages are Uptodate.
+		 *
+		 * Checking i_size after the check allows us to calculate
+		 * the correct value for "nr", which means the zero-filled
+		 * part of the page is not copied back to userspace (unless
+		 * another truncate extends the file - this is desired though).
+		 */
+		isize = i_size_read(in->f_mapping->host);
+		if (unlikely(*ppos >= isize))
+			break;
+		end_offset = min_t(loff_t, isize, *ppos + len);
+
+		/*
+		 * Once we start copying data, we don't want to be touching any
+		 * cachelines that might be contended:
+		 */
+		writably_mapped = mapping_writably_mapped(in->f_mapping);
+
+		for (i = 0; i < folio_batch_count(&fbatch); i++) {
+			struct folio *folio = fbatch.folios[i];
+			size_t n;
+
+			if (folio_pos(folio) >= end_offset)
+				goto out;
+			folio_mark_accessed(folio);
+
+			/*
+			 * If users can be writing to this folio using arbitrary
+			 * virtual addresses, take care of potential aliasing
+			 * before reading the folio on the kernel side.
+			 */
+			if (writably_mapped)
+				flush_dcache_folio(folio);
+
+			n = min_t(loff_t, len, isize - *ppos);
+			n = splice_folio_into_pipe(pipe, folio, *ppos, n);
+			if (!n)
+				goto out;
+			len -= n;
+			total_spliced += n;
+			*ppos += n;
+			in->f_ra.prev_pos = *ppos;
+			if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+				goto out;
+		}
+
+		folio_batch_release(&fbatch);
+	} while (len);
+
+out:
+	folio_batch_release(&fbatch);
+	file_accessed(in);
+
+	return total_spliced ? total_spliced : error;
+#endif
+}
+
+ssize_t
+iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
+		       loff_t *ppos, size_t len, unsigned int flags)
+{
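+	/* Debug tracing: log the request, then call the file's ->splice_write(). */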
+ pr_err("%s: start fname %s, pos %lld, len %zu, flags 0x%x\n", __func__, out->f_path.dentry->d_name.name, ppos ? *ppos : 0, len, flags);
+ return out->f_op->splice_write(pipe, out, ppos, len, flags);
+#if 0
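+	/* Disabled reference copy of iter_file_splice_write() from fs/splice.c. */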
+	struct splice_desc sd = {
+		.total_len = len,
+		.flags = flags,
+		.pos = *ppos,
+		.u.file = out,
+	};
+	int nbufs = pipe->max_usage;
+	struct bio_vec *array;
+	ssize_t ret;
+
+	if (!out->f_op->write_iter)
+		return -EINVAL;
+
+	array = kcalloc(nbufs, sizeof(struct bio_vec), GFP_KERNEL);
+	if (unlikely(!array))
+		return -ENOMEM;
+
+	pipe_lock(pipe);
+
+	splice_from_pipe_begin(&sd);
+	while (sd.total_len) {
+		struct kiocb kiocb;
+		struct iov_iter from;
+		unsigned int head, tail, mask;
+		size_t left;
+		int n;
+
+		ret = splice_from_pipe_next(pipe, &sd);
+		if (ret <= 0)
+			break;
+
+		if (unlikely(nbufs < pipe->max_usage)) {
+			kfree(array);
+			nbufs = pipe->max_usage;
+			array = kcalloc(nbufs, sizeof(struct bio_vec),
+					GFP_KERNEL);
+			if (!array) {
+				ret = -ENOMEM;
+				break;
+			}
+		}
+
+		head = pipe->head;
+		tail = pipe->tail;
+		mask = pipe->ring_size - 1;
+
+		/* build the vector */
+		left = sd.total_len;
+		for (n = 0; !pipe_empty(head, tail) && left && n < nbufs; tail++) {
+			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
+			size_t this_len = buf->len;
+
+			/* zero-length bvecs are not supported, skip them */
+			if (!this_len)
+				continue;
+			this_len = min(this_len, left);
+
+			ret = pipe_buf_confirm(pipe, buf);
+			if (unlikely(ret)) {
+				if (ret == -ENODATA)
+					ret = 0;
+				goto done;
+			}
+
+			bvec_set_page(&array[n], buf->page, this_len,
+				      buf->offset);
+			left -= this_len;
+			n++;
+		}
+
+		iov_iter_bvec(&from, ITER_SOURCE, array, n, sd.total_len - left);
+		init_sync_kiocb(&kiocb, out);
+		kiocb.ki_pos = sd.pos;
+		ret = call_write_iter(out, &kiocb, &from);
+		sd.pos = kiocb.ki_pos;
+		if (ret <= 0)
+			break;
+
+		sd.num_spliced += ret;
+		sd.total_len -= ret;
+		*ppos = sd.pos;
+
+		/* dismiss the fully eaten buffers, adjust the partial one */
+		tail = pipe->tail;
+		while (ret) {
+			struct pipe_buffer *buf = &pipe->bufs[tail & mask];
+			if (ret >= buf->len) {
+				ret -= buf->len;
+				buf->len = 0;
+				pipe_buf_release(pipe, buf);
+				tail++;
+				pipe->tail = tail;
+				if (pipe->files)
+					sd.need_wakeup = true;
+			} else {
+				buf->offset += ret;
+				buf->len -= ret;
+				ret = 0;
+			}
+		}
+	}
+done:
+	kfree(array);
+	splice_from_pipe_end(pipe, &sd);
+
+	pipe_unlock(pipe);
+
+	if (sd.num_spliced)
+		ret = sd.num_spliced;
+
+	return ret;
+#endif
+}
+
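The diff above only adds the two tracing wrappers; it does not show how (or whether) cifsfs.c points the cifs file_operations tables at them. A quick way to exercise the read-side path once the module is loaded is a small user-space splice(2) test like the sketch below. The mount point and file name are placeholders, and nothing here is part of the commit itself.

/*
 * Sketch: drive ->splice_read on a cifs mount from user space.
 * "/mnt/cifs/testfile" is a placeholder; any file on a cifs share will do.
 * With the patch applied, the splice() below should log one
 * "cifs_splice_read: start pos ..." line (pr_err, so visible in dmesg).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fds[2];
	ssize_t n;
	int in = open("/mnt/cifs/testfile", O_RDONLY);

	if (in < 0 || pipe(fds) < 0) {
		perror("setup");
		return 1;
	}

	/* File -> pipe: the hop that goes through f_op->splice_read. */
	n = splice(in, NULL, fds[1], NULL, 65536, 0);
	if (n < 0) {
		perror("splice");
		return 1;
	}
	fprintf(stderr, "spliced %zd bytes\n", n);

	/* Drain the pipe with plain read() so nothing is left behind. */
	while (n > 0) {
		ssize_t m = read(fds[0], buf, sizeof(buf));
		if (m <= 0)
			break;
		n -= m;
	}
	return 0;
}

The write-side wrapper can be exercised the same way by splicing from a pipe into a file on the share, since the pipe-to-file hop goes through f_op->splice_write.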