VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/sharedfolders/regops.c@ 72601

Last change on this file since 72601 was 70786, checked in by vboxsync, 7 years ago

linux/shared folders: use indent to switch Linux-only files to kernel style.
bugref:9109: Shared folders: update to match in-kernel code more closely

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.6 KB
Line 
1/* $Id: regops.c 70786 2018-01-29 10:57:10Z vboxsync $ */
2/** @file
3 * vboxsf - VBox Linux Shared Folders, Regular file inode and file operations.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * Limitations: only COW memory mapping is supported
20 */
21
22#include "vfsmod.h"
23
24static void *alloc_bounce_buffer(size_t * tmp_sizep, PRTCCPHYS physp, size_t
25 xfer_size, const char *caller)
26{
27 size_t tmp_size;
28 void *tmp;
29
30 /* try for big first. */
31 tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
32 if (tmp_size > 16U * _1K)
33 tmp_size = 16U * _1K;
34 tmp = kmalloc(tmp_size, GFP_KERNEL);
35 if (!tmp) {
36 /* fall back on a page sized buffer. */
37 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
38 if (!tmp) {
39 LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu %s\n", caller, xfer_size));
40 return NULL;
41 }
42 tmp_size = PAGE_SIZE;
43 }
44
45 *tmp_sizep = tmp_size;
46 *physp = virt_to_phys(tmp);
47 return tmp;
48}
49
/** Free a buffer obtained from alloc_bounce_buffer(). */
static void free_bounce_buffer(void *tmp)
{
	kfree(tmp);
}
54
55/* fops */
56static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
57 struct sf_reg_info *sf_r, void *buf,
58 uint32_t * nread, uint64_t pos)
59{
60 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
61 * contiguous in physical memory (kmalloc or single page), we should
62 * use a physical address here to speed things up. */
63 int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->handle,
64 pos, nread, buf, false /* already locked? */ );
65 if (RT_FAILURE(rc)) {
66 LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller,
67 rc));
68 return -EPROTO;
69 }
70 return 0;
71}
72
73static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
74 struct sf_reg_info *sf_r, void *buf,
75 uint32_t * nwritten, uint64_t pos)
76{
77 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
78 * contiguous in physical memory (kmalloc or single page), we should
79 * use a physical address here to speed things up. */
80 int rc = VbglR0SfWrite(&client_handle, &sf_g->map, sf_r->handle,
81 pos, nwritten, buf,
82 false /* already locked? */ );
83 if (RT_FAILURE(rc)) {
84 LogFunc(("VbglR0SfWrite failed. caller=%s, rc=%Rrc\n",
85 caller, rc));
86 return -EPROTO;
87 }
88 return 0;
89}
90
91#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && \
92 LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
93
/* Unmap and free a page that backed (or was meant to back) a pipe buffer. */
void free_pipebuf(struct page *kpage)
{
	kunmap(kpage);
	__free_pages(kpage, 0);
}
99
/* ->map stub: nothing is ever mapped through this hook; returns NULL. */
void *sf_pipe_buf_map(struct pipe_inode_info *pipe,
		      struct pipe_buffer *pipe_buf, int atomic)
{
	return 0;
}
105
/* ->get stub: no per-buffer reference counting is done here. */
void sf_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf)
{
}
109
/* ->unmap stub: nothing was mapped by sf_pipe_buf_map(). */
void sf_pipe_buf_unmap(struct pipe_inode_info *pipe,
		       struct pipe_buffer *pipe_buf, void *map_data)
{
}
114
/* ->steal stub.  NOTE(review): returning 0 signals the page may be stolen
 * by the pipe reader — confirm this is intended given that release() frees
 * the page. */
int sf_pipe_buf_steal(struct pipe_inode_info *pipe,
		      struct pipe_buffer *pipe_buf)
{
	return 0;
}
120
/* ->release: drop the page that sf_splice_read() allocated for this buffer. */
static void sf_pipe_buf_release(struct pipe_inode_info *pipe,
				struct pipe_buffer *pipe_buf)
{
	free_pipebuf(pipe_buf->page);
}
126
/* ->confirm stub: buffers are fully populated before being queued, so the
 * contents are always valid. */
int sf_pipe_buf_confirm(struct pipe_inode_info *info,
			struct pipe_buffer *pipe_buf)
{
	return 0;
}
132
/* Pipe buffer operations for pages queued by sf_splice_read(); the hooks
 * are trivial because each page is private to a single splice call. */
static struct pipe_buf_operations sf_pipe_buf_ops = {
	.can_merge = 0,
	.map = sf_pipe_buf_map,
	.unmap = sf_pipe_buf_unmap,
	.confirm = sf_pipe_buf_confirm,
	.release = sf_pipe_buf_release,
	.steal = sf_pipe_buf_steal,
	.get = sf_pipe_buf_get,
};
142
/* Serialize against other users of the pipe; on these older kernels the
 * mutex lives on the pipe's backing inode, which is absent for anonymous
 * pipes — hence the NULL check.  Wrapped in do/while(0) so the macros
 * behave as single statements (the originals were bare 'if's, which is a
 * dangling-else hazard when used inside another if/else). */
#define LOCK_PIPE(pipe) \
	do { \
		if ((pipe)->inode) \
			mutex_lock(&(pipe)->inode->i_mutex); \
	} while (0)

#define UNLOCK_PIPE(pipe) \
	do { \
		if ((pipe)->inode) \
			mutex_unlock(&(pipe)->inode->i_mutex); \
	} while (0)
150
/**
 * Splice data from a shared-folder file into a pipe
 * (for 2.6.23 <= kernel < 2.6.31).
 *
 * Reads up to @a len bytes starting at @a *poffset into freshly allocated
 * pages and queues them on @a pipe; the pages are released by
 * sf_pipe_buf_release() once the reader consumes them.
 *
 * @param in      file to read from
 * @param poffset in/out: file offset to start at, advanced by bytes queued
 * @param pipe    pipe to fill
 * @param len     maximum number of bytes to transfer
 * @param flags   SPLICE_F_* flags (only SPLICE_F_NONBLOCK is honoured)
 * @returns bytes transferred, or a negative Linux error code
 */
ssize_t
sf_splice_read(struct file *in, loff_t * poffset,
	       struct pipe_inode_info *pipe, size_t len, unsigned int flags)
{
	size_t bytes_remaining = len;
	loff_t orig_offset = *poffset;
	loff_t offset = orig_offset;
	struct inode *inode = GET_F_DENTRY(in)->d_inode;
	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
	struct sf_reg_info *sf_r = in->private_data;
	ssize_t retval;
	struct page *kpage = 0;
	size_t nsent = 0;

	TRACE();
	if (!S_ISREG(inode->i_mode)) {
		LogFunc(("read from non regular file %d\n", inode->i_mode));
		return -EINVAL;
	}
	if (!len) {
		return 0;
	}

	LOCK_PIPE(pipe);

	uint32_t req_size = 0;
	while (bytes_remaining > 0) {
		/* One page per pipe buffer; ownership passes to the pipe once
		 * the buffer is queued below. */
		kpage = alloc_page(GFP_KERNEL);
		if (unlikely(kpage == NULL)) {
			UNLOCK_PIPE(pipe);
			return -ENOMEM;
		}
		req_size = 0;
		uint32_t nread = req_size =
		    (uint32_t) min(bytes_remaining, (size_t) PAGE_SIZE);
		uint32_t chunk = 0;
		void *kbuf = kmap(kpage);
		/* Keep reading until the page is full or the host reports EOF
		 * (nread == 0). */
		while (chunk < req_size) {
			retval =
			    sf_reg_read_aux(__func__, sf_g, sf_r, kbuf + chunk,
					    &nread, offset);
			if (retval < 0)
				goto err;
			if (nread == 0)
				break;
			chunk += nread;
			offset += nread;
			nread = req_size - chunk;
		}
		if (!pipe->readers) {
			/* Reader side is gone: behave like a write to a broken pipe. */
			send_sig(SIGPIPE, current, 0);
			retval = -EPIPE;
			goto err;
		}
		if (pipe->nrbufs < PIPE_BUFFERS) {
			/* Queue the page on the pipe's circular buffer ring. */
			struct pipe_buffer *pipebuf =
			    pipe->bufs +
			    ((pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS -
							      1));
			pipebuf->page = kpage;
			pipebuf->ops = &sf_pipe_buf_ops;
			pipebuf->len = req_size;
			pipebuf->offset = 0;
			pipebuf->private = 0;
			pipebuf->flags = 0;
			pipe->nrbufs++;
			nsent += req_size;
			bytes_remaining -= req_size;
			if (signal_pending(current))
				break;
		} else {	/* pipe full */

			if (flags & SPLICE_F_NONBLOCK) {
				retval = -EAGAIN;
				goto err;
			}
			free_pipebuf(kpage);
			break;
		}
	}
	UNLOCK_PIPE(pipe);
	if (!nsent && signal_pending(current))
		return -ERESTARTSYS;
	/* NOTE(review): *poffset advances by nsent (bytes queued on the pipe)
	 * while the return value is offset - orig_offset (bytes read from the
	 * host); these can differ on a short final page — confirm intended. */
	*poffset += nsent;
	return offset - orig_offset;

      err:
	UNLOCK_PIPE(pipe);
	free_pipebuf(kpage);
	return retval;
}
242
243#endif
244
245/**
246 * Read from a regular file.
247 *
248 * @param file the file
249 * @param buf the buffer
250 * @param size length of the buffer
251 * @param off offset within the file
252 * @returns the number of read bytes on success, Linux error code otherwise
253 */
254static ssize_t sf_reg_read(struct file *file, char *buf, size_t size,
255 loff_t * off)
256{
257 int err;
258 void *tmp;
259 RTCCPHYS tmp_phys;
260 size_t tmp_size;
261 size_t left = size;
262 ssize_t total_bytes_read = 0;
263 struct inode *inode = GET_F_DENTRY(file)->d_inode;
264 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
265 struct sf_reg_info *sf_r = file->private_data;
266 loff_t pos = *off;
267
268 TRACE();
269 if (!S_ISREG(inode->i_mode)) {
270 LogFunc(("read from non regular file %d\n", inode->i_mode));
271 return -EINVAL;
272 }
273
274 /** XXX Check read permission according to inode->i_mode! */
275
276 if (!size)
277 return 0;
278
279 tmp =
280 alloc_bounce_buffer(&tmp_size, &tmp_phys, size,
281 __PRETTY_FUNCTION__);
282 if (!tmp)
283 return -ENOMEM;
284
285 while (left) {
286 uint32_t to_read, nread;
287
288 to_read = tmp_size;
289 if (to_read > left)
290 to_read = (uint32_t) left;
291
292 nread = to_read;
293
294 err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
295 if (err)
296 goto fail;
297
298 if (copy_to_user(buf, tmp, nread)) {
299 err = -EFAULT;
300 goto fail;
301 }
302
303 pos += nread;
304 left -= nread;
305 buf += nread;
306 total_bytes_read += nread;
307 if (nread != to_read)
308 break;
309 }
310
311 *off += total_bytes_read;
312 free_bounce_buffer(tmp);
313 return total_bytes_read;
314
315 fail:
316 free_bounce_buffer(tmp);
317 return err;
318}
319
/**
 * Write to a regular file.
 *
 * User data is copied into a physically contiguous bounce buffer and handed
 * to the host by physical address via VbglR0SfWritePhysCont().
 *
 * @param file the file
 * @param buf the buffer
 * @param size length of the buffer
 * @param off offset within the file
 * @returns the number of written bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size,
			    loff_t * off)
{
	int err;
	void *tmp;
	RTCCPHYS tmp_phys;
	size_t tmp_size;
	size_t left = size;
	ssize_t total_bytes_written = 0;
	struct inode *inode = GET_F_DENTRY(file)->d_inode;
	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
	struct sf_reg_info *sf_r = file->private_data;
	loff_t pos;

	TRACE();
	BUG_ON(!sf_i);
	BUG_ON(!sf_g);
	BUG_ON(!sf_r);

	if (!S_ISREG(inode->i_mode)) {
		LogFunc(("write to non regular file %d\n", inode->i_mode));
		return -EINVAL;
	}

	/* O_APPEND: ignore the supplied offset and write at the cached EOF. */
	pos = *off;
	if (file->f_flags & O_APPEND) {
		pos = inode->i_size;
		*off = pos;
	}

	/** XXX Check write permission according to inode->i_mode! */

	if (!size)
		return 0;

	tmp =
	    alloc_bounce_buffer(&tmp_size, &tmp_phys, size,
				__PRETTY_FUNCTION__);
	if (!tmp)
		return -ENOMEM;

	while (left) {
		uint32_t to_write, nwritten;

		/* Write at most one bounce buffer's worth per host call. */
		to_write = tmp_size;
		if (to_write > left)
			to_write = (uint32_t) left;

		nwritten = to_write;

		if (copy_from_user(tmp, buf, to_write)) {
			err = -EFAULT;
			goto fail;
		}

		err =
		    VbglR0SfWritePhysCont(&client_handle, &sf_g->map,
					  sf_r->handle, pos, &nwritten,
					  tmp_phys);
		err = RT_FAILURE(err) ? -EPROTO : 0;
		if (err)
			goto fail;

		pos += nwritten;
		left -= nwritten;
		buf += nwritten;
		total_bytes_written += nwritten;
		/* A short write means the host stopped early; give up. */
		if (nwritten != to_write)
			break;
	}

	/* Update the cached size and force a restat so the next getattr
	 * fetches fresh metadata from the host. */
	*off += total_bytes_written;
	if (*off > inode->i_size)
		inode->i_size = *off;

	sf_i->force_restat = 1;
	free_bounce_buffer(tmp);
	return total_bytes_written;

 fail:
	free_bounce_buffer(tmp);
	return err;
}
413
/**
 * Open a regular file.
 *
 * Translates the Linux O_* flags into SHFL_CF_* create flags and asks the
 * host to open (or create) the file.  The resulting host handle is stored
 * in a freshly allocated sf_reg_info hung off file->private_data.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_open(struct inode *inode, struct file *file)
{
	int rc, rc_linux = 0;
	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
	struct sf_reg_info *sf_r;
	SHFLCREATEPARMS params;

	TRACE();
	BUG_ON(!sf_g);
	BUG_ON(!sf_i);

	LogFunc(("open %s\n", sf_i->path->String.utf8));

	sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
	if (!sf_r) {
		LogRelFunc(("could not allocate reg info\n"));
		return -ENOMEM;
	}

	/* Already open? */
	if (sf_i->handle != SHFL_HANDLE_NIL) {
		/*
		 * This inode was created with sf_create_aux(). Check the CreateFlags:
		 * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure
		 * about the access flags (SHFL_CF_ACCESS_*).
		 */
		/* Adopt the handle left behind by sf_create_aux() instead of
		 * opening the file on the host a second time. */
		sf_i->force_restat = 1;
		sf_r->handle = sf_i->handle;
		sf_i->handle = SHFL_HANDLE_NIL;
		sf_i->file = file;
		file->private_data = sf_r;
		return 0;
	}

	RT_ZERO(params);
	params.Handle = SHFL_HANDLE_NIL;
	/* We check the value of params.Handle afterwards to find out if
	 * the call succeeded or failed, as the API does not seem to cleanly
	 * distinguish error and informational messages.
	 *
	 * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
	 * make the shared folders host service use our fMode parameter */

	/* Creation disposition: map O_CREAT/O_TRUNC combinations. */
	if (file->f_flags & O_CREAT) {
		LogFunc(("O_CREAT set\n"));
		params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
		/* We ignore O_EXCL, as the Linux kernel seems to call create
		   beforehand itself, so O_EXCL should always fail. */
		if (file->f_flags & O_TRUNC) {
			LogFunc(("O_TRUNC set\n"));
			params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
		} else
			params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
	} else {
		params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
		if (file->f_flags & O_TRUNC) {
			LogFunc(("O_TRUNC set\n"));
			params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
		}
	}

	/* Access mode: map the O_ACCMODE bits. */
	switch (file->f_flags & O_ACCMODE) {
	case O_RDONLY:
		params.CreateFlags |= SHFL_CF_ACCESS_READ;
		break;

	case O_WRONLY:
		params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
		break;

	case O_RDWR:
		params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
		break;

	default:
		BUG();
	}

	if (file->f_flags & O_APPEND) {
		LogFunc(("O_APPEND set\n"));
		params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
	}

	params.Info.Attr.fMode = inode->i_mode;
	LogFunc(("sf_reg_open: calling VbglR0SfCreate, file %s, flags=%#x, %#x\n", sf_i->path->String.utf8, file->f_flags, params.CreateFlags));
	rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, &params);
	if (RT_FAILURE(rc)) {
		LogFunc(("VbglR0SfCreate failed flags=%d,%#x rc=%Rrc\n",
			 file->f_flags, params.CreateFlags, rc));
		kfree(sf_r);
		return -RTErrConvertToErrno(rc);
	}

	/* The API may "succeed" without producing a handle; inspect Result
	 * to distinguish the informational outcomes. */
	if (SHFL_HANDLE_NIL == params.Handle) {
		switch (params.Result) {
		case SHFL_PATH_NOT_FOUND:
		case SHFL_FILE_NOT_FOUND:
			rc_linux = -ENOENT;
			break;
		case SHFL_FILE_EXISTS:
			rc_linux = -EEXIST;
			break;
		default:
			break;
		}
	}

	sf_i->force_restat = 1;
	sf_r->handle = params.Handle;
	sf_i->file = file;
	file->private_data = sf_r;
	return rc_linux;
}
535
/**
 * Close a regular file.
 *
 * Flushes any dirty pages, closes the host handle and frees the per-open
 * sf_reg_info.  Host close failures are only logged; 0 is always returned.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_release(struct inode *inode, struct file *file)
{
	int rc;
	struct sf_reg_info *sf_r;
	struct sf_glob_info *sf_g;
	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

	TRACE();
	sf_g = GET_GLOB_INFO(inode->i_sb);
	sf_r = file->private_data;

	BUG_ON(!sf_g);
	BUG_ON(!sf_r);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
	/* See the smbfs source (file.c). mmap in particular can cause data to be
	 * written to the file after it is closed, which we can't cope with. We
	 * copy and paste the body of filemap_write_and_wait() here as it was not
	 * defined before 2.6.6 and not exported until quite a bit later. */
	/* filemap_write_and_wait(inode->i_mapping); */
	if (inode->i_mapping->nrpages
	    && filemap_fdatawrite(inode->i_mapping) != -EIO)
		filemap_fdatawait(inode->i_mapping);
#endif
	rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle);
	if (RT_FAILURE(rc))
		LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc));

	kfree(sf_r);
	sf_i->file = NULL;
	sf_i->handle = SHFL_HANDLE_NIL;
	file->private_data = NULL;
	return 0;
}
577
/**
 * Page fault handler for COW (private) mappings: allocates a fresh page,
 * fills it from the host file, zeroes any tail past EOF, and hands it to
 * the memory manager.  The function signature and return conventions vary
 * across kernel generations (the newer fault API vs. the older nopage API).
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
static int sf_reg_fault(struct vm_fault *vmf)
#elif LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma,
				  unsigned long vaddr, int *type)
#define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma,
				  unsigned long vaddr, int unused)
#define SET_TYPE(t)
#endif
{
	struct page *page;
	char *buf;
	loff_t off;
	uint32_t nread = PAGE_SIZE;
	int err;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
	struct vm_area_struct *vma = vmf->vma;
#endif
	struct file *file = vma->vm_file;
	struct inode *inode = GET_F_DENTRY(file)->d_inode;
	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
	struct sf_reg_info *sf_r = file->private_data;

	TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
	/* NOTE(review): this compares a page offset (pgoff) against an end
	 * address (vm_end); the bound looks like it should be derived from
	 * vm_pgoff and the VMA size instead — confirm intended. */
	if (vmf->pgoff > vma->vm_end)
		return VM_FAULT_SIGBUS;
#else
	if (vaddr > vma->vm_end) {
		SET_TYPE(VM_FAULT_SIGBUS);
		return NOPAGE_SIGBUS;
	}
#endif

	/* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
	 * which works on virtual addresses. On Linux cannot reliably determine the
	 * physical address for high memory, see rtR0MemObjNativeLockKernel(). */
	page = alloc_page(GFP_USER);
	if (!page) {
		LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
		return VM_FAULT_OOM;
#else
		SET_TYPE(VM_FAULT_OOM);
		return NOPAGE_OOM;
#endif
	}

	buf = kmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
	off = (vmf->pgoff << PAGE_SHIFT);
#else
	off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
	err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
	if (err) {
		kunmap(page);
		put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
		return VM_FAULT_SIGBUS;
#else
		SET_TYPE(VM_FAULT_SIGBUS);
		return NOPAGE_SIGBUS;
#endif
	}

	BUG_ON(nread > PAGE_SIZE);
	if (!nread) {
		/* Page lies entirely beyond EOF: hand back a zeroed page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
		clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
		clear_user_page(page_address(page), vaddr, page);
#else
		clear_user_page(page_address(page), vaddr);
#endif
	} else
		/* Zero the tail past EOF on a short read. */
		memset(buf + nread, 0, PAGE_SIZE - nread);

	flush_dcache_page(page);
	kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
	vmf->page = page;
	return 0;
#else
	SET_TYPE(VM_FAULT_MAJOR);
	return page;
#endif
}
670
/* VM operations for private mappings: only fault-in is supported (no
 * shared writeback; see sf_reg_mmap which rejects VM_SHARED). */
static struct vm_operations_struct sf_vma_ops = {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
	.fault = sf_reg_fault
#else
	.nopage = sf_reg_nopage
#endif
};
678
679static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
680{
681 TRACE();
682 if (vma->vm_flags & VM_SHARED) {
683 LogFunc(("shared mmapping not available\n"));
684 return -EINVAL;
685 }
686
687 vma->vm_ops = &sf_vma_ops;
688 return 0;
689}
690
/* File operations for regular files on a shared folder. */
struct file_operations sf_reg_fops = {
	.read = sf_reg_read,
	.open = sf_reg_open,
	.write = sf_reg_write,
	.release = sf_reg_release,
	.mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
/** @todo This code is known to cause caching of data which should not be
 * cached. Investigate. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
	.splice_read = sf_splice_read,
#else
	.sendfile = generic_file_sendfile,
#endif
	.aio_read = generic_file_aio_read,
	.aio_write = generic_file_aio_write,
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
	.fsync = noop_fsync,
#else
	.fsync = simple_sync_file,
#endif
	.llseek = generic_file_llseek,
#endif
};
717
/* Inode operations for regular files: attribute handling only. */
struct inode_operations sf_reg_iops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
	.revalidate = sf_inode_revalidate
#else
	.getattr = sf_getattr,
	.setattr = sf_setattr
#endif
};
726
727#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
/**
 * Fill one page-cache page from the host file (address_space ->readpage).
 * On success the page is marked up to date and unlocked; the tail beyond
 * the data actually read is zeroed.
 */
static int sf_readpage(struct file *file, struct page *page)
{
	struct inode *inode = GET_F_DENTRY(file)->d_inode;
	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
	struct sf_reg_info *sf_r = file->private_data;
	uint32_t nread = PAGE_SIZE;
	char *buf;
	loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
	int ret;

	TRACE();

	buf = kmap(page);
	ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
	if (ret) {
		kunmap(page);
		if (PageLocked(page))
			unlock_page(page);
		return ret;
	}
	BUG_ON(nread > PAGE_SIZE);
	/* Zero the part of the page past the data read (short read / EOF). */
	memset(&buf[nread], 0, PAGE_SIZE - nread);
	flush_dcache_page(page);
	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
756
/**
 * Write one dirty page back to the host file (address_space ->writepage).
 * The host handle is taken from the struct file cached in the inode info.
 */
static int sf_writepage(struct page *page, struct writeback_control *wbc)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
	struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
	struct file *file = sf_i->file;
	struct sf_reg_info *sf_r = file->private_data;
	char *buf;
	uint32_t nwritten = PAGE_SIZE;
	int end_index = inode->i_size >> PAGE_SHIFT;
	loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
	int err;

	TRACE();

	/* The last page may be only partially backed by file data.
	 * NOTE(review): if i_size is exactly page aligned this yields
	 * nwritten == 0 for a page at/after end_index — confirm intended. */
	if (page->index >= end_index)
		nwritten = inode->i_size & (PAGE_SIZE - 1);

	buf = kmap(page);

	err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
	if (err < 0) {
		ClearPageUptodate(page);
		goto out;
	}

	if (off > inode->i_size)
		inode->i_size = off;

	if (PageError(page))
		ClearPageError(page);
	err = 0;

 out:
	kunmap(page);

	unlock_page(page);
	return err;
}
797
798#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
/**
 * Begin a page-cache write (address_space ->write_begin); defers entirely
 * to simple_write_begin(), which grabs (and possibly zeroes) the page.
 */
int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
		   unsigned len, unsigned flags, struct page **pagep,
		   void **fsdata)
{
	TRACE();

	return simple_write_begin(file, mapping, pos, len, flags, pagep,
				  fsdata);
}
808
809int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
810 unsigned len, unsigned copied, struct page *page, void *fsdata)
811{
812 struct inode *inode = mapping->host;
813 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
814 struct sf_reg_info *sf_r = file->private_data;
815 void *buf;
816 unsigned from = pos & (PAGE_SIZE - 1);
817 uint32_t nwritten = len;
818 int err;
819
820 TRACE();
821
822 buf = kmap(page);
823 err =
824 sf_reg_write_aux(__func__, sf_g, sf_r, buf + from, &nwritten, pos);
825 kunmap(page);
826
827 if (!PageUptodate(page) && err == PAGE_SIZE)
828 SetPageUptodate(page);
829
830 if (err >= 0) {
831 pos += nwritten;
832 if (pos > inode->i_size)
833 inode->i_size = pos;
834 }
835
836 unlock_page(page);
837#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
838 put_page(page);
839#else
840 page_cache_release(page);
841#endif
842
843 return nwritten;
844}
845
846#endif /* KERNEL_VERSION >= 2.6.24 */
847
/* Address space (page cache) operations for regular files. */
struct address_space_operations sf_reg_aops = {
	.readpage = sf_readpage,
	.writepage = sf_writepage,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
	.write_begin = sf_write_begin,
	.write_end = sf_write_end,
#else
	.prepare_write = simple_prepare_write,
	.commit_write = simple_commit_write,
#endif
};
859#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette