VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/sharedfolders/regops.c@69384

Last change on this file since 69384 was 69165, checked in by vboxsync, 7 years ago

Additions/linux/sharedfolders: stop using generic caching I/O functions.
bugref:2548: Complete shared folders filesystem for Linux guests

Our shared folder file-system driver currently uses the generic
implementations of read_iter, write_iter and sendfile, which assume that
file-system data may safely be cached. That assumption does not hold for our
shared folders driver. For kernels 2.6.31 and later we can simply stop using
those functions and the kernel will fall back to defaults which work for us.
For older kernels this still needs to be fixed, and is visible when files are
memory-mapped. See public bugs #819 and #17053.

Fix contributed by David Ellingsworth <david@…>.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.5 KB
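Concretely, the change removes the generic caching entries from the driver's struct file_operations on 2.6.31+ kernels; with .read/.write/.mmap still set, the kernel falls back to its own (uncached) paths. A simplified sketch of the resulting table — the name sf_reg_fops_sketch is illustrative, the full version-gated definition appears in the listing below:

struct file_operations sf_reg_fops_sketch = {
    .read    = sf_reg_read,      /* bounce-buffered, uncached host read  */
    .write   = sf_reg_write,     /* bounce-buffered, uncached host write */
    .open    = sf_reg_open,
    .release = sf_reg_release,
    .mmap    = sf_reg_mmap,
    /* no .aio_read/.aio_write/.splice_read/.sendfile: the kernel's
     * fallbacks do the right (uncached) thing on 2.6.31 and later */
};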
/* $Id: regops.c 69165 2017-10-23 15:11:37Z vboxsync $ */
/** @file
 * vboxsf - VBox Linux Shared Folders, Regular file inode and file operations.
 */

/*
 * Copyright (C) 2006-2016 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */

/*
 * Limitations: only COW memory mapping is supported
 */

#include "vfsmod.h"

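/* Allocate a physically contiguous bounce buffer for host I/O: try for up to
 * 16KiB first, fall back to a single page, and hand back both the usable
 * size and the buffer's physical address. */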
static void *alloc_bounce_buffer(size_t *tmp_sizep, PRTCCPHYS physp, size_t
                                 xfer_size, const char *caller)
{
    size_t tmp_size;
    void *tmp;

    /* try for big first. */
    tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
    if (tmp_size > 16U*_1K)
        tmp_size = 16U*_1K;
    tmp = kmalloc(tmp_size, GFP_KERNEL);
    if (!tmp)
    {
        /* fall back on a page sized buffer. */
        tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!tmp)
        {
            LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu\n", caller, xfer_size));
            return NULL;
        }
        tmp_size = PAGE_SIZE;
    }

    *tmp_sizep = tmp_size;
    *physp = virt_to_phys(tmp);
    return tmp;
}

static void free_bounce_buffer(void *tmp)
{
    kfree(tmp);
}


/* fops */
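/* Thin wrappers around VbglR0SfRead()/VbglR0SfWrite(): perform the host call
 * and convert a VBox failure status into -EPROTO for the Linux side. */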
static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
                           struct sf_reg_info *sf_r, void *buf,
                           uint32_t *nread, uint64_t pos)
{
    /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
     *        contiguous in physical memory (kmalloc or single page), we should
     *        use a physical address here to speed things up. */
    int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->handle,
                          pos, nread, buf, false /* already locked? */);
    if (RT_FAILURE(rc))
    {
        LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller, rc));
        return -EPROTO;
    }
    return 0;
}

static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
                            struct sf_reg_info *sf_r, void *buf,
                            uint32_t *nwritten, uint64_t pos)
{
    /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
     *        contiguous in physical memory (kmalloc or single page), we should
     *        use a physical address here to speed things up. */
    int rc = VbglR0SfWrite(&client_handle, &sf_g->map, sf_r->handle,
                           pos, nwritten, buf, false /* already locked? */);
    if (RT_FAILURE(rc))
    {
        LogFunc(("VbglR0SfWrite failed. caller=%s, rc=%Rrc\n",
                 caller, rc));
        return -EPROTO;
    }
    return 0;
}

/**
 * Read from a regular file.
 *
 * @param file          the file
 * @param buf           the buffer
 * @param size          length of the buffer
 * @param off           offset within the file
 * @returns the number of bytes read on success, negative Linux error code otherwise
 */
static ssize_t sf_reg_read(struct file *file, char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_read = 0;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos = *off;

    TRACE();
    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("read from non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    /** XXX Check read permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left)
    {
        uint32_t to_read, nread;

        to_read = tmp_size;
        if (to_read > left)
            to_read = (uint32_t) left;

        nread = to_read;

        err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
        if (err)
            goto fail;

        if (copy_to_user(buf, tmp, nread))
        {
            err = -EFAULT;
            goto fail;
        }

        pos += nread;
        left -= nread;
        buf += nread;
        total_bytes_read += nread;
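        /* A short read means we hit end-of-file; stop looping. */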
        if (nread != to_read)
            break;
    }

    *off += total_bytes_read;
    free_bounce_buffer(tmp);
    return total_bytes_read;

fail:
    free_bounce_buffer(tmp);
    return err;
}

/**
 * Write to a regular file.
 *
 * @param file          the file
 * @param buf           the buffer
 * @param size          length of the buffer
 * @param off           offset within the file
 * @returns the number of bytes written on success, negative Linux error code otherwise
 */
static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_written = 0;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos;

    TRACE();
    BUG_ON(!sf_i);
    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("write to non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    pos = *off;
    if (file->f_flags & O_APPEND)
    {
        pos = inode->i_size;
        *off = pos;
    }

    /** XXX Check write permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left)
    {
        uint32_t to_write, nwritten;

        to_write = tmp_size;
        if (to_write > left)
            to_write = (uint32_t) left;

        nwritten = to_write;

        if (copy_from_user(tmp, buf, to_write))
        {
            err = -EFAULT;
            goto fail;
        }

        err = VbglR0SfWritePhysCont(&client_handle, &sf_g->map, sf_r->handle,
                                    pos, &nwritten, tmp_phys);
        err = RT_FAILURE(err) ? -EPROTO : 0;
        if (err)
            goto fail;

        pos += nwritten;
        left -= nwritten;
        buf += nwritten;
        total_bytes_written += nwritten;
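        /* A short write means the host could not take more data; stop. */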
        if (nwritten != to_write)
            break;
    }

    *off += total_bytes_written;
    if (*off > inode->i_size)
        inode->i_size = *off;

    sf_i->force_restat = 1;
    free_bounce_buffer(tmp);
    return total_bytes_written;

fail:
    free_bounce_buffer(tmp);
    return err;
}

/**
 * Open a regular file.
 *
 * @param inode         the inode
 * @param file          the file
 * @returns 0 on success, negative Linux error code otherwise
 */
static int sf_reg_open(struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE();
    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
    if (!sf_r)
    {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    /* Already open? */
    if (sf_i->handle != SHFL_HANDLE_NIL)
    {
        /*
         * This inode was created with sf_create_aux(). Check the CreateFlags:
         * O_CREAT, O_TRUNC: inherently true (the file was just created). Not
         * sure about the access flags (SHFL_CF_ACCESS_*).
         */
        sf_i->force_restat = 1;
        sf_r->handle = sf_i->handle;
        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->file = file;
        file->private_data = sf_r;
        return 0;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter. */

    if (file->f_flags & O_CREAT)
    {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create itself
           beforehand, so an O_EXCL open here would always fail. */
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        }
        else
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
    }
    else
    {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        }
    }

    switch (file->f_flags & O_ACCMODE)
    {
        case O_RDONLY:
            params.CreateFlags |= SHFL_CF_ACCESS_READ;
            break;

        case O_WRONLY:
            params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
            break;

        case O_RDWR:
            params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
            break;

        default:
            BUG();
    }

    if (file->f_flags & O_APPEND)
    {
        LogFunc(("O_APPEND set\n"));
        params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling VbglR0SfCreate, file %s, flags=%#x, %#x\n",
             sf_i->path->String.utf8, file->f_flags, params.CreateFlags));
    rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, &params);
    if (RT_FAILURE(rc))
    {
        LogFunc(("VbglR0SfCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree(sf_r);
        return -RTErrConvertToErrno(rc);
    }

    if (SHFL_HANDLE_NIL == params.Handle)
    {
        switch (params.Result)
        {
            case SHFL_PATH_NOT_FOUND:
            case SHFL_FILE_NOT_FOUND:
                rc_linux = -ENOENT;
                break;
            case SHFL_FILE_EXISTS:
                rc_linux = -EEXIST;
                break;
            default:
                break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}

/**
 * Close a regular file.
 *
 * @param inode         the inode
 * @param file          the file
 * @returns 0 on success, negative Linux error code otherwise
 */
static int sf_reg_release(struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

    TRACE();
    sf_g = GET_GLOB_INFO(inode->i_sb);
    sf_r = file->private_data;

    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    /* See the smbfs source (file.c). mmap in particular can cause data to be
     * written to the file after it is closed, which we can't cope with. We
     * copy and paste the body of filemap_write_and_wait() here as it was not
     * defined before 2.6.6 and not exported until quite a bit later. */
    /* filemap_write_and_wait(inode->i_mapping); */
    if (   inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif
    rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc));

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
static int sf_reg_fault(struct vm_fault *vmf)
#elif LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
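/* One handler body is shared by all kernel generations; the matching
 * .fault/.nopage prototype is selected above by LINUX_VERSION_CODE. */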
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
    struct vm_area_struct *vma = vmf->vma;
#endif
    struct file *file = vma->vm_file;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end)
    {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
     * which works on virtual addresses. On Linux we cannot reliably determine
     * the physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    page = alloc_page(GFP_USER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err)
    {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON(nread > PAGE_SIZE);
    if (!nread)
    {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    }
    else
        memset(buf + nread, 0, PAGE_SIZE - nread);

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}

static struct vm_operations_struct sf_vma_ops =
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};

static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
{
    TRACE();
    if (vma->vm_flags & VM_SHARED)
    {
        LogFunc(("shared mmapping not available\n"));
        return -EINVAL;
    }

    vma->vm_ops = &sf_vma_ops;
    return 0;
}

struct file_operations sf_reg_fops =
{
    .read = sf_reg_read,
    .open = sf_reg_open,
    .write = sf_reg_write,
    .release = sf_reg_release,
    .mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
/** @todo This code is known to cause caching of data which should not be
 *        cached. Investigate. */
#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
    .splice_read = generic_file_splice_read,
#  else
    .sendfile = generic_file_sendfile,
#  endif
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    .fsync = noop_fsync,
# else
    .fsync = simple_sync_file,
# endif
    .llseek = generic_file_llseek,
#endif
};


struct inode_operations sf_reg_iops =
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr = sf_getattr,
    .setattr = sf_setattr
#endif
};


#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
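/* Fill one page cache page by reading from the host at the page's file
 * offset; anything past what the host returned is zero-filled. */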
static int sf_readpage(struct file *file, struct page *page)
{
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    uint32_t nread = PAGE_SIZE;
    char *buf;
    loff_t off = ((loff_t)page->index) << PAGE_SHIFT;
    int ret;

    TRACE();

    buf = kmap(page);
    ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (ret)
    {
        kunmap(page);
        if (PageLocked(page))
            unlock_page(page);
        return ret;
    }
    BUG_ON(nread > PAGE_SIZE);
    memset(&buf[nread], 0, PAGE_SIZE - nread);
    flush_dcache_page(page);
    kunmap(page);
    SetPageUptodate(page);
    unlock_page(page);
    return 0;
}

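/* Write one dirty page back to the host; for the page containing EOF only
 * the bytes up to i_size are sent. */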
static int
sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    int end_index = inode->i_size >> PAGE_SHIFT;
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
    int err;

    TRACE();

    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE-1);

    buf = kmap(page);

    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0)
    {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;

out:
    kunmap(page);

    unlock_page(page);
    return err;
}

# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
                   unsigned len, unsigned flags, struct page **pagep, void **fsdata)
{
    TRACE();

    return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
}

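/* Complete a buffered write by pushing the freshly copied data straight
 * through to the host, so nothing stale lingers in the page cache. */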
int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
                 unsigned len, unsigned copied, struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    void *buf;
    unsigned from = pos & (PAGE_SIZE - 1);
    uint32_t nwritten = len;
    int err;

    TRACE();

    buf = kmap(page);
    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf + from, &nwritten, pos);
    kunmap(page);

    /* The page is only fully up to date if the whole of it was written. */
    if (err == 0 && !PageUptodate(page) && nwritten == PAGE_SIZE)
        SetPageUptodate(page);

    if (err >= 0) {
        pos += nwritten;
        if (pos > inode->i_size)
            inode->i_size = pos;
    }

    unlock_page(page);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
    put_page(page);
#else
    page_cache_release(page);
#endif

    return err < 0 ? err : nwritten;
}

# endif /* KERNEL_VERSION >= 2.6.24 */

struct address_space_operations sf_reg_aops =
{
    .readpage = sf_readpage,
    .writepage = sf_writepage,
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    .write_begin = sf_write_begin,
    .write_end = sf_write_end,
# else
    .prepare_write = simple_prepare_write,
    .commit_write = simple_commit_write,
# endif
};
#endif
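The "COW memory mapping only" limitation noted at the top of the file is enforced by sf_reg_mmap() rejecting VM_SHARED mappings. A minimal user-space sketch of what that means in practice, assuming a vboxsf share mounted at the hypothetical path /mnt/shared:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    /* Hypothetical file on a vboxsf mount. */
    int fd = open("/mnt/shared/testfile", O_RDWR);
    if (fd < 0)
        return 1;

    /* MAP_SHARED is refused by sf_reg_mmap() with -EINVAL... */
    void *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (shared == MAP_FAILED)
        perror("mmap MAP_SHARED (expected to fail)");

    /* ...while a private (copy-on-write) mapping works and is populated
     * page by page via sf_reg_fault()/sf_reg_nopage(). */
    void *priv = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
    if (priv != MAP_FAILED)
        munmap(priv, 4096);

    close(fd);
    return 0;
}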