VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/sharedfolders/regops.c@ 70687

Last change on this file since 70687 was 70461, checked in by vboxsync, 7 years ago

Replace a few tabs that made it into a source file in r120068 and (I believe) were the cause of an NT burn

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 23.3 KB
Line 
1/* $Id: regops.c 70461 2018-01-04 17:35:03Z vboxsync $ */
2/** @file
3 * vboxsf - VBox Linux Shared Folders, Regular file inode and file operations.
4 */
5
6/*
7 * Copyright (C) 2006-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18/*
19 * Limitations: only COW memory mapping is supported
20 */
21
22#include "vfsmod.h"
23
24static void *alloc_bounce_buffer(size_t *tmp_sizep, PRTCCPHYS physp, size_t
25 xfer_size, const char *caller)
26{
27 size_t tmp_size;
28 void *tmp;
29
30 /* try for big first. */
31 tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
32 if (tmp_size > 16U*_1K)
33 tmp_size = 16U*_1K;
34 tmp = kmalloc(tmp_size, GFP_KERNEL);
35 if (!tmp)
36 {
37 /* fall back on a page sized buffer. */
38 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
39 if (!tmp)
40 {
41 LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu %s\n", caller, xfer_size));
42 return NULL;
43 }
44 tmp_size = PAGE_SIZE;
45 }
46
47 *tmp_sizep = tmp_size;
48 *physp = virt_to_phys(tmp);
49 return tmp;
50}
51
/** Release a buffer obtained from alloc_bounce_buffer(). */
static void free_bounce_buffer(void *tmp)
{
    kfree(tmp);
}
56
57
58/* fops */
59static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
60 struct sf_reg_info *sf_r, void *buf,
61 uint32_t *nread, uint64_t pos)
62{
63 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
64 * contiguous in physical memory (kmalloc or single page), we should
65 * use a physical address here to speed things up. */
66 int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->handle,
67 pos, nread, buf, false /* already locked? */);
68 if (RT_FAILURE(rc))
69 {
70 LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller, rc));
71 return -EPROTO;
72 }
73 return 0;
74}
75
76static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
77 struct sf_reg_info *sf_r, void *buf,
78 uint32_t *nwritten, uint64_t pos)
79{
80 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
81 * contiguous in physical memory (kmalloc or single page), we should
82 * use a physical address here to speed things up. */
83 int rc = VbglR0SfWrite(&client_handle, &sf_g->map, sf_r->handle,
84 pos, nwritten, buf, false /* already locked? */);
85 if (RT_FAILURE(rc))
86 {
87 LogFunc(("VbglR0SfWrite failed. caller=%s, rc=%Rrc\n",
88 caller, rc));
89 return -EPROTO;
90 }
91 return 0;
92}
93
94#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) && \
95 LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
96
/** Unmap and free a page that was allocated and kmap()ed for a pipe buffer. */
void free_pipebuf(struct page *kpage)
{
    kunmap(kpage);
    __free_pages(kpage, 0);
}
102
103void *sf_pipe_buf_map(struct pipe_inode_info *pipe,
104 struct pipe_buffer *pipe_buf, int atomic)
105{
106 return 0;
107}
108
/** Stub .get pipe-buffer operation: no reference accounting is performed. */
void sf_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf)
{
}
112
/** Stub .unmap pipe-buffer operation: nothing to do (see sf_pipe_buf_map). */
void sf_pipe_buf_unmap(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf, void *map_data)
{
}
116
/** Stub .steal pipe-buffer operation: reports success without doing anything. */
int sf_pipe_buf_steal(struct pipe_inode_info *pipe,
                      struct pipe_buffer *pipe_buf)
{
    return 0;
}
121
/** .release pipe-buffer operation: frees the backing page once the pipe
 *  reader has consumed it. */
static void sf_pipe_buf_release(struct pipe_inode_info *pipe,
                                struct pipe_buffer *pipe_buf)
{
    free_pipebuf(pipe_buf->page);
}
127
/** .confirm pipe-buffer operation: buffers are always reported as valid (0). */
int sf_pipe_buf_confirm(struct pipe_inode_info *info,
                        struct pipe_buffer *pipe_buf)
{
    return 0;
}
133
/* Operations for the pipe buffers queued by sf_splice_read(). */
static struct pipe_buf_operations sf_pipe_buf_ops = {
    .can_merge = 0,
    .map = sf_pipe_buf_map,
    .unmap = sf_pipe_buf_unmap,
    .confirm = sf_pipe_buf_confirm,
    .release = sf_pipe_buf_release,
    .steal = sf_pipe_buf_steal,
    .get = sf_pipe_buf_get,
};
143
/* Serialize access to the pipe.  Wrapped in do { } while (0) so each macro
 * expands as a single statement: the previous bare 'if' form could silently
 * capture a following 'else' at the call site (dangling-else hazard), and
 * the argument is now parenthesized against operator-precedence surprises. */
#define LOCK_PIPE(pipe) \
    do { \
        if ((pipe)->inode) \
            mutex_lock(&(pipe)->inode->i_mutex); \
    } while (0)

#define UNLOCK_PIPE(pipe) \
    do { \
        if ((pipe)->inode) \
            mutex_unlock(&(pipe)->inode->i_mutex); \
    } while (0)
151
/**
 * splice_read implementation for shared folder files (used on
 * 2.6.23 <= kernel < 2.6.31): reads file data from the host one page at a
 * time and queues each page directly as a pipe buffer.
 *
 * @param in       the file to read from
 * @param poffset  in/out file offset
 * @param pipe     the pipe to fill
 * @param len      maximum number of bytes to splice
 * @param flags    SPLICE_F_* flags (only SPLICE_F_NONBLOCK is examined)
 * @returns number of bytes placed in the pipe, or a negative error code
 */
ssize_t
sf_splice_read(struct file *in, loff_t *poffset,
               struct pipe_inode_info *pipe, size_t len,
               unsigned int flags)
{
    size_t bytes_remaining = len;
    loff_t orig_offset = *poffset;
    loff_t offset = orig_offset;
    struct inode *inode = GET_F_DENTRY(in)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = in->private_data;
    ssize_t retval;
    struct page *kpage = 0;
    size_t nsent = 0;

    TRACE();
    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("read from non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }
    if (!len) {
        return 0;
    }

    LOCK_PIPE(pipe);

    uint32_t req_size = 0;
    while (bytes_remaining > 0)
    {
        kpage = alloc_page(GFP_KERNEL);
        if (unlikely(kpage == NULL))
        {
            UNLOCK_PIPE(pipe);
            return -ENOMEM;
        }
        req_size = 0;
        /* Request at most one page's worth per iteration. */
        uint32_t nread = req_size = (uint32_t)min(bytes_remaining, (size_t)PAGE_SIZE);
        uint32_t chunk = 0;
        void *kbuf = kmap(kpage);
        /* The host may return short reads; keep reading until the page is
         * full or EOF (nread == 0) is reached. */
        while (chunk < req_size)
        {
            retval = sf_reg_read_aux(__func__, sf_g, sf_r, kbuf + chunk, &nread, offset);
            if (retval < 0)
                goto err;
            if (nread == 0)
                break;
            chunk += nread;
            offset += nread;
            nread = req_size - chunk;
        }
        /* NOTE(review): if EOF was hit mid-page (chunk < req_size), the code
         * below still queues and accounts req_size bytes rather than chunk;
         * looks suspect — verify against a newer upstream revision. */
        if (!pipe->readers)
        {
            /* No readers left on the pipe: raise SIGPIPE like a pipe write. */
            send_sig(SIGPIPE, current, 0);
            retval = -EPIPE;
            goto err;
        }
        if (pipe->nrbufs < PIPE_BUFFERS)
        {
            /* A slot is free in the pipe's circular buffer ring: hand the
             * page over to the pipe.  It is released by
             * sf_pipe_buf_release() once the reader consumes it. */
            struct pipe_buffer *pipebuf =
                pipe->bufs + ((pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1));
            pipebuf->page = kpage;
            pipebuf->ops = &sf_pipe_buf_ops;
            pipebuf->len = req_size;
            pipebuf->offset = 0;
            pipebuf->private = 0;
            pipebuf->flags = 0;
            pipe->nrbufs++;
            nsent += req_size;
            bytes_remaining -= req_size;
            if (signal_pending(current))
                break;
        }
        else /* pipe full */
        {
            if (flags & SPLICE_F_NONBLOCK) {
                retval = -EAGAIN;
                goto err;
            }
            free_pipebuf(kpage);
            break;
        }
    }
    UNLOCK_PIPE(pipe);
    if (!nsent && signal_pending(current))
        return -ERESTARTSYS;
    *poffset += nsent;
    /* NOTE(review): the offset advance (nsent) and the returned byte count
     * (offset - orig_offset) can disagree after a short read — confirm
     * which one callers rely on. */
    return offset - orig_offset;

err:
    UNLOCK_PIPE(pipe);
    free_pipebuf(kpage);
    return retval;
}
246
247#endif
248
249/**
250 * Read from a regular file.
251 *
252 * @param file the file
253 * @param buf the buffer
254 * @param size length of the buffer
255 * @param off offset within the file
256 * @returns the number of read bytes on success, Linux error code otherwise
257 */
258static ssize_t sf_reg_read(struct file *file, char *buf, size_t size, loff_t *off)
259{
260 int err;
261 void *tmp;
262 RTCCPHYS tmp_phys;
263 size_t tmp_size;
264 size_t left = size;
265 ssize_t total_bytes_read = 0;
266 struct inode *inode = GET_F_DENTRY(file)->d_inode;
267 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
268 struct sf_reg_info *sf_r = file->private_data;
269 loff_t pos = *off;
270
271 TRACE();
272 if (!S_ISREG(inode->i_mode))
273 {
274 LogFunc(("read from non regular file %d\n", inode->i_mode));
275 return -EINVAL;
276 }
277
278 /** XXX Check read permission according to inode->i_mode! */
279
280 if (!size)
281 return 0;
282
283 tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
284 if (!tmp)
285 return -ENOMEM;
286
287 while (left)
288 {
289 uint32_t to_read, nread;
290
291 to_read = tmp_size;
292 if (to_read > left)
293 to_read = (uint32_t) left;
294
295 nread = to_read;
296
297 err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
298 if (err)
299 goto fail;
300
301 if (copy_to_user(buf, tmp, nread))
302 {
303 err = -EFAULT;
304 goto fail;
305 }
306
307 pos += nread;
308 left -= nread;
309 buf += nread;
310 total_bytes_read += nread;
311 if (nread != to_read)
312 break;
313 }
314
315 *off += total_bytes_read;
316 free_bounce_buffer(tmp);
317 return total_bytes_read;
318
319fail:
320 free_bounce_buffer(tmp);
321 return err;
322}
323
/**
 * Write to a regular file.
 *
 * Data is copied from userspace into a physically contiguous bounce
 * buffer and sent to the host with VbglR0SfWritePhysCont, in chunks of
 * at most the bounce buffer size.
 *
 * @param file the file
 * @param buf the buffer
 * @param size length of the buffer
 * @param off offset within the file
 * @returns the number of written bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;          /* physical address of the bounce buffer */
    size_t tmp_size;            /* actual size of the bounce buffer */
    size_t left = size;
    ssize_t total_bytes_written = 0;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos;

    TRACE();
    BUG_ON(!sf_i);
    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("write to non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    pos = *off;
    if (file->f_flags & O_APPEND)
    {
        /* NOTE(review): inode->i_size can be stale for a shared folder
         * (the host side may have grown the file); presumably acceptable
         * here — confirm against the force_restat handling. */
        pos = inode->i_size;
        *off = pos;
    }

    /** XXX Check write permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left)
    {
        uint32_t to_write, nwritten;

        /* Cap each chunk at the bounce buffer size. */
        to_write = tmp_size;
        if (to_write > left)
            to_write = (uint32_t) left;

        nwritten = to_write;

        if (copy_from_user(tmp, buf, to_write))
        {
            err = -EFAULT;
            goto fail;
        }

        /* Write via the buffer's physical address (it is contiguous). */
        err = VbglR0SfWritePhysCont(&client_handle, &sf_g->map, sf_r->handle,
                                    pos, &nwritten, tmp_phys);
        err = RT_FAILURE(err) ? -EPROTO : 0;
        if (err)
            goto fail;

        pos += nwritten;
        left -= nwritten;
        buf += nwritten;
        total_bytes_written += nwritten;
        /* A short write ends the loop. */
        if (nwritten != to_write)
            break;
    }

    *off += total_bytes_written;
    if (*off > inode->i_size)
        inode->i_size = *off;

    /* The host-side attributes (size, timestamps) changed; re-stat later. */
    sf_i->force_restat = 1;
    free_bounce_buffer(tmp);
    return total_bytes_written;

fail:
    free_bounce_buffer(tmp);
    return err;
}
416
/**
 * Open a regular file.
 *
 * Allocates the per-open sf_reg_info, translates the Linux open flags
 * (O_CREAT/O_TRUNC/O_ACCMODE/O_APPEND) into SHFL_CF_* create flags and
 * asks the host to open or create the file.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_open(struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE();
    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
    if (!sf_r)
    {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    /* Already open? */
    if (sf_i->handle != SHFL_HANDLE_NIL)
    {
        /*
         * This inode was created with sf_create_aux(). Check the CreateFlags:
         * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure
         * about the access flags (SHFL_CF_ACCESS_*).
         */
        sf_i->force_restat = 1;
        sf_r->handle = sf_i->handle;    /* take over the handle from create */
        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->file = file;
        file->private_data = sf_r;
        return 0;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter */

    if (file->f_flags & O_CREAT)
    {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create
           beforehand itself, so O_EXCL should always fail. */
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        }
        else
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
    }
    else
    {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        }
    }

    /* Map the Linux access mode onto the shared folders access flags. */
    switch (file->f_flags & O_ACCMODE)
    {
        case O_RDONLY:
            params.CreateFlags |= SHFL_CF_ACCESS_READ;
            break;

        case O_WRONLY:
            params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
            break;

        case O_RDWR:
            params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
            break;

        default:
            BUG ();
    }

    if (file->f_flags & O_APPEND)
    {
        LogFunc(("O_APPEND set\n"));
        params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling VbglR0SfCreate, file %s, flags=%#x, %#x\n",
             sf_i->path->String.utf8 , file->f_flags, params.CreateFlags));
    rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, &params);
    if (RT_FAILURE(rc))
    {
        LogFunc(("VbglR0SfCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree(sf_r);
        return -RTErrConvertToErrno(rc);
    }

    /* A NIL handle despite a successful call means the open failed; map
     * the SHFL result code to a Linux errno (see comment above RT_ZERO). */
    if (SHFL_HANDLE_NIL == params.Handle)
    {
        switch (params.Result)
        {
            case SHFL_PATH_NOT_FOUND:
            case SHFL_FILE_NOT_FOUND:
                rc_linux = -ENOENT;
                break;
            case SHFL_FILE_EXISTS:
                rc_linux = -EEXIST;
                break;
            default:
                break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}
552
/**
 * Close a regular file.
 *
 * Flushes any dirty pages (mmap can dirty pages that are written back
 * after close), closes the host handle and frees the sf_reg_info.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_release(struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

    TRACE();
    sf_g = GET_GLOB_INFO(inode->i_sb);
    sf_r = file->private_data;

    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    /* See the smbfs source (file.c). mmap in particular can cause data to be
     * written to the file after it is closed, which we can't cope with. We
     * copy and paste the body of filemap_write_and_wait() here as it was not
     * defined before 2.6.6 and not exported until quite a bit later. */
    /* filemap_write_and_wait(inode->i_mapping); */
    if (   inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif
    rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc));  /* logged but not propagated */

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}
594
/*
 * Page fault handler (.fault / .nopage) for COW mappings: allocates a page,
 * fills it with data read from the host and hands it to the fault path.
 * The alternative signatures track kernel API changes over time.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
static int sf_reg_fault(struct vm_fault *vmf)
#elif LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
    struct vm_area_struct *vma = vmf->vma;
#endif
    struct file *file = vma->vm_file;
    struct inode *inode = GET_F_DENTRY(file)->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    /* NOTE(review): this compares a page offset (pgoff) against a virtual
     * address (vm_end); the units do not match — confirm the intention. */
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end)
    {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
     * which works on virtual addresses. On Linux cannot reliably determine the
     * physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    page = alloc_page(GFP_USER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
    /* Compute the file offset of the faulting page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err)
    {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON (nread > PAGE_SIZE);
    if (!nread)
    {
        /* Nothing read (past EOF): hand back a zeroed page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    }
    else
        memset(buf + nread, 0, PAGE_SIZE - nread); /* zero the unread tail */

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}
689
/* VM operations for mmap()ed shared folder files (fault handling only). */
static struct vm_operations_struct sf_vma_ops =
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};
698
699static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
700{
701 TRACE();
702 if (vma->vm_flags & VM_SHARED)
703 {
704 LogFunc(("shared mmapping not available\n"));
705 return -EINVAL;
706 }
707
708 vma->vm_ops = &sf_vma_ops;
709 return 0;
710}
711
/* File operations for regular shared folder files. */
struct file_operations sf_reg_fops =
{
    .read = sf_reg_read,
    .open = sf_reg_open,
    .write = sf_reg_write,
    .release = sf_reg_release,
    .mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
/** @todo This code is known to cause caching of data which should not be
 * cached. Investigate. */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
    .splice_read = sf_splice_read,
# else
    .sendfile = generic_file_sendfile,
# endif
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
# endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    .fsync = noop_fsync,
# else
    .fsync = simple_sync_file,
# endif
    .llseek = generic_file_llseek,
#endif
};
739
740
/* Inode operations for regular shared folder files. */
struct inode_operations sf_reg_iops =
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr = sf_getattr,
    .setattr = sf_setattr
#endif
};
750
751
752#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
753static int sf_readpage(struct file *file, struct page *page)
754{
755 struct inode *inode = GET_F_DENTRY(file)->d_inode;
756 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
757 struct sf_reg_info *sf_r = file->private_data;
758 uint32_t nread = PAGE_SIZE;
759 char *buf;
760 loff_t off = ((loff_t)page->index) << PAGE_SHIFT;
761 int ret;
762
763 TRACE();
764
765 buf = kmap(page);
766 ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
767 if (ret)
768 {
769 kunmap(page);
770 if (PageLocked(page))
771 unlock_page(page);
772 return ret;
773 }
774 BUG_ON(nread > PAGE_SIZE);
775 memset(&buf[nread], 0, PAGE_SIZE - nread);
776 flush_dcache_page(page);
777 kunmap(page);
778 SetPageUptodate(page);
779 unlock_page(page);
780 return 0;
781}
782
/**
 * Write a dirty page-cache page back to the host (.writepage callback).
 *
 * Called with the page locked; unlocks it before returning.  For the last
 * partial page only the bytes inside i_size are written.
 */
static int
sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    /* NOTE(review): 'int' may truncate for very large files; pgoff_t/loff_t
     * would be safer — confirm whether such sizes are reachable here. */
    int end_index = inode->i_size >> PAGE_SHIFT;
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
    int err;

    TRACE();

    /* Last (partial) page: only write the bytes inside i_size. */
    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE-1);

    buf = kmap(page);

    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0)
    {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;

out:
    kunmap(page);

    unlock_page(page);
    return err;
}
825
826# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
827int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
828 unsigned len, unsigned flags, struct page **pagep, void **fsdata)
829{
830 TRACE();
831
832 return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
833}
834
835int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
836 unsigned len, unsigned copied, struct page *page, void *fsdata)
837{
838 struct inode *inode = mapping->host;
839 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
840 struct sf_reg_info *sf_r = file->private_data;
841 void *buf;
842 unsigned from = pos & (PAGE_SIZE - 1);
843 uint32_t nwritten = len;
844 int err;
845
846 TRACE();
847
848 buf = kmap(page);
849 err = sf_reg_write_aux(__func__, sf_g, sf_r, buf+from, &nwritten, pos);
850 kunmap(page);
851
852 if (!PageUptodate(page) && err == PAGE_SIZE)
853 SetPageUptodate(page);
854
855 if (err >= 0) {
856 pos += nwritten;
857 if (pos > inode->i_size)
858 inode->i_size = pos;
859 }
860
861 unlock_page(page);
862#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
863 put_page(page);
864#else
865 page_cache_release(page);
866#endif
867
868 return nwritten;
869}
870
871# endif /* KERNEL_VERSION >= 2.6.24 */
872
/* Address space (page cache) operations for regular shared folder files. */
struct address_space_operations sf_reg_aops =
{
    .readpage = sf_readpage,
    .writepage = sf_writepage,
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    .write_begin = sf_write_begin,
    .write_end = sf_write_end,
# else
    .prepare_write = simple_prepare_write,
    .commit_write = simple_commit_write,
# endif
};
885#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette