author	Srikant Patnaik	2015-01-11 12:28:04 +0530
committer	Srikant Patnaik	2015-01-11 12:28:04 +0530
commit	871480933a1c28f8a9fed4c4d34d06c439a7a422 (patch)
tree	8718f573808810c2a1e8cb8fb6ac469093ca2784 /fs/nfs
parent	9d40ac5867b9aefe0722bc1f110b965ff294d30d (diff)
Moved, renamed, and deleted files
The original directory structure was scattered and unorganized. These changes reorganize the tree to match the standard kernel source layout.
Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/Kconfig	| 152
-rw-r--r--	fs/nfs/Makefile	| 26
-rw-r--r--	fs/nfs/blocklayout/Makefile	| 5
-rw-r--r--	fs/nfs/blocklayout/blocklayout.c	| 1185
-rw-r--r--	fs/nfs/blocklayout/blocklayout.h	| 210
-rw-r--r--	fs/nfs/blocklayout/blocklayoutdev.c	| 399
-rw-r--r--	fs/nfs/blocklayout/blocklayoutdm.c	| 114
-rw-r--r--	fs/nfs/blocklayout/extents.c	| 909
-rw-r--r--	fs/nfs/cache_lib.c	| 165
-rw-r--r--	fs/nfs/cache_lib.h	| 33
-rw-r--r--	fs/nfs/callback.c	| 412
-rw-r--r--	fs/nfs/callback.h	| 214
-rw-r--r--	fs/nfs/callback_proc.c	| 569
-rw-r--r--	fs/nfs/callback_xdr.c	| 998
-rw-r--r--	fs/nfs/client.c	| 2078
-rw-r--r--	fs/nfs/delegation.c	| 705
-rw-r--r--	fs/nfs/delegation.h	| 79
-rw-r--r--	fs/nfs/dir.c	| 2362
-rw-r--r--	fs/nfs/direct.c	| 1032
-rw-r--r--	fs/nfs/dns_resolve.c	| 448
-rw-r--r--	fs/nfs/dns_resolve.h	| 36
-rw-r--r--	fs/nfs/file.c	| 900
-rw-r--r--	fs/nfs/fscache-index.c	| 337
-rw-r--r--	fs/nfs/fscache.c	| 535
-rw-r--r--	fs/nfs/fscache.h	| 222
-rw-r--r--	fs/nfs/getroot.c	| 264
-rw-r--r--	fs/nfs/idmap.c	| 795
-rw-r--r--	fs/nfs/inode.c	| 1752
-rw-r--r--	fs/nfs/internal.h	| 468
-rw-r--r--	fs/nfs/iostat.h	| 71
-rw-r--r--	fs/nfs/mount_clnt.c	| 518
-rw-r--r--	fs/nfs/namespace.c	| 335
-rw-r--r--	fs/nfs/netns.h	| 27
-rw-r--r--	fs/nfs/nfs2xdr.c	| 1157
-rw-r--r--	fs/nfs/nfs3acl.c	| 440
-rw-r--r--	fs/nfs/nfs3proc.c	| 915
-rw-r--r--	fs/nfs/nfs3xdr.c	| 2498
-rw-r--r--	fs/nfs/nfs4_fs.h	| 379
-rw-r--r--	fs/nfs/nfs4filelayout.c	| 1109
-rw-r--r--	fs/nfs/nfs4filelayout.h	| 123
-rw-r--r--	fs/nfs/nfs4filelayoutdev.c	| 838
-rw-r--r--	fs/nfs/nfs4namespace.c	| 343
-rw-r--r--	fs/nfs/nfs4proc.c	| 6625
-rw-r--r--	fs/nfs/nfs4renewd.c	| 136
-rw-r--r--	fs/nfs/nfs4state.c	| 1877
-rw-r--r--	fs/nfs/nfs4xdr.c	| 7101
-rw-r--r--	fs/nfs/nfsroot.c	| 309
-rw-r--r--	fs/nfs/objlayout/Kbuild	| 5
-rw-r--r--	fs/nfs/objlayout/objio_osd.c	| 624
-rw-r--r--	fs/nfs/objlayout/objlayout.c	| 785
-rw-r--r--	fs/nfs/objlayout/objlayout.h	| 189
-rw-r--r--	fs/nfs/objlayout/pnfs_osd_xdr_cli.c	| 415
-rw-r--r--	fs/nfs/pagelist.c	| 415
-rw-r--r--	fs/nfs/pnfs.c	| 1552
-rw-r--r--	fs/nfs/pnfs.h	| 440
-rw-r--r--	fs/nfs/pnfs_dev.c	| 278
-rw-r--r--	fs/nfs/proc.c	| 771
-rw-r--r--	fs/nfs/read.c	| 699
-rw-r--r--	fs/nfs/super.c	| 3149
-rw-r--r--	fs/nfs/symlink.c	| 78
-rw-r--r--	fs/nfs/sysctl.c	| 90
-rw-r--r--	fs/nfs/unlink.c	| 558
-rw-r--r--	fs/nfs/write.c	| 1790
63 files changed, 54043 insertions, 0 deletions
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
new file mode 100644
index 00000000..2a0e6c59
--- /dev/null
+++ b/fs/nfs/Kconfig
@@ -0,0 +1,152 @@
+config NFS_FS
+ tristate "NFS client support"
+ depends on INET && FILE_LOCKING
+ select LOCKD
+ select SUNRPC
+ select NFS_ACL_SUPPORT if NFS_V3_ACL
+ help
+ Choose Y here if you want to access files residing on other
+ computers using Sun's Network File System protocol. To compile
+ this file system support as a module, choose M here: the module
+ will be called nfs.
+
+ To mount file systems exported by NFS servers, you also need to
+ install the user space mount.nfs command which can be found in
+ the Linux nfs-utils package, available from http://linux-nfs.org/.
+ Information about using the mount command is available in the
+ mount(8) man page. More detail about the Linux NFS client
+ implementation is available via the nfs(5) man page.
+
+ Below you can choose which versions of the NFS protocol are
+ available in the kernel to mount NFS servers. Support for NFS
+ version 2 (RFC 1094) is always available when NFS_FS is selected.
+
+ To configure a system which mounts its root file system via NFS
+ at boot time, say Y here, select "Kernel level IP
+ autoconfiguration" in the NETWORK menu, and select "Root file
+ system on NFS" below. You cannot compile this file system as a
+ module in this case.
+
+ If unsure, say N.
+
+config NFS_V3
+ bool "NFS client support for NFS version 3"
+ depends on NFS_FS
+ help
+ This option enables support for version 3 of the NFS protocol
+ (RFC 1813) in the kernel's NFS client.
+
+ If unsure, say Y.
+
+config NFS_V3_ACL
+ bool "NFS client support for the NFSv3 ACL protocol extension"
+ depends on NFS_V3
+ help
+ Some NFS servers support an auxiliary NFSv3 ACL protocol that
+ Sun added to Solaris but never became an official part of the
+ NFS version 3 protocol. This protocol extension allows
+ applications on NFS clients to manipulate POSIX Access Control
+ Lists on files residing on NFS servers. NFS servers enforce
+ ACLs on local files whether this protocol is available or not.
+
+ Choose Y here if your NFS server supports the Solaris NFSv3 ACL
+ protocol extension and you want your NFS client to allow
+ applications to access and modify ACLs on files on the server.
+
+ Most NFS servers don't support the Solaris NFSv3 ACL protocol
+ extension. You can choose N here or specify the "noacl" mount
+ option to prevent your NFS client from trying to use the NFSv3
+ ACL protocol.
+
+ If unsure, say N.
+
+config NFS_V4
+ bool "NFS client support for NFS version 4"
+ depends on NFS_FS
+ select SUNRPC_GSS
+ select KEYS
+ help
+ This option enables support for version 4 of the NFS protocol
+ (RFC 3530) in the kernel's NFS client.
+
+ To mount NFS servers using NFSv4, you also need to install user
+ space programs which can be found in the Linux nfs-utils package,
+ available from http://linux-nfs.org/.
+
+ If unsure, say Y.
+
+config NFS_V4_1
+ bool "NFS client support for NFSv4.1 (EXPERIMENTAL)"
+ depends on NFS_FS && NFS_V4 && EXPERIMENTAL
+ select SUNRPC_BACKCHANNEL
+ select PNFS_FILE_LAYOUT
+ help
+ This option enables support for minor version 1 of the NFSv4 protocol
+ (RFC 5661) in the kernel's NFS client.
+
+ If unsure, say N.
+
+config PNFS_FILE_LAYOUT
+ tristate
+
+config PNFS_BLOCK
+ tristate
+ depends on NFS_FS && NFS_V4_1 && BLK_DEV_DM
+ default m
+
+config PNFS_OBJLAYOUT
+ tristate
+ depends on NFS_FS && NFS_V4_1 && SCSI_OSD_ULD
+ default m
+
+config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
+ string "NFSv4.1 Implementation ID Domain"
+ depends on NFS_V4_1
+ default "kernel.org"
+ help
+ This option defines the domain portion of the implementation ID that
+ may be sent in the NFS exchange_id operation. The value must be in
+ the format of a DNS domain name and should be set to the DNS domain
+ name of the distribution.
+ If the NFS client is unchanged from the upstream kernel, this
+ option should be set to the default "kernel.org".
+
+config ROOT_NFS
+ bool "Root file system on NFS"
+ depends on NFS_FS=y && IP_PNP
+ help
+ If you want your system to mount its root file system via NFS,
+ choose Y here. This is common practice for managing systems
+ without local permanent storage. For details, read
+ <file:Documentation/filesystems/nfs/nfsroot.txt>.
+
+ Most people say N here.
+
+config NFS_FSCACHE
+ bool "Provide NFS client caching support"
+ depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y
+ help
+ Say Y here if you want NFS data to be cached locally on disc through
+ the general filesystem cache manager.
+
+config NFS_USE_LEGACY_DNS
+ bool "Use the legacy NFS DNS resolver"
+ depends on NFS_V4
+ help
+ The kernel now provides a method for translating a host name into an
+ IP address. Select Y here if you would rather use your own DNS
+ resolver script.
+
+ If unsure, say N.
+
+config NFS_USE_KERNEL_DNS
+ bool
+ depends on NFS_V4 && !NFS_USE_LEGACY_DNS
+ select DNS_RESOLVER
+ default y
+
+config NFS_DEBUG
+ bool
+ depends on NFS_FS && SUNRPC_DEBUG
+ select CRC32
+ default y
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
new file mode 100644
index 00000000..b58613d0
--- /dev/null
+++ b/fs/nfs/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for the Linux nfs filesystem routines.
+#
+
+obj-$(CONFIG_NFS_FS) += nfs.o
+
+nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
+ direct.o pagelist.o proc.o read.o symlink.o unlink.o \
+ write.o namespace.o mount_clnt.o \
+ dns_resolve.o cache_lib.o
+nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
+nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
+nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
+nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
+ delegation.o idmap.o \
+ callback.o callback_xdr.o callback_proc.o \
+ nfs4namespace.o
+nfs-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o
+nfs-$(CONFIG_SYSCTL) += sysctl.o
+nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
+
+obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o
+nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o
+
+obj-$(CONFIG_PNFS_OBJLAYOUT) += objlayout/
+obj-$(CONFIG_PNFS_BLOCK) += blocklayout/
diff --git a/fs/nfs/blocklayout/Makefile b/fs/nfs/blocklayout/Makefile
new file mode 100644
index 00000000..d5815505
--- /dev/null
+++ b/fs/nfs/blocklayout/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the pNFS block layout driver kernel module
+#
+obj-$(CONFIG_PNFS_BLOCK) += blocklayoutdriver.o
+blocklayoutdriver-objs := blocklayout.o extents.o blocklayoutdev.o blocklayoutdm.o
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
new file mode 100644
index 00000000..7f6a23f0
--- /dev/null
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -0,0 +1,1185 @@
+/*
+ * linux/fs/nfs/blocklayout/blocklayout.c
+ *
+ * Module for the NFSv4.1 pNFS block layout driver.
+ *
+ * Copyright (c) 2006 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@citi.umich.edu>
+ * Fred Isaman <iisaman@umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/bio.h> /* struct bio */
+#include <linux/buffer_head.h> /* various write calls */
+#include <linux/prefetch.h>
+
+#include "../pnfs.h"
+#include "../internal.h"
+#include "blocklayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
+MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
+
+static void print_page(struct page *page)
+{
+ dprintk("PRINTPAGE page %p\n", page);
+ dprintk(" PagePrivate %d\n", PagePrivate(page));
+ dprintk(" PageUptodate %d\n", PageUptodate(page));
+ dprintk(" PageError %d\n", PageError(page));
+ dprintk(" PageDirty %d\n", PageDirty(page));
+ dprintk(" PageReferenced %d\n", PageReferenced(page));
+ dprintk(" PageLocked %d\n", PageLocked(page));
+ dprintk(" PageWriteback %d\n", PageWriteback(page));
+ dprintk(" PageMappedToDisk %d\n", PageMappedToDisk(page));
+ dprintk("\n");
+}
+
+/* Given the be associated with isect, determine if page data needs to be
+ * initialized.
+ */
+static int is_hole(struct pnfs_block_extent *be, sector_t isect)
+{
+ if (be->be_state == PNFS_BLOCK_NONE_DATA)
+ return 1;
+ else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
+ return 0;
+ else
+ return !bl_is_sector_init(be->be_inval, isect);
+}
+
+/* Given the be associated with isect, determine if page data can be
+ * written to disk.
+ */
+static int is_writable(struct pnfs_block_extent *be, sector_t isect)
+{
+ return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
+ be->be_state == PNFS_BLOCK_INVALID_DATA);
+}
+
+/* The data we are handed might be spread across several bios. We need
+ * to track when the last one is finished.
+ */
+struct parallel_io {
+ struct kref refcnt;
+ void (*pnfs_callback) (void *data, int num_se);
+ void *data;
+ int bse_count;
+};
+
+static inline struct parallel_io *alloc_parallel(void *data)
+{
+ struct parallel_io *rv;
+
+ rv = kmalloc(sizeof(*rv), GFP_NOFS);
+ if (rv) {
+ rv->data = data;
+ kref_init(&rv->refcnt);
+ rv->bse_count = 0;
+ }
+ return rv;
+}
+
+static inline void get_parallel(struct parallel_io *p)
+{
+ kref_get(&p->refcnt);
+}
+
+static void destroy_parallel(struct kref *kref)
+{
+ struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);
+
+ dprintk("%s enter\n", __func__);
+ p->pnfs_callback(p->data, p->bse_count);
+ kfree(p);
+}
+
+static inline void put_parallel(struct parallel_io *p)
+{
+ kref_put(&p->refcnt, destroy_parallel);
+}
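
The three helpers above are a standard kref pattern: alloc_parallel() starts the count at one for the submitter, bl_submit_bio() takes an extra reference per submitted bio, and destroy_parallel() fires the callback only when the final reference drops. A minimal userspace sketch of the same idea (illustrative only; none of these names exist in the driver):

#include <stdio.h>

struct demo_io {
	int refcnt;
	void (*done)(void);
};

static void demo_put(struct demo_io *p)
{
	if (--p->refcnt == 0)
		p->done();	/* runs exactly once, after the last I/O */
}

static void all_done(void)
{
	printf("all I/O complete\n");
}

int main(void)
{
	struct demo_io io = { .refcnt = 1, .done = all_done }; /* submitter's ref */

	io.refcnt++;	/* "bio" 1 submitted */
	io.refcnt++;	/* "bio" 2 submitted */
	demo_put(&io);	/* submitter drops its initial reference */
	demo_put(&io);	/* "bio" 1 completes */
	demo_put(&io);	/* "bio" 2 completes: count hits 0, callback fires */
	return 0;
}
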
+
+static struct bio *
+bl_submit_bio(int rw, struct bio *bio)
+{
+ if (bio) {
+ get_parallel(bio->bi_private);
+ dprintk("%s submitting %s bio %u@%llu\n", __func__,
+ rw == READ ? "read" : "write",
+ bio->bi_size, (unsigned long long)bio->bi_sector);
+ submit_bio(rw, bio);
+ }
+ return NULL;
+}
+
+static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+ struct parallel_io *par)
+{
+ struct bio *bio;
+
+ npg = min(npg, BIO_MAX_PAGES);
+ bio = bio_alloc(GFP_NOIO, npg);
+ if (!bio && (current->flags & PF_MEMALLOC)) {
+ while (!bio && (npg /= 2))
+ bio = bio_alloc(GFP_NOIO, npg);
+ }
+
+ if (bio) {
+ bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+ bio->bi_bdev = be->be_mdev;
+ bio->bi_end_io = end_io;
+ bio->bi_private = par;
+ }
+ return bio;
+}
+
+static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
+ sector_t isect, struct page *page,
+ struct pnfs_block_extent *be,
+ void (*end_io)(struct bio *, int err),
+ struct parallel_io *par)
+{
+retry:
+ if (!bio) {
+ bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
+ if (!bio)
+ return ERR_PTR(-ENOMEM);
+ }
+ if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ bio = bl_submit_bio(rw, bio);
+ goto retry;
+ }
+ return bio;
+}
+
+/* This is basically copied from mpage_end_io_read */
+static void bl_end_io_read(struct bio *bio, int err)
+{
+ struct parallel_io *par = bio->bi_private;
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;
+
+ do {
+ struct page *page = bvec->bv_page;
+
+ if (--bvec >= bio->bi_io_vec)
+ prefetchw(&bvec->bv_page->flags);
+ if (uptodate)
+ SetPageUptodate(page);
+ } while (bvec >= bio->bi_io_vec);
+ if (!uptodate) {
+ if (!rdata->pnfs_error)
+ rdata->pnfs_error = -EIO;
+ pnfs_set_lo_fail(rdata->lseg);
+ }
+ bio_put(bio);
+ put_parallel(par);
+}
+
+static void bl_read_cleanup(struct work_struct *work)
+{
+ struct rpc_task *task;
+ struct nfs_read_data *rdata;
+ dprintk("%s enter\n", __func__);
+ task = container_of(work, struct rpc_task, u.tk_work);
+ rdata = container_of(task, struct nfs_read_data, task);
+ pnfs_ld_read_done(rdata);
+}
+
+static void
+bl_end_par_io_read(void *data, int unused)
+{
+ struct nfs_read_data *rdata = data;
+
+ rdata->task.tk_status = rdata->pnfs_error;
+ INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
+ schedule_work(&rdata->task.u.tk_work);
+}
+
+static enum pnfs_try_status
+bl_read_pagelist(struct nfs_read_data *rdata)
+{
+ int i, hole;
+ struct bio *bio = NULL;
+ struct pnfs_block_extent *be = NULL, *cow_read = NULL;
+ sector_t isect, extent_length = 0;
+ struct parallel_io *par;
+ loff_t f_offset = rdata->args.offset;
+ struct page **pages = rdata->args.pages;
+ int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
+
+ dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
+ rdata->npages, f_offset, (unsigned int)rdata->args.count);
+
+ par = alloc_parallel(rdata);
+ if (!par)
+ goto use_mds;
+ par->pnfs_callback = bl_end_par_io_read;
+ /* At this point, we can no longer jump to use_mds */
+
+ isect = (sector_t) (f_offset >> SECTOR_SHIFT);
+ /* Code assumes extents are page-aligned */
+ for (i = pg_index; i < rdata->npages; i++) {
+ if (!extent_length) {
+ /* We've used up the previous extent */
+ bl_put_extent(be);
+ bl_put_extent(cow_read);
+ bio = bl_submit_bio(READ, bio);
+ /* Get the next one */
+ be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg),
+ isect, &cow_read);
+ if (!be) {
+ rdata->pnfs_error = -EIO;
+ goto out;
+ }
+ extent_length = be->be_length -
+ (isect - be->be_f_offset);
+ if (cow_read) {
+ sector_t cow_length = cow_read->be_length -
+ (isect - cow_read->be_f_offset);
+ extent_length = min(extent_length, cow_length);
+ }
+ }
+ hole = is_hole(be, isect);
+ if (hole && !cow_read) {
+ bio = bl_submit_bio(READ, bio);
+ /* Fill hole w/ zeroes w/o accessing device */
+ dprintk("%s Zeroing page for hole\n", __func__);
+ zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
+ print_page(pages[i]);
+ SetPageUptodate(pages[i]);
+ } else {
+ struct pnfs_block_extent *be_read;
+
+ be_read = (hole && cow_read) ? cow_read : be;
+ bio = bl_add_page_to_bio(bio, rdata->npages - i, READ,
+ isect, pages[i], be_read,
+ bl_end_io_read, par);
+ if (IS_ERR(bio)) {
+ rdata->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
+ goto out;
+ }
+ }
+ isect += PAGE_CACHE_SECTORS;
+ extent_length -= PAGE_CACHE_SECTORS;
+ }
+ if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) {
+ rdata->res.eof = 1;
+ rdata->res.count = rdata->inode->i_size - f_offset;
+ } else {
+ rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
+ }
+out:
+ bl_put_extent(be);
+ bl_put_extent(cow_read);
+ bl_submit_bio(READ, bio);
+ put_parallel(par);
+ return PNFS_ATTEMPTED;
+
+ use_mds:
+ dprintk("Giving up and using normal NFS\n");
+ return PNFS_NOT_ATTEMPTED;
+}
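
The offset arithmetic in bl_read_pagelist() converts byte offsets to 512-byte sectors (SECTOR_SHIFT is 9) and advances one page at a time (PAGE_CACHE_SECTORS sectors per page). A worked example, assuming 4096-byte pages; both constants here are assumptions about the build, not guarantees:

#include <stdio.h>

#define SECTOR_SHIFT		9	/* 512-byte sectors */
#define PAGE_CACHE_SECTORS	8	/* 4096 >> SECTOR_SHIFT, assuming 4 KiB pages */

int main(void)
{
	unsigned long long f_offset = 12288;	/* read starts three pages in */
	unsigned long long isect = f_offset >> SECTOR_SHIFT;

	printf("first sector: %llu\n", isect);		/* 24 */
	isect += PAGE_CACHE_SECTORS;			/* one page later */
	printf("next page's sector: %llu\n", isect);	/* 32 */
	printf("bytes covered so far: %llu\n",
	       (isect << SECTOR_SHIFT) - f_offset);	/* 4096 */
	return 0;
}
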
+
+static void mark_extents_written(struct pnfs_block_layout *bl,
+ __u64 offset, __u32 count)
+{
+ sector_t isect, end;
+ struct pnfs_block_extent *be;
+ struct pnfs_block_short_extent *se;
+
+ dprintk("%s(%llu, %u)\n", __func__, offset, count);
+ if (count == 0)
+ return;
+ isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
+ end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
+ end >>= SECTOR_SHIFT;
+ while (isect < end) {
+ sector_t len;
+ be = bl_find_get_extent(bl, isect, NULL);
+ BUG_ON(!be); /* FIXME */
+ len = min(end, be->be_f_offset + be->be_length) - isect;
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ se = bl_pop_one_short_extent(be->be_inval);
+ BUG_ON(!se);
+ bl_mark_for_commit(be, isect, len, se);
+ }
+ isect += len;
+ bl_put_extent(be);
+ }
+}
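
The masking in mark_extents_written() rounds the byte range out to whole pages before converting to sectors: offset is rounded down to a page boundary and offset + count is rounded up. A worked example under the same 4 KiB-page assumption:

#include <stdio.h>

int main(void)
{
	unsigned long long offset = 5000, count = 3000;
	unsigned long long mask = ~4095ULL;	/* PAGE_CACHE_MASK for 4 KiB pages */

	unsigned long long isect = (offset & mask) >> 9;		/* 4096 >> 9 = 8 */
	unsigned long long end = ((offset + count + 4095) & mask) >> 9;	/* 8192 >> 9 = 16 */

	printf("marking sectors [%llu, %llu)\n", isect, end);	/* [8, 16) */
	return 0;
}
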
+
+static void bl_end_io_write_zero(struct bio *bio, int err)
+{
+ struct parallel_io *par = bio->bi_private;
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
+
+ do {
+ struct page *page = bvec->bv_page;
+
+ if (--bvec >= bio->bi_io_vec)
+ prefetchw(&bvec->bv_page->flags);
+ /* This is the zeroing page we added */
+ end_page_writeback(page);
+ page_cache_release(page);
+ } while (bvec >= bio->bi_io_vec);
+
+ if (unlikely(!uptodate)) {
+ if (!wdata->pnfs_error)
+ wdata->pnfs_error = -EIO;
+ pnfs_set_lo_fail(wdata->lseg);
+ }
+ bio_put(bio);
+ put_parallel(par);
+}
+
+static void bl_end_io_write(struct bio *bio, int err)
+{
+ struct parallel_io *par = bio->bi_private;
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
+
+ if (!uptodate) {
+ if (!wdata->pnfs_error)
+ wdata->pnfs_error = -EIO;
+ pnfs_set_lo_fail(wdata->lseg);
+ }
+ bio_put(bio);
+ put_parallel(par);
+}
+
+/* Function scheduled for call during bl_end_par_io_write,
+ * it marks sectors as written and extends the commitlist.
+ */
+static void bl_write_cleanup(struct work_struct *work)
+{
+ struct rpc_task *task;
+ struct nfs_write_data *wdata;
+ dprintk("%s enter\n", __func__);
+ task = container_of(work, struct rpc_task, u.tk_work);
+ wdata = container_of(task, struct nfs_write_data, task);
+ if (likely(!wdata->pnfs_error)) {
+ /* Marks for LAYOUTCOMMIT */
+ mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+ wdata->args.offset, wdata->args.count);
+ }
+ pnfs_ld_write_done(wdata);
+}
+
+/* Called when the last of the bios associated with a bl_write_pagelist call finishes */
+static void bl_end_par_io_write(void *data, int num_se)
+{
+ struct nfs_write_data *wdata = data;
+
+ if (unlikely(wdata->pnfs_error)) {
+ bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
+ num_se);
+ }
+
+ wdata->task.tk_status = wdata->pnfs_error;
+ wdata->verf.committed = NFS_FILE_SYNC;
+ INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
+ schedule_work(&wdata->task.u.tk_work);
+}
+
+/* FIXME STUB - mark the intersection of layout and page as bad, so it is
+ * not used again.
+ */
+static void mark_bad_read(void)
+{
+ return;
+}
+
+/*
+ * map_block: map a requested I/O block (isect) into an offset in the LVM
+ * block_device
+ */
+static void
+map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
+{
+ dprintk("%s enter be=%p\n", __func__, be);
+
+ set_buffer_mapped(bh);
+ bh->b_bdev = be->be_mdev;
+ bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
+ (be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);
+
+ dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
+ __func__, (unsigned long long)isect, (long)bh->b_blocknr,
+ bh->b_size);
+ return;
+}
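
map_block() does two conversions: the extent translates a file sector to a volume sector (isect - be_f_offset + be_v_offset), and the shift by (i_blkbits - SECTOR_SHIFT) turns 512-byte sectors into device-sized blocks. A worked example assuming a 4096-byte device block size (i_blkbits == 12), with all values invented for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long isect = 1000;	/* file position, in 512-byte sectors */
	unsigned long long be_f_offset = 960;	/* extent start within the file */
	unsigned long long be_v_offset = 20480;	/* extent start on the volume */
	int i_blkbits = 12;			/* 4096-byte device blocks (assumed) */

	unsigned long long vsect = isect - be_f_offset + be_v_offset;
	unsigned long long blocknr = vsect >> (i_blkbits - 9);

	printf("volume sector %llu -> device block %llu\n", vsect, blocknr);
	/* prints: volume sector 20520 -> device block 2565 */
	return 0;
}
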
+
+/* Given an unmapped page, zero it (or read it in for COW); the page is
+ * locked by the caller.
+ */
+static int
+init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
+{
+ struct buffer_head *bh = NULL;
+ int ret = 0;
+ sector_t isect;
+
+ dprintk("%s enter, %p\n", __func__, page);
+ BUG_ON(PageUptodate(page));
+ if (!cow_read) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ SetPageUptodate(page);
+ goto cleanup;
+ }
+
+ bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
+ if (!bh) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
+ map_block(bh, isect, cow_read);
+ if (!bh_uptodate_or_lock(bh))
+ ret = bh_submit_read(bh);
+ if (ret)
+ goto cleanup;
+ SetPageUptodate(page);
+
+cleanup:
+ bl_put_extent(cow_read);
+ if (bh)
+ free_buffer_head(bh);
+ if (ret) {
+ /* Need to mark layout with bad read...should now
+ * just use nfs4 for reads and writes.
+ */
+ mark_bad_read();
+ }
+ return ret;
+}
+
+/* Find or create a zeroing page marked as under writeback.
+ * Returns ERR_PTR on error, NULL to indicate that this page should be
+ * skipped, or the page itself to indicate that it should be written out.
+ */
+static struct page *
+bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
+ struct pnfs_block_extent *cow_read)
+{
+ struct page *page;
+ int locked = 0;
+ page = find_get_page(inode->i_mapping, index);
+ if (page)
+ goto check_page;
+
+ page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+ if (unlikely(!page)) {
+ dprintk("%s oom\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+ locked = 1;
+
+check_page:
+ /* PageDirty: Another task will write this out
+ * PageWriteback: Another task is writing this out
+ * PageUptodate: It was read before
+ */
+ if (PageDirty(page) || PageWriteback(page)) {
+ print_page(page);
+ if (locked)
+ unlock_page(page);
+ page_cache_release(page);
+ return NULL;
+ }
+
+ if (!locked) {
+ lock_page(page);
+ locked = 1;
+ goto check_page;
+ }
+ if (!PageUptodate(page)) {
+ /* New page, readin or zero it */
+ init_page_for_write(page, cow_read);
+ }
+ set_page_writeback(page);
+ unlock_page(page);
+
+ return page;
+}
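
The three-way return convention above (ERR_PTR for hard failure, NULL for "skip", a page pointer for "write it out") is what the caller in bl_write_pagelist() below dispatches on. A self-contained userspace sketch of that convention, with stand-ins for the kernel's ERR_PTR() helpers (all names here are invented for illustration):

#include <stdio.h>

/* userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095;
}

static int a_page = 42;

static void *find_page(int mode)
{
	if (mode == 0)
		return ERR_PTR(-12);	/* hard failure (-ENOMEM) */
	if (mode == 1)
		return NULL;		/* someone else owns the page: skip */
	return &a_page;			/* page is ours to write out */
}

int main(void)
{
	for (int mode = 0; mode < 3; mode++) {
		void *page = find_page(mode);

		if (IS_ERR(page))
			printf("abort write, error %ld\n", PTR_ERR(page));
		else if (!page)
			printf("skip this page\n");
		else
			printf("add page to bio and submit\n");
	}
	return 0;
}
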
+
+static enum pnfs_try_status
+bl_write_pagelist(struct nfs_write_data *wdata, int sync)
+{
+ int i, ret, npg_zero, pg_index, last = 0;
+ struct bio *bio = NULL;
+ struct pnfs_block_extent *be = NULL, *cow_read = NULL;
+ sector_t isect, last_isect = 0, extent_length = 0;
+ struct parallel_io *par;
+ loff_t offset = wdata->args.offset;
+ size_t count = wdata->args.count;
+ struct page **pages = wdata->args.pages;
+ struct page *page;
+ pgoff_t index;
+ u64 temp;
+ int npg_per_block =
+ NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
+
+ dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
+ /* At this point, wdata->pages is a (sequential) list of nfs_pages.
+ * We want to write each page; if there is an error, we set pnfs_error
+ * so the write is redone using normal NFS.
+ */
+ par = alloc_parallel(wdata);
+ if (!par)
+ goto out_mds;
+ par->pnfs_callback = bl_end_par_io_write;
+ /* At this point, have to be more careful with error handling */
+
+ isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
+ be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
+ if (!be || !is_writable(be, isect)) {
+ dprintk("%s no matching extents!\n", __func__);
+ goto out_mds;
+ }
+
+ /* First page inside INVALID extent */
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ if (likely(!bl_push_one_short_extent(be->be_inval)))
+ par->bse_count++;
+ else
+ goto out_mds;
+ temp = offset >> PAGE_CACHE_SHIFT;
+ npg_zero = do_div(temp, npg_per_block);
+ isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
+ (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
+ extent_length = be->be_length - (isect - be->be_f_offset);
+
+fill_invalid_ext:
+ dprintk("%s need to zero %d pages\n", __func__, npg_zero);
+ for (; npg_zero > 0; npg_zero--) {
+ if (bl_is_sector_init(be->be_inval, isect)) {
+ dprintk("isect %llu already init\n",
+ (unsigned long long)isect);
+ goto next_page;
+ }
+ /* page ref released in bl_end_io_write_zero */
+ index = isect >> PAGE_CACHE_SECTOR_SHIFT;
+ dprintk("%s zero %dth page: index %lu isect %llu\n",
+ __func__, npg_zero, index,
+ (unsigned long long)isect);
+ page = bl_find_get_zeroing_page(wdata->inode, index,
+ cow_read);
+ if (unlikely(IS_ERR(page))) {
+ wdata->pnfs_error = PTR_ERR(page);
+ goto out;
+ } else if (page == NULL)
+ goto next_page;
+
+ ret = bl_mark_sectors_init(be->be_inval, isect,
+ PAGE_CACHE_SECTORS);
+ if (unlikely(ret)) {
+ dprintk("%s bl_mark_sectors_init fail %d\n",
+ __func__, ret);
+ end_page_writeback(page);
+ page_cache_release(page);
+ wdata->pnfs_error = ret;
+ goto out;
+ }
+ if (likely(!bl_push_one_short_extent(be->be_inval)))
+ par->bse_count++;
+ else {
+ end_page_writeback(page);
+ page_cache_release(page);
+ wdata->pnfs_error = -ENOMEM;
+ goto out;
+ }
+ /* FIXME: This should be done in bi_end_io */
+ mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+ page->index << PAGE_CACHE_SHIFT,
+ PAGE_CACHE_SIZE);
+
+ bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
+ isect, page, be,
+ bl_end_io_write_zero, par);
+ if (IS_ERR(bio)) {
+ wdata->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
+ goto out;
+ }
+next_page:
+ isect += PAGE_CACHE_SECTORS;
+ extent_length -= PAGE_CACHE_SECTORS;
+ }
+ if (last)
+ goto write_done;
+ }
+ bio = bl_submit_bio(WRITE, bio);
+
+ /* Middle pages */
+ pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
+ for (i = pg_index; i < wdata->npages; i++) {
+ if (!extent_length) {
+ /* We've used up the previous extent */
+ bl_put_extent(be);
+ bio = bl_submit_bio(WRITE, bio);
+ /* Get the next one */
+ be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
+ isect, NULL);
+ if (!be || !is_writable(be, isect)) {
+ wdata->pnfs_error = -EINVAL;
+ goto out;
+ }
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ if (likely(!bl_push_one_short_extent(
+ be->be_inval)))
+ par->bse_count++;
+ else {
+ wdata->pnfs_error = -ENOMEM;
+ goto out;
+ }
+ }
+ extent_length = be->be_length -
+ (isect - be->be_f_offset);
+ }
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ ret = bl_mark_sectors_init(be->be_inval, isect,
+ PAGE_CACHE_SECTORS);
+ if (unlikely(ret)) {
+ dprintk("%s bl_mark_sectors_init fail %d\n",
+ __func__, ret);
+ wdata->pnfs_error = ret;
+ goto out;
+ }
+ }
+ bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
+ isect, pages[i], be,
+ bl_end_io_write, par);
+ if (IS_ERR(bio)) {
+ wdata->pnfs_error = PTR_ERR(bio);
+ bio = NULL;
+ goto out;
+ }
+ isect += PAGE_CACHE_SECTORS;
+ last_isect = isect;
+ extent_length -= PAGE_CACHE_SECTORS;
+ }
+
+ /* Last page inside INVALID extent */
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ bio = bl_submit_bio(WRITE, bio);
+ temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
+ npg_zero = npg_per_block - do_div(temp, npg_per_block);
+ if (npg_zero < npg_per_block) {
+ last = 1;
+ goto fill_invalid_ext;
+ }
+ }
+
+write_done:
+ wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
+ if (count < wdata->res.count) {
+ wdata->res.count = count;
+ }
+out:
+ bl_put_extent(be);
+ bl_submit_bio(WRITE, bio);
+ put_parallel(par);
+ return PNFS_ATTEMPTED;
+out_mds:
+ bl_put_extent(be);
+ kfree(par);
+ return PNFS_NOT_ATTEMPTED;
+}
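
The head-of-block zeroing above leans on do_div(), which divides its 64-bit first argument in place and returns the remainder. With a 64 KiB server block and 4 KiB pages (npg_per_block == 16; both sizes are assumptions chosen for the example), a write starting at byte 0x2A000 sits 10 pages into its block, so up to 10 leading pages may need zero-filling:

#include <stdio.h>

int main(void)
{
	unsigned long long offset = 0x2A000;	/* write start, in bytes */
	int npg_per_block = 16;			/* 64 KiB block / 4 KiB pages */

	unsigned long long temp = offset >> 12;		/* page index: 42 */
	int npg_zero = (int)(temp % npg_per_block);	/* do_div()'s return: 10 */
	temp /= npg_per_block;				/* do_div()'s quotient: 2 */

	printf("block %llu, %d pages of zeroing needed\n", temp, npg_zero);
	return 0;
}
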
+
+/* FIXME - range ignored */
+static void
+release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
+{
+ int i;
+ struct pnfs_block_extent *be;
+
+ spin_lock(&bl->bl_ext_lock);
+ for (i = 0; i < EXTENT_LISTS; i++) {
+ while (!list_empty(&bl->bl_extents[i])) {
+ be = list_first_entry(&bl->bl_extents[i],
+ struct pnfs_block_extent,
+ be_node);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ }
+ }
+ spin_unlock(&bl->bl_ext_lock);
+}
+
+static void
+release_inval_marks(struct pnfs_inval_markings *marks)
+{
+ struct pnfs_inval_tracking *pos, *temp;
+ struct pnfs_block_short_extent *se, *stemp;
+
+ list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
+ list_del(&pos->it_link);
+ kfree(pos);
+ }
+
+ list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
+ list_del(&se->bse_node);
+ kfree(se);
+ }
+ return;
+}
+
+static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
+
+ dprintk("%s enter\n", __func__);
+ release_extents(bl, NULL);
+ release_inval_marks(&bl->bl_inval);
+ kfree(bl);
+}
+
+static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
+ gfp_t gfp_flags)
+{
+ struct pnfs_block_layout *bl;
+
+ dprintk("%s enter\n", __func__);
+ bl = kzalloc(sizeof(*bl), gfp_flags);
+ if (!bl)
+ return NULL;
+ spin_lock_init(&bl->bl_ext_lock);
+ INIT_LIST_HEAD(&bl->bl_extents[0]);
+ INIT_LIST_HEAD(&bl->bl_extents[1]);
+ INIT_LIST_HEAD(&bl->bl_commit);
+ INIT_LIST_HEAD(&bl->bl_committing);
+ bl->bl_count = 0;
+ bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
+ BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
+ return &bl->bl_layout;
+}
+
+static void bl_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ dprintk("%s enter\n", __func__);
+ kfree(lseg);
+}
+
+/* We pretty much ignore lseg, and store all data layout-wide, so we
+ * can correctly merge.
+ */
+static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
+ struct nfs4_layoutget_res *lgr,
+ gfp_t gfp_flags)
+{
+ struct pnfs_layout_segment *lseg;
+ int status;
+
+ dprintk("%s enter\n", __func__);
+ lseg = kzalloc(sizeof(*lseg), gfp_flags);
+ if (!lseg)
+ return ERR_PTR(-ENOMEM);
+ status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
+ if (status) {
+ /* We don't want to call the full-blown bl_free_lseg,
+ * since on error extents were not touched.
+ */
+ kfree(lseg);
+ return ERR_PTR(status);
+ }
+ return lseg;
+}
+
+static void
+bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *arg)
+{
+ dprintk("%s enter\n", __func__);
+ encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
+}
+
+static void
+bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
+{
+ struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;
+
+ dprintk("%s enter\n", __func__);
+ clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
+}
+
+static void free_blk_mountid(struct block_mount_id *mid)
+{
+ if (mid) {
+ struct pnfs_block_dev *dev, *tmp;
+
+ /* No need to take bm_lock as we are the last user freeing bm_devlist */
+ list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
+ list_del(&dev->bm_node);
+ bl_free_block_dev(dev);
+ }
+ kfree(mid);
+ }
+}
+
+/* This is mostly copied from the filelayout's get_device_info function.
+ * It seems much of this should be at the generic pnfs level.
+ */
+static struct pnfs_block_dev *
+nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
+ struct nfs4_deviceid *d_id)
+{
+ struct pnfs_device *dev;
+ struct pnfs_block_dev *rv;
+ u32 max_resp_sz;
+ int max_pages;
+ struct page **pages = NULL;
+ int i, rc;
+
+ /*
+ * Use the session max response size as the basis for setting
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ max_pages = nfs_page_array_len(0, max_resp_sz);
+ dprintk("%s max_resp_sz %u max_pages %d\n",
+ __func__, max_resp_sz, max_pages);
+
+ dev = kmalloc(sizeof(*dev), GFP_NOFS);
+ if (!dev) {
+ dprintk("%s kmalloc failed\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pages = kzalloc(max_pages * sizeof(struct page *), GFP_NOFS);
+ if (pages == NULL) {
+ kfree(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+ for (i = 0; i < max_pages; i++) {
+ pages[i] = alloc_page(GFP_NOFS);
+ if (!pages[i]) {
+ rv = ERR_PTR(-ENOMEM);
+ goto out_free;
+ }
+ }
+
+ memcpy(&dev->dev_id, d_id, sizeof(*d_id));
+ dev->layout_type = LAYOUT_BLOCK_VOLUME;
+ dev->pages = pages;
+ dev->pgbase = 0;
+ dev->pglen = PAGE_SIZE * max_pages;
+ dev->mincount = 0;
+
+ dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
+ rc = nfs4_proc_getdeviceinfo(server, dev);
+ dprintk("%s getdevice info returns %d\n", __func__, rc);
+ if (rc) {
+ rv = ERR_PTR(rc);
+ goto out_free;
+ }
+
+ rv = nfs4_blk_decode_device(server, dev);
+ out_free:
+ for (i = 0; i < max_pages; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+ kfree(dev);
+ return rv;
+}
+
+static int
+bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
+{
+ struct block_mount_id *b_mt_id = NULL;
+ struct pnfs_devicelist *dlist = NULL;
+ struct pnfs_block_dev *bdev;
+ LIST_HEAD(block_disklist);
+ int status, i;
+
+ dprintk("%s enter\n", __func__);
+
+ if (server->pnfs_blksize == 0) {
+ dprintk("%s Server did not return blksize\n", __func__);
+ return -EINVAL;
+ }
+ b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
+ if (!b_mt_id) {
+ status = -ENOMEM;
+ goto out_error;
+ }
+ /* Initialize nfs4 block layout mount id */
+ spin_lock_init(&b_mt_id->bm_lock);
+ INIT_LIST_HEAD(&b_mt_id->bm_devlist);
+
+ dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
+ if (!dlist) {
+ status = -ENOMEM;
+ goto out_error;
+ }
+ dlist->eof = 0;
+ while (!dlist->eof) {
+ status = nfs4_proc_getdevicelist(server, fh, dlist);
+ if (status)
+ goto out_error;
+ dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
+ __func__, dlist->num_devs, dlist->eof);
+ for (i = 0; i < dlist->num_devs; i++) {
+ bdev = nfs4_blk_get_deviceinfo(server, fh,
+ &dlist->dev_id[i]);
+ if (IS_ERR(bdev)) {
+ status = PTR_ERR(bdev);
+ goto out_error;
+ }
+ spin_lock(&b_mt_id->bm_lock);
+ list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
+ spin_unlock(&b_mt_id->bm_lock);
+ }
+ }
+ dprintk("%s SUCCESS\n", __func__);
+ server->pnfs_ld_data = b_mt_id;
+
+ out_return:
+ kfree(dlist);
+ return status;
+
+ out_error:
+ free_blk_mountid(b_mt_id);
+ goto out_return;
+}
+
+static int
+bl_clear_layoutdriver(struct nfs_server *server)
+{
+ struct block_mount_id *b_mt_id = server->pnfs_ld_data;
+
+ dprintk("%s enter\n", __func__);
+ free_blk_mountid(b_mt_id);
+ dprintk("%s RETURNS\n", __func__);
+ return 0;
+}
+
+static const struct nfs_pageio_ops bl_pg_read_ops = {
+ .pg_init = pnfs_generic_pg_init_read,
+ .pg_test = pnfs_generic_pg_test,
+ .pg_doio = pnfs_generic_pg_readpages,
+};
+
+static const struct nfs_pageio_ops bl_pg_write_ops = {
+ .pg_init = pnfs_generic_pg_init_write,
+ .pg_test = pnfs_generic_pg_test,
+ .pg_doio = pnfs_generic_pg_writepages,
+};
+
+static struct pnfs_layoutdriver_type blocklayout_type = {
+ .id = LAYOUT_BLOCK_VOLUME,
+ .name = "LAYOUT_BLOCK_VOLUME",
+ .read_pagelist = bl_read_pagelist,
+ .write_pagelist = bl_write_pagelist,
+ .alloc_layout_hdr = bl_alloc_layout_hdr,
+ .free_layout_hdr = bl_free_layout_hdr,
+ .alloc_lseg = bl_alloc_lseg,
+ .free_lseg = bl_free_lseg,
+ .encode_layoutcommit = bl_encode_layoutcommit,
+ .cleanup_layoutcommit = bl_cleanup_layoutcommit,
+ .set_layoutdriver = bl_set_layoutdriver,
+ .clear_layoutdriver = bl_clear_layoutdriver,
+ .pg_read_ops = &bl_pg_read_ops,
+ .pg_write_ops = &bl_pg_write_ops,
+};
+
+static const struct rpc_pipe_ops bl_upcall_ops = {
+ .upcall = rpc_pipe_generic_upcall,
+ .downcall = bl_pipe_downcall,
+ .destroy_msg = bl_pipe_destroy_msg,
+};
+
+static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
+ struct rpc_pipe *pipe)
+{
+ struct dentry *dir, *dentry;
+
+ dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
+ if (dir == NULL)
+ return ERR_PTR(-ENOENT);
+ dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
+ dput(dir);
+ return dentry;
+}
+
+static void nfs4blocklayout_unregister_sb(struct super_block *sb,
+ struct rpc_pipe *pipe)
+{
+ if (pipe->dentry)
+ rpc_unlink(pipe->dentry);
+}
+
+static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct super_block *sb = ptr;
+ struct net *net = sb->s_fs_info;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+ struct dentry *dentry;
+ int ret = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return 0;
+
+ if (nn->bl_device_pipe == NULL) {
+ module_put(THIS_MODULE);
+ return 0;
+ }
+
+ switch (event) {
+ case RPC_PIPEFS_MOUNT:
+ dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ break;
+ }
+ nn->bl_device_pipe->dentry = dentry;
+ break;
+ case RPC_PIPEFS_UMOUNT:
+ if (nn->bl_device_pipe->dentry)
+ nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static struct notifier_block nfs4blocklayout_block = {
+ .notifier_call = rpc_pipefs_event,
+};
+
+static struct dentry *nfs4blocklayout_register_net(struct net *net,
+ struct rpc_pipe *pipe)
+{
+ struct super_block *pipefs_sb;
+ struct dentry *dentry;
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (!pipefs_sb)
+ return NULL;
+ dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
+ rpc_put_sb_net(net);
+ return dentry;
+}
+
+static void nfs4blocklayout_unregister_net(struct net *net,
+ struct rpc_pipe *pipe)
+{
+ struct super_block *pipefs_sb;
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (pipefs_sb) {
+ nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
+ rpc_put_sb_net(net);
+ }
+}
+
+static int nfs4blocklayout_net_init(struct net *net)
+{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+ struct dentry *dentry;
+
+ init_waitqueue_head(&nn->bl_wq);
+ nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
+ if (IS_ERR(nn->bl_device_pipe))
+ return PTR_ERR(nn->bl_device_pipe);
+ dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
+ if (IS_ERR(dentry)) {
+ rpc_destroy_pipe_data(nn->bl_device_pipe);
+ return PTR_ERR(dentry);
+ }
+ nn->bl_device_pipe->dentry = dentry;
+ return 0;
+}
+
+static void nfs4blocklayout_net_exit(struct net *net)
+{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
+ rpc_destroy_pipe_data(nn->bl_device_pipe);
+ nn->bl_device_pipe = NULL;
+}
+
+static struct pernet_operations nfs4blocklayout_net_ops = {
+ .init = nfs4blocklayout_net_init,
+ .exit = nfs4blocklayout_net_exit,
+};
+
+static int __init nfs4blocklayout_init(void)
+{
+ int ret;
+
+ dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);
+
+ ret = pnfs_register_layoutdriver(&blocklayout_type);
+ if (ret)
+ goto out;
+
+ ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
+ if (ret)
+ goto out_remove;
+ ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
+ if (ret)
+ goto out_notifier;
+out:
+ return ret;
+
+out_notifier:
+ rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
+out_remove:
+ pnfs_unregister_layoutdriver(&blocklayout_type);
+ return ret;
+}
+
+static void __exit nfs4blocklayout_exit(void)
+{
+ dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
+ __func__);
+
+ rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
+ unregister_pernet_subsys(&nfs4blocklayout_net_ops);
+ pnfs_unregister_layoutdriver(&blocklayout_type);
+}
+
+MODULE_ALIAS("nfs-layouttype4-3");
+
+module_init(nfs4blocklayout_init);
+module_exit(nfs4blocklayout_exit);
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
new file mode 100644
index 00000000..03350690
--- /dev/null
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -0,0 +1,210 @@
+/*
+ * linux/fs/nfs/blocklayout/blocklayout.h
+ *
+ * Module for the NFSv4.1 pNFS block layout driver.
+ *
+ * Copyright (c) 2006 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@citi.umich.edu>
+ * Fred Isaman <iisaman@umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+#ifndef FS_NFS_NFS4BLOCKLAYOUT_H
+#define FS_NFS_NFS4BLOCKLAYOUT_H
+
+#include <linux/device-mapper.h>
+#include <linux/nfs_fs.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+
+#include "../pnfs.h"
+#include "../netns.h"
+
+#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
+#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
+
+struct block_mount_id {
+ spinlock_t bm_lock; /* protects list */
+ struct list_head bm_devlist; /* holds pnfs_block_dev */
+};
+
+struct pnfs_block_dev {
+ struct list_head bm_node;
+ struct nfs4_deviceid bm_mdevid; /* associated devid */
+ struct block_device *bm_mdev; /* meta device itself */
+ struct net *net;
+};
+
+enum exstate4 {
+ PNFS_BLOCK_READWRITE_DATA = 0,
+ PNFS_BLOCK_READ_DATA = 1,
+ PNFS_BLOCK_INVALID_DATA = 2, /* mapped, but data is invalid */
+ PNFS_BLOCK_NONE_DATA = 3 /* unmapped, it's a hole */
+};
+
+#define MY_MAX_TAGS (15) /* tag bitnums used must be less than this */
+
+struct my_tree {
+ sector_t mtt_step_size; /* Internal sector alignment */
+ struct list_head mtt_stub; /* Should be a radix tree */
+};
+
+struct pnfs_inval_markings {
+ spinlock_t im_lock;
+ struct my_tree im_tree; /* Sectors that need LAYOUTCOMMIT */
+ sector_t im_block_size; /* Server blocksize in sectors */
+ struct list_head im_extents; /* Short extents for INVAL->RW conversion */
+};
+
+struct pnfs_inval_tracking {
+ struct list_head it_link;
+ int it_sector;
+ int it_tags;
+};
+
+/* sector_t fields are all in 512-byte sectors */
+struct pnfs_block_extent {
+ struct kref be_refcnt;
+ struct list_head be_node; /* link into lseg list */
+ struct nfs4_deviceid be_devid; /* FIXME: could use device cache instead */
+ struct block_device *be_mdev;
+ sector_t be_f_offset; /* the starting offset in the file */
+ sector_t be_length; /* the size of the extent */
+ sector_t be_v_offset; /* the starting offset in the volume */
+ enum exstate4 be_state; /* the state of this extent */
+ struct pnfs_inval_markings *be_inval; /* tracks INVAL->RW transition */
+};
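
All sector_t fields above count 512-byte sectors, so a file sector isect covered by an extent maps to volume sector isect - be_f_offset + be_v_offset; this is exactly the translation bl_alloc_init_bio() and map_block() apply. A small standalone sketch (the struct and values are invented for illustration):

#include <stdio.h>

/* mirrors the mapping fields of struct pnfs_block_extent */
struct extent {
	unsigned long long f_offset;	/* starting sector in the file */
	unsigned long long length;	/* extent size in sectors */
	unsigned long long v_offset;	/* starting sector on the volume */
};

static long long map_sector(const struct extent *be, unsigned long long isect)
{
	if (isect < be->f_offset || isect >= be->f_offset + be->length)
		return -1;	/* this extent does not cover isect */
	return isect - be->f_offset + be->v_offset;
}

int main(void)
{
	struct extent be = { .f_offset = 64, .length = 128, .v_offset = 4096 };

	printf("%lld\n", map_sector(&be, 100));	/* 100 - 64 + 4096 = 4132 */
	printf("%lld\n", map_sector(&be, 300));	/* -1: outside the extent */
	return 0;
}
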
+
+/* Shortened extent used by LAYOUTCOMMIT */
+struct pnfs_block_short_extent {
+ struct list_head bse_node;
+ struct nfs4_deviceid bse_devid;
+ struct block_device *bse_mdev;
+ sector_t bse_f_offset; /* the starting offset in the file */
+ sector_t bse_length; /* the size of the extent */
+};
+
+static inline void
+BL_INIT_INVAL_MARKS(struct pnfs_inval_markings *marks, sector_t blocksize)
+{
+ spin_lock_init(&marks->im_lock);
+ INIT_LIST_HEAD(&marks->im_tree.mtt_stub);
+ INIT_LIST_HEAD(&marks->im_extents);
+ marks->im_block_size = blocksize;
+ marks->im_tree.mtt_step_size = min((sector_t)PAGE_CACHE_SECTORS,
+ blocksize);
+}
+
+enum extentclass4 {
+ RW_EXTENT = 0, /* READWRITE and INVAL */
+ RO_EXTENT = 1, /* READ and NONE */
+ EXTENT_LISTS = 2,
+};
+
+static inline int bl_choose_list(enum exstate4 state)
+{
+ if (state == PNFS_BLOCK_READ_DATA || state == PNFS_BLOCK_NONE_DATA)
+ return RO_EXTENT;
+ else
+ return RW_EXTENT;
+}
+
+struct pnfs_block_layout {
+ struct pnfs_layout_hdr bl_layout;
+ struct pnfs_inval_markings bl_inval; /* tracks INVAL->RW transition */
+ spinlock_t bl_ext_lock; /* Protects list manipulation */
+ struct list_head bl_extents[EXTENT_LISTS]; /* R and RW extents */
+ struct list_head bl_commit; /* Needs layout commit */
+ struct list_head bl_committing; /* Layout committing */
+ unsigned int bl_count; /* entries in bl_commit */
+ sector_t bl_blocksize; /* Server blocksize in sectors */
+};
+
+#define BLK_ID(lo) ((struct block_mount_id *)(NFS_SERVER(lo->plh_inode)->pnfs_ld_data))
+
+static inline struct pnfs_block_layout *
+BLK_LO2EXT(struct pnfs_layout_hdr *lo)
+{
+ return container_of(lo, struct pnfs_block_layout, bl_layout);
+}
+
+static inline struct pnfs_block_layout *
+BLK_LSEG2EXT(struct pnfs_layout_segment *lseg)
+{
+ return BLK_LO2EXT(lseg->pls_layout);
+}
+
+struct bl_pipe_msg {
+ struct rpc_pipe_msg msg;
+ wait_queue_head_t *bl_wq;
+};
+
+struct bl_msg_hdr {
+ u8 type;
+ u16 totallen; /* length of entire message, including hdr itself */
+};
+
+#define BL_DEVICE_UMOUNT 0x0 /* Umount--delete devices */
+#define BL_DEVICE_MOUNT 0x1 /* Mount--create devices */
+#define BL_DEVICE_REQUEST_INIT 0x0 /* Start request */
+#define BL_DEVICE_REQUEST_PROC 0x1 /* User level process succeeds */
+#define BL_DEVICE_REQUEST_ERR 0x2 /* User level process fails */
+
+/* blocklayoutdev.c */
+ssize_t bl_pipe_downcall(struct file *, const char __user *, size_t);
+void bl_pipe_destroy_msg(struct rpc_pipe_msg *);
+struct block_device *nfs4_blkdev_get(dev_t dev);
+int nfs4_blkdev_put(struct block_device *bdev);
+struct pnfs_block_dev *nfs4_blk_decode_device(struct nfs_server *server,
+ struct pnfs_device *dev);
+int nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
+
+/* blocklayoutdm.c */
+void bl_free_block_dev(struct pnfs_block_dev *bdev);
+
+/* extents.c */
+struct pnfs_block_extent *
+bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
+ struct pnfs_block_extent **cow_read);
+int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
+ sector_t offset, sector_t length);
+void bl_put_extent(struct pnfs_block_extent *be);
+struct pnfs_block_extent *bl_alloc_extent(void);
+int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect);
+int encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *arg);
+void clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
+ const struct nfs4_layoutcommit_args *arg,
+ int status);
+int bl_add_merge_extent(struct pnfs_block_layout *bl,
+ struct pnfs_block_extent *new);
+int bl_mark_for_commit(struct pnfs_block_extent *be,
+ sector_t offset, sector_t length,
+ struct pnfs_block_short_extent *new);
+int bl_push_one_short_extent(struct pnfs_inval_markings *marks);
+struct pnfs_block_short_extent *
+bl_pop_one_short_extent(struct pnfs_inval_markings *marks);
+void bl_free_short_extents(struct pnfs_inval_markings *marks, int num_to_free);
+
+#endif /* FS_NFS_NFS4BLOCKLAYOUT_H */
diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
new file mode 100644
index 00000000..a5c88a55
--- /dev/null
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -0,0 +1,399 @@
+/*
+ * linux/fs/nfs/blocklayout/blocklayoutdev.c
+ *
+ * Device operations for the pnfs nfs4 block layout driver.
+ *
+ * Copyright (c) 2006 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@citi.umich.edu>
+ * Fred Isaman <iisaman@umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+#include <linux/module.h>
+#include <linux/buffer_head.h> /* __bread */
+
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/hash.h>
+
+#include "blocklayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+static int decode_sector_number(__be32 **rp, sector_t *sp)
+{
+ uint64_t s;
+
+ *rp = xdr_decode_hyper(*rp, &s);
+ if (s & 0x1ff) {
+ printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
+ return -1;
+ }
+ *sp = s >> SECTOR_SHIFT;
+ return 0;
+}
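
The s & 0x1ff test rejects any byte quantity that is not a multiple of 512 (0x1ff masks the low nine bits), since everything downstream is stored in whole sectors. A standalone version of the same check:

#include <stdio.h>

static int to_sector(unsigned long long bytes, unsigned long long *sp)
{
	if (bytes & 0x1ff)	/* low 9 bits set: not 512-byte aligned */
		return -1;
	*sp = bytes >> 9;
	return 0;
}

int main(void)
{
	unsigned long long s;

	printf("%d\n", to_sector(4096, &s));	/* 0, s == 8 */
	printf("%d\n", to_sector(4100, &s));	/* -1: rejected */
	return 0;
}
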
+
+/* Open a block_device by device number. */
+struct block_device *nfs4_blkdev_get(dev_t dev)
+{
+ struct block_device *bd;
+
+ dprintk("%s enter\n", __func__);
+ bd = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+ if (IS_ERR(bd))
+ goto fail;
+ return bd;
+fail:
+ dprintk("%s failed to open device : %ld\n",
+ __func__, PTR_ERR(bd));
+ return NULL;
+}
+
+/*
+ * Release the block device
+ */
+int nfs4_blkdev_put(struct block_device *bdev)
+{
+ dprintk("%s for device %d:%d\n", __func__, MAJOR(bdev->bd_dev),
+ MINOR(bdev->bd_dev));
+ return blkdev_put(bdev, FMODE_READ);
+}
+
+ssize_t bl_pipe_downcall(struct file *filp, const char __user *src,
+ size_t mlen)
+{
+ struct nfs_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info,
+ nfs_net_id);
+
+ if (mlen != sizeof (struct bl_dev_msg))
+ return -EINVAL;
+
+ if (copy_from_user(&nn->bl_mount_reply, src, mlen) != 0)
+ return -EFAULT;
+
+ wake_up(&nn->bl_wq);
+
+ return mlen;
+}
+
+void bl_pipe_destroy_msg(struct rpc_pipe_msg *msg)
+{
+ struct bl_pipe_msg *bl_pipe_msg = container_of(msg, struct bl_pipe_msg, msg);
+
+ if (msg->errno >= 0)
+ return;
+ wake_up(bl_pipe_msg->bl_wq);
+}
+
+/*
+ * Decodes pnfs_block_deviceaddr4 which is XDR encoded in dev->dev_addr_buf.
+ */
+struct pnfs_block_dev *
+nfs4_blk_decode_device(struct nfs_server *server,
+ struct pnfs_device *dev)
+{
+ struct pnfs_block_dev *rv;
+ struct block_device *bd = NULL;
+ struct bl_pipe_msg bl_pipe_msg;
+ struct rpc_pipe_msg *msg = &bl_pipe_msg.msg;
+ struct bl_msg_hdr bl_msg = {
+ .type = BL_DEVICE_MOUNT,
+ .totallen = dev->mincount,
+ };
+ uint8_t *dataptr;
+ DECLARE_WAITQUEUE(wq, current);
+ int offset, len, i, rc;
+ struct net *net = server->nfs_client->net;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+ struct bl_dev_msg *reply = &nn->bl_mount_reply;
+
+ dprintk("%s CREATING PIPEFS MESSAGE\n", __func__);
+ dprintk("%s: deviceid: %s, mincount: %d\n", __func__, dev->dev_id.data,
+ dev->mincount);
+
+ bl_pipe_msg.bl_wq = &nn->bl_wq;
+ memset(msg, 0, sizeof(*msg));
+ msg->data = kzalloc(sizeof(bl_msg) + dev->mincount, GFP_NOFS);
+ if (!msg->data) {
+ rv = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ memcpy(msg->data, &bl_msg, sizeof(bl_msg));
+ dataptr = (uint8_t *) msg->data;
+ len = dev->mincount;
+ offset = sizeof(bl_msg);
+ for (i = 0; len > 0; i++) {
+ memcpy(&dataptr[offset], page_address(dev->pages[i]),
+ len < PAGE_CACHE_SIZE ? len : PAGE_CACHE_SIZE);
+ len -= PAGE_CACHE_SIZE;
+ offset += PAGE_CACHE_SIZE;
+ }
+ msg->len = sizeof(bl_msg) + dev->mincount;
+
+ dprintk("%s CALLING USERSPACE DAEMON\n", __func__);
+ add_wait_queue(&nn->bl_wq, &wq);
+ rc = rpc_queue_upcall(nn->bl_device_pipe, msg);
+ if (rc < 0) {
+ remove_wait_queue(&nn->bl_wq, &wq);
+ rv = ERR_PTR(rc);
+ goto out;
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&nn->bl_wq, &wq);
+
+ if (reply->status != BL_DEVICE_REQUEST_PROC) {
+ dprintk("%s failed to open device: %d\n",
+ __func__, reply->status);
+ rv = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ bd = nfs4_blkdev_get(MKDEV(reply->major, reply->minor));
+ if (IS_ERR(bd)) {
+ rc = PTR_ERR(bd);
+ dprintk("%s failed to open device : %d\n", __func__, rc);
+ rv = ERR_PTR(rc);
+ goto out;
+ }
+
+ rv = kzalloc(sizeof(*rv), GFP_NOFS);
+ if (!rv) {
+ rv = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ rv->bm_mdev = bd;
+ memcpy(&rv->bm_mdevid, &dev->dev_id, sizeof(struct nfs4_deviceid));
+ rv->net = net;
+ dprintk("%s Created device %s with bd_block_size %u\n",
+ __func__,
+ bd->bd_disk->disk_name,
+ bd->bd_block_size);
+
+out:
+ kfree(msg->data);
+ return rv;
+}
+
+/* Map deviceid returned by the server to constructed block_device */
+static struct block_device *translate_devid(struct pnfs_layout_hdr *lo,
+ struct nfs4_deviceid *id)
+{
+ struct block_device *rv = NULL;
+ struct block_mount_id *mid;
+ struct pnfs_block_dev *dev;
+
+ dprintk("%s enter, lo=%p, id=%p\n", __func__, lo, id);
+ mid = BLK_ID(lo);
+ spin_lock(&mid->bm_lock);
+ list_for_each_entry(dev, &mid->bm_devlist, bm_node) {
+ if (memcmp(id->data, dev->bm_mdevid.data,
+ NFS4_DEVICEID4_SIZE) == 0) {
+ rv = dev->bm_mdev;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock(&mid->bm_lock);
+ dprintk("%s returning %p\n", __func__, rv);
+ return rv;
+}
+
+/* Tracks info needed to ensure extents in layout obey constraints of spec */
+struct layout_verification {
+ u32 mode; /* R or RW */
+ u64 start; /* Expected start of next non-COW extent */
+ u64 inval; /* Start of INVAL coverage */
+ u64 cowread; /* End of COW read coverage */
+};
+
+/* Verify the extent meets the layout requirements of the pnfs-block draft,
+ * section 2.3.1.
+ */
+static int verify_extent(struct pnfs_block_extent *be,
+ struct layout_verification *lv)
+{
+ if (lv->mode == IOMODE_READ) {
+ if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
+ be->be_state == PNFS_BLOCK_INVALID_DATA)
+ return -EIO;
+ if (be->be_f_offset != lv->start)
+ return -EIO;
+ lv->start += be->be_length;
+ return 0;
+ }
+ /* lv->mode == IOMODE_RW */
+ if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
+ if (be->be_f_offset != lv->start)
+ return -EIO;
+ if (lv->cowread > lv->start)
+ return -EIO;
+ lv->start += be->be_length;
+ lv->inval = lv->start;
+ return 0;
+ } else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+ if (be->be_f_offset != lv->start)
+ return -EIO;
+ lv->start += be->be_length;
+ return 0;
+ } else if (be->be_state == PNFS_BLOCK_READ_DATA) {
+ if (be->be_f_offset > lv->start)
+ return -EIO;
+ if (be->be_f_offset < lv->inval)
+ return -EIO;
+ if (be->be_f_offset < lv->cowread)
+ return -EIO;
+ /* It looks like you might want to min this with lv->start,
+ * but you really don't.
+ */
+ lv->inval = lv->inval + be->be_length;
+ lv->cowread = be->be_f_offset + be->be_length;
+ return 0;
+ } else
+ return -EIO;
+}
+
+/* XDR decode pnfs_block_layout4 structure */
+int
+nfs4_blk_process_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs4_layoutget_res *lgr, gfp_t gfp_flags)
+{
+ struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
+ int i, status = -EIO;
+ uint32_t count;
+ struct pnfs_block_extent *be = NULL, *save;
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ struct page *scratch;
+ __be32 *p;
+ struct layout_verification lv = {
+ .mode = lgr->range.iomode,
+ .start = lgr->range.offset >> SECTOR_SHIFT,
+ .inval = lgr->range.offset >> SECTOR_SHIFT,
+ .cowread = lgr->range.offset >> SECTOR_SHIFT,
+ };
+ LIST_HEAD(extents);
+
+ dprintk("---> %s\n", __func__);
+
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ return -ENOMEM;
+
+ xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err;
+
+ count = be32_to_cpup(p++);
+
+ dprintk("%s enter, number of extents %i\n", __func__, count);
+ p = xdr_inline_decode(&stream, (28 + NFS4_DEVICEID4_SIZE) * count);
+ if (unlikely(!p))
+ goto out_err;
+
+ /* Decode the individual extents, putting them in a temporary
+ * staging area until the whole layout is decoded, to make error
+ * recovery easier.
+ */
+ for (i = 0; i < count; i++) {
+ be = bl_alloc_extent();
+ if (!be) {
+ status = -ENOMEM;
+ goto out_err;
+ }
+ memcpy(&be->be_devid, p, NFS4_DEVICEID4_SIZE);
+ p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
+ be->be_mdev = translate_devid(lo, &be->be_devid);
+ if (!be->be_mdev)
+ goto out_err;
+
+ /* The next three values are read in as bytes,
+ * but stored as 512-byte sector lengths
+ */
+ if (decode_sector_number(&p, &be->be_f_offset) < 0)
+ goto out_err;
+ if (decode_sector_number(&p, &be->be_length) < 0)
+ goto out_err;
+ if (decode_sector_number(&p, &be->be_v_offset) < 0)
+ goto out_err;
+ be->be_state = be32_to_cpup(p++);
+ if (be->be_state == PNFS_BLOCK_INVALID_DATA)
+ be->be_inval = &bl->bl_inval;
+ if (verify_extent(be, &lv)) {
+ dprintk("%s verify failed\n", __func__);
+ goto out_err;
+ }
+ list_add_tail(&be->be_node, &extents);
+ }
+ if (lgr->range.offset + lgr->range.length !=
+ lv.start << SECTOR_SHIFT) {
+ dprintk("%s Final length mismatch\n", __func__);
+ be = NULL;
+ goto out_err;
+ }
+ if (lv.start < lv.cowread) {
+ dprintk("%s Final uncovered COW extent\n", __func__);
+ be = NULL;
+ goto out_err;
+ }
+ /* Extents decoded properly; now try to merge them into the
+ * existing layout extents.
+ */
+ spin_lock(&bl->bl_ext_lock);
+ list_for_each_entry_safe(be, save, &extents, be_node) {
+ list_del(&be->be_node);
+ status = bl_add_merge_extent(bl, be);
+ if (status) {
+ spin_unlock(&bl->bl_ext_lock);
+ /* This is a fairly catastrophic error, as the
+ * entire layout extent lists are now corrupted.
+ * We should have some way to distinguish this.
+ */
+ be = NULL;
+ goto out_err;
+ }
+ }
+ spin_unlock(&bl->bl_ext_lock);
+ status = 0;
+ out:
+ __free_page(scratch);
+ dprintk("%s returns %i\n", __func__, status);
+ return status;
+
+ out_err:
+ bl_put_extent(be);
+ while (!list_empty(&extents)) {
+ be = list_first_entry(&extents, struct pnfs_block_extent,
+ be_node);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ }
+ goto out;
+}
diff --git a/fs/nfs/blocklayout/blocklayoutdm.c b/fs/nfs/blocklayout/blocklayoutdm.c
new file mode 100644
index 00000000..737d839b
--- /dev/null
+++ b/fs/nfs/blocklayout/blocklayoutdm.c
@@ -0,0 +1,114 @@
+/*
+ * linux/fs/nfs/blocklayout/blocklayoutdm.c
+ *
+ * Module for the NFSv4.1 pNFS block layout driver.
+ *
+ * Copyright (c) 2007 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Fred Isaman <iisaman@umich.edu>
+ * Andy Adamson <andros@citi.umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+
+#include <linux/genhd.h> /* gendisk - used in a dprintk */
+#include <linux/sched.h>
+#include <linux/hash.h>
+
+#include "blocklayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+static void dev_remove(struct net *net, dev_t dev)
+{
+ struct bl_pipe_msg bl_pipe_msg;
+ struct rpc_pipe_msg *msg = &bl_pipe_msg.msg;
+ struct bl_dev_msg bl_umount_request;
+ struct bl_msg_hdr bl_msg = {
+ .type = BL_DEVICE_UMOUNT,
+ .totallen = sizeof(bl_umount_request),
+ };
+ uint8_t *dataptr;
+ DECLARE_WAITQUEUE(wq, current);
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ dprintk("Entering %s\n", __func__);
+
+ bl_pipe_msg.bl_wq = &nn->bl_wq;
+ memset(msg, 0, sizeof(*msg));
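+ /* Allocate room for the header followed by the totallen payload bytes. */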
+ msg->data = kzalloc(sizeof(bl_msg) + bl_msg.totallen, GFP_NOFS);
+ if (!msg->data)
+ goto out;
+
+ memset(&bl_umount_request, 0, sizeof(bl_umount_request));
+ bl_umount_request.major = MAJOR(dev);
+ bl_umount_request.minor = MINOR(dev);
+
+ memcpy(msg->data, &bl_msg, sizeof(bl_msg));
+ dataptr = (uint8_t *) msg->data;
+ memcpy(&dataptr[sizeof(bl_msg)], &bl_umount_request, sizeof(bl_umount_request));
+ msg->len = sizeof(bl_msg) + bl_msg.totallen;
+
+ add_wait_queue(&nn->bl_wq, &wq);
+ if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) {
+ remove_wait_queue(&nn->bl_wq, &wq);
+ goto out;
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule();
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&nn->bl_wq, &wq);
+
+out:
+ kfree(msg->data);
+}
+
+/*
+ * Release meta device
+ */
+static void nfs4_blk_metadev_release(struct pnfs_block_dev *bdev)
+{
+ int rv;
+
+ dprintk("%s Releasing\n", __func__);
+ rv = nfs4_blkdev_put(bdev->bm_mdev);
+ if (rv)
+ printk(KERN_ERR "NFS: %s nfs4_blkdev_put returns %d\n",
+ __func__, rv);
+
+ dev_remove(bdev->net, bdev->bm_mdev->bd_dev);
+}
+
+void bl_free_block_dev(struct pnfs_block_dev *bdev)
+{
+ if (bdev) {
+ if (bdev->bm_mdev) {
+ dprintk("%s Removing DM device: %d:%d\n",
+ __func__,
+ MAJOR(bdev->bm_mdev->bd_dev),
+ MINOR(bdev->bm_mdev->bd_dev));
+ nfs4_blk_metadev_release(bdev);
+ }
+ kfree(bdev);
+ }
+}
diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
new file mode 100644
index 00000000..1f9a6032
--- /dev/null
+++ b/fs/nfs/blocklayout/extents.c
@@ -0,0 +1,909 @@
+/*
+ * linux/fs/nfs/blocklayout/extents.c
+ *
+ * Module for the NFSv4.1 pNFS block layout driver.
+ *
+ * Copyright (c) 2006 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Andy Adamson <andros@citi.umich.edu>
+ * Fred Isaman <iisaman@umich.edu>
+ *
+ * permission is granted to use, copy, create derivative works and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the university of michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. if
+ * the above copyright notice or any other identification of the
+ * university of michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * this software is provided as is, without representation from the
+ * university of michigan as to its fitness for any purpose, and without
+ * warranty by the university of michigan of any kind, either express
+ * or implied, including without limitation the implied warranties of
+ * merchantability and fitness for a particular purpose. the regents
+ * of the university of michigan shall not be liable for any damages,
+ * including special, indirect, incidental, or consequential damages,
+ * with respect to any claim arising out or in connection with the use
+ * of the software, even if it has been or is hereafter advised of the
+ * possibility of such damages.
+ */
+
+#include "blocklayout.h"
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+/* Bit numbers */
+#define EXTENT_INITIALIZED 0
+#define EXTENT_WRITTEN 1
+#define EXTENT_IN_COMMIT 2
+#define INTERNAL_EXISTS MY_MAX_TAGS
+#define INTERNAL_MASK ((1 << INTERNAL_EXISTS) - 1)
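+/* INTERNAL_EXISTS marks entries created by _preload_range; INTERNAL_MASK
+ * keeps only the externally visible tag bits when reporting.
+ */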
+
+/* Returns largest t<=s s.t. t%base==0 */
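+/* e.g. normalize(1029, 8) == 1024 and normalize_up(1025, 8) == 1032 */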
+static inline sector_t normalize(sector_t s, int base)
+{
+ sector_t tmp = s; /* Since do_div modifies its argument */
+ return s - do_div(tmp, base);
+}
+
+static inline sector_t normalize_up(sector_t s, int base)
+{
+ return normalize(s + base - 1, base);
+}
+
+/* Complete stub using a list while we determine the API wanted */
+
+/* Returns tags, or negative */
+static int32_t _find_entry(struct my_tree *tree, u64 s)
+{
+ struct pnfs_inval_tracking *pos;
+
+ dprintk("%s(%llu) enter\n", __func__, s);
+ list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) {
+ if (pos->it_sector > s)
+ continue;
+ else if (pos->it_sector == s)
+ return pos->it_tags & INTERNAL_MASK;
+ else
+ break;
+ }
+ return -ENOENT;
+}
+
+static inline
+int _has_tag(struct my_tree *tree, u64 s, int32_t tag)
+{
+ int32_t tags;
+
+ dprintk("%s(%llu, %i) enter\n", __func__, s, tag);
+ s = normalize(s, tree->mtt_step_size);
+ tags = _find_entry(tree, s);
+ if ((tags < 0) || !(tags & (1 << tag)))
+ return 0;
+ else
+ return 1;
+}
+
+/* Creates an entry with the given tag, or if an entry already exists,
+ * unions the tag into it. A newly created entry uses the caller-supplied
+ * storage, so callers passing NULL must guarantee the entry already exists.
+ * Returns the number of entries added, or negative on error.
+ */
+static int _add_entry(struct my_tree *tree, u64 s, int32_t tag,
+ struct pnfs_inval_tracking *storage)
+{
+ int found = 0;
+ struct pnfs_inval_tracking *pos;
+
+ dprintk("%s(%llu, %i, %p) enter\n", __func__, s, tag, storage);
+ list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) {
+ if (pos->it_sector > s)
+ continue;
+ else if (pos->it_sector == s) {
+ found = 1;
+ break;
+ } else
+ break;
+ }
+ if (found) {
+ pos->it_tags |= (1 << tag);
+ return 0;
+ } else {
+ struct pnfs_inval_tracking *new;
+ new = storage;
+ new->it_sector = s;
+ new->it_tags = (1 << tag);
+ list_add(&new->it_link, &pos->it_link);
+ return 1;
+ }
+}
+
+/* XXXX Really want option to not create */
+/* Over range, unions tag with existing entries, else creates entry with tag */
+static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
+{
+ u64 i;
+
+ dprintk("%s(%i, %llu, %llu) enter\n", __func__, tag, s, length);
+ for (i = normalize(s, tree->mtt_step_size); i < s + length;
+ i += tree->mtt_step_size)
+ if (_add_entry(tree, i, tag, NULL))
+ return -ENOMEM;
+ return 0;
+}
+
+/* Ensure that future operations on given range of tree will not malloc */
+static int _preload_range(struct pnfs_inval_markings *marks,
+ u64 offset, u64 length)
+{
+ u64 start, end, s;
+ int count, i, used = 0, status = -ENOMEM;
+ struct pnfs_inval_tracking **storage;
+ struct my_tree *tree = &marks->im_tree;
+
+ dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
+ start = normalize(offset, tree->mtt_step_size);
+ end = normalize_up(offset + length, tree->mtt_step_size);
+ count = (int)(end - start) / (int)tree->mtt_step_size;
+
+ /* Pre-malloc what memory we might need */
+ storage = kcalloc(count, sizeof(*storage), GFP_NOFS);
+ if (!storage)
+ return -ENOMEM;
+ for (i = 0; i < count; i++) {
+ storage[i] = kmalloc(sizeof(struct pnfs_inval_tracking),
+ GFP_NOFS);
+ if (!storage[i])
+ goto out_cleanup;
+ }
+
+ spin_lock_bh(&marks->im_lock);
+ for (s = start; s < end; s += tree->mtt_step_size)
+ used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
+ spin_unlock_bh(&marks->im_lock);
+
+ status = 0;
+
+ out_cleanup:
+ for (i = used; i < count; i++) {
+ if (!storage[i])
+ break;
+ kfree(storage[i]);
+ }
+ kfree(storage);
+ return status;
+}
+
+/* We are relying on page lock to serialize this */
+int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect)
+{
+ int rv;
+
+ spin_lock_bh(&marks->im_lock);
+ rv = _has_tag(&marks->im_tree, isect, EXTENT_INITIALIZED);
+ spin_unlock_bh(&marks->im_lock);
+ return rv;
+}
+
+/* Assume start, end already sector aligned */
+static int
+_range_has_tag(struct my_tree *tree, u64 start, u64 end, int32_t tag)
+{
+ struct pnfs_inval_tracking *pos;
+ u64 expect = 0;
+
+ dprintk("%s(%llu, %llu, %i) enter\n", __func__, start, end, tag);
+ list_for_each_entry_reverse(pos, &tree->mtt_stub, it_link) {
+ if (pos->it_sector >= end)
+ continue;
+ if (!expect) {
+ if ((pos->it_sector == end - tree->mtt_step_size) &&
+ (pos->it_tags & (1 << tag))) {
+ expect = pos->it_sector - tree->mtt_step_size;
+ if (pos->it_sector < tree->mtt_step_size || expect < start)
+ return 1;
+ continue;
+ } else {
+ return 0;
+ }
+ }
+ if (pos->it_sector != expect || !(pos->it_tags & (1 << tag)))
+ return 0;
+ expect -= tree->mtt_step_size;
+ if (expect < start)
+ return 1;
+ }
+ return 0;
+}
+
+static int is_range_written(struct pnfs_inval_markings *marks,
+ sector_t start, sector_t end)
+{
+ int rv;
+
+ spin_lock_bh(&marks->im_lock);
+ rv = _range_has_tag(&marks->im_tree, start, end, EXTENT_WRITTEN);
+ spin_unlock_bh(&marks->im_lock);
+ return rv;
+}
+
+/* Marks sectors in [offset, offset + length) as having been initialized.
+ * All lengths are step-aligned, where step is min(pagesize, blocksize).
+ * Currently assumes offset is page-aligned
+ */
+int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
+ sector_t offset, sector_t length)
+{
+ sector_t start, end;
+
+ dprintk("%s(offset=%llu,len=%llu) enter\n",
+ __func__, (u64)offset, (u64)length);
+
+ start = normalize(offset, marks->im_block_size);
+ end = normalize_up(offset + length, marks->im_block_size);
+ if (_preload_range(marks, start, end - start))
+ goto outerr;
+
+ spin_lock_bh(&marks->im_lock);
+ if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length))
+ goto out_unlock;
+ spin_unlock_bh(&marks->im_lock);
+
+ return 0;
+
+out_unlock:
+ spin_unlock_bh(&marks->im_lock);
+outerr:
+ return -ENOMEM;
+}
+
+/* Marks sectors in [offset, offset + length) as having been written to disk.
+ * All lengths should be block aligned.
+ */
+static int mark_written_sectors(struct pnfs_inval_markings *marks,
+ sector_t offset, sector_t length)
+{
+ int status;
+
+ dprintk("%s(offset=%llu,len=%llu) enter\n", __func__,
+ (u64)offset, (u64)length);
+ spin_lock_bh(&marks->im_lock);
+ status = _set_range(&marks->im_tree, EXTENT_WRITTEN, offset, length);
+ spin_unlock_bh(&marks->im_lock);
+ return status;
+}
+
+static void print_short_extent(struct pnfs_block_short_extent *be)
+{
+ dprintk("PRINT SHORT EXTENT extent %p\n", be);
+ if (be) {
+ dprintk(" be_f_offset %llu\n", (u64)be->bse_f_offset);
+ dprintk(" be_length %llu\n", (u64)be->bse_length);
+ }
+}
+
+static void print_clist(struct list_head *list, unsigned int count)
+{
+ struct pnfs_block_short_extent *be;
+ unsigned int i = 0;
+
+ ifdebug(FACILITY) {
+ printk(KERN_DEBUG "****************\n");
+ printk(KERN_DEBUG "Extent list looks like:\n");
+ list_for_each_entry(be, list, bse_node) {
+ i++;
+ print_short_extent(be);
+ }
+ if (i != count)
+ printk(KERN_DEBUG "\n\nExpected %u entries\n\n\n", count);
+ printk(KERN_DEBUG "****************\n");
+ }
+}
+
+/* Note: In theory, we should do more checking that devids match between
+ * old and new, but if they don't, the lists are too corrupt to salvage anyway.
+ */
+/* Note this is very similar to bl_add_merge_extent */
+static void add_to_commitlist(struct pnfs_block_layout *bl,
+ struct pnfs_block_short_extent *new)
+{
+ struct list_head *clist = &bl->bl_commit;
+ struct pnfs_block_short_extent *old, *save;
+ sector_t end = new->bse_f_offset + new->bse_length;
+
+ dprintk("%s enter\n", __func__);
+ print_short_extent(new);
+ print_clist(clist, bl->bl_count);
+ bl->bl_count++;
+ /* Scan for proper place to insert, extending new to the left
+ * as much as possible.
+ */
+ list_for_each_entry_safe(old, save, clist, bse_node) {
+ if (new->bse_f_offset < old->bse_f_offset)
+ break;
+ if (end <= old->bse_f_offset + old->bse_length) {
+ /* Range is already in list */
+ bl->bl_count--;
+ kfree(new);
+ return;
+ } else if (new->bse_f_offset <=
+ old->bse_f_offset + old->bse_length) {
+ /* new overlaps or abuts existing be */
+ if (new->bse_mdev == old->bse_mdev) {
+ /* extend new to fully replace old */
+ new->bse_length += new->bse_f_offset -
+ old->bse_f_offset;
+ new->bse_f_offset = old->bse_f_offset;
+ list_del(&old->bse_node);
+ bl->bl_count--;
+ kfree(old);
+ }
+ }
+ }
+ /* Note that if we never hit the above break, old will not point to a
+ * valid extent. However, in that case &old->bse_node==list.
+ */
+ list_add_tail(&new->bse_node, &old->bse_node);
+ /* Scan forward for overlaps. If we find any, extend new and
+ * remove the overlapped extent.
+ */
+ old = list_prepare_entry(new, clist, bse_node);
+ list_for_each_entry_safe_continue(old, save, clist, bse_node) {
+ if (end < old->bse_f_offset)
+ break;
+ /* new overlaps or abuts old */
+ if (new->bse_mdev == old->bse_mdev) {
+ if (end < old->bse_f_offset + old->bse_length) {
+ /* extend new to fully cover old */
+ end = old->bse_f_offset + old->bse_length;
+ new->bse_length = end - new->bse_f_offset;
+ }
+ list_del(&old->bse_node);
+ bl->bl_count--;
+ kfree(old);
+ }
+ }
+ dprintk("%s: after merging\n", __func__);
+ print_clist(clist, bl->bl_count);
+}
+
+/* Note the range described by offset, length is guaranteed to be contained
+ * within be.
+ * new will be freed, either by this function or by add_to_commitlist if it
+ * decides not to use it, or after LAYOUTCOMMIT consumes it from the commitlist.
+ */
+int bl_mark_for_commit(struct pnfs_block_extent *be,
+ sector_t offset, sector_t length,
+ struct pnfs_block_short_extent *new)
+{
+ sector_t new_end, end = offset + length;
+ struct pnfs_block_layout *bl = container_of(be->be_inval,
+ struct pnfs_block_layout,
+ bl_inval);
+
+ mark_written_sectors(be->be_inval, offset, length);
+ /* We want to add the range to commit list, but it must be
+ * block-normalized, and verified that the normalized range has
+ * been entirely written to disk.
+ */
+ new->bse_f_offset = offset;
+ offset = normalize(offset, bl->bl_blocksize);
+ if (offset < new->bse_f_offset) {
+ if (is_range_written(be->be_inval, offset, new->bse_f_offset))
+ new->bse_f_offset = offset;
+ else
+ new->bse_f_offset = offset + bl->bl_blocksize;
+ }
+ new_end = normalize_up(end, bl->bl_blocksize);
+ if (end < new_end) {
+ if (is_range_written(be->be_inval, end, new_end))
+ end = new_end;
+ else
+ end = new_end - bl->bl_blocksize;
+ }
+ if (end <= new->bse_f_offset) {
+ kfree(new);
+ return 0;
+ }
+ new->bse_length = end - new->bse_f_offset;
+ new->bse_devid = be->be_devid;
+ new->bse_mdev = be->be_mdev;
+
+ spin_lock(&bl->bl_ext_lock);
+ add_to_commitlist(bl, new);
+ spin_unlock(&bl->bl_ext_lock);
+ return 0;
+}
+
+static void print_bl_extent(struct pnfs_block_extent *be)
+{
+ dprintk("PRINT EXTENT extent %p\n", be);
+ if (be) {
+ dprintk(" be_f_offset %llu\n", (u64)be->be_f_offset);
+ dprintk(" be_length %llu\n", (u64)be->be_length);
+ dprintk(" be_v_offset %llu\n", (u64)be->be_v_offset);
+ dprintk(" be_state %d\n", be->be_state);
+ }
+}
+
+static void
+destroy_extent(struct kref *kref)
+{
+ struct pnfs_block_extent *be;
+
+ be = container_of(kref, struct pnfs_block_extent, be_refcnt);
+ dprintk("%s be=%p\n", __func__, be);
+ kfree(be);
+}
+
+void
+bl_put_extent(struct pnfs_block_extent *be)
+{
+ if (be) {
+ dprintk("%s enter %p (%i)\n", __func__, be,
+ atomic_read(&be->be_refcnt.refcount));
+ kref_put(&be->be_refcnt, destroy_extent);
+ }
+}
+
+struct pnfs_block_extent *bl_alloc_extent(void)
+{
+ struct pnfs_block_extent *be;
+
+ be = kmalloc(sizeof(struct pnfs_block_extent), GFP_NOFS);
+ if (!be)
+ return NULL;
+ INIT_LIST_HEAD(&be->be_node);
+ kref_init(&be->be_refcnt);
+ be->be_inval = NULL;
+ return be;
+}
+
+static void print_elist(struct list_head *list)
+{
+ struct pnfs_block_extent *be;
+ dprintk("****************\n");
+ dprintk("Extent list looks like:\n");
+ list_for_each_entry(be, list, be_node) {
+ print_bl_extent(be);
+ }
+ dprintk("****************\n");
+}
+
+static inline int
+extents_consistent(struct pnfs_block_extent *old, struct pnfs_block_extent *new)
+{
+ /* Note this assumes new->be_f_offset >= old->be_f_offset */
+ return (new->be_state == old->be_state) &&
+ ((new->be_state == PNFS_BLOCK_NONE_DATA) ||
+ ((new->be_v_offset - old->be_v_offset ==
+ new->be_f_offset - old->be_f_offset) &&
+ new->be_mdev == old->be_mdev));
+}
+
+/* Adds new to appropriate list in bl, modifying new and removing existing
+ * extents as appropriate to deal with overlaps.
+ *
+ * See bl_find_get_extent for list constraints.
+ *
+ * The refcount on new is already set. If we end up not using it, or if we
+ * error out, the reference must be put.
+ *
+ * bl->bl_ext_lock is held by caller.
+ */
+int
+bl_add_merge_extent(struct pnfs_block_layout *bl,
+ struct pnfs_block_extent *new)
+{
+ struct pnfs_block_extent *be, *tmp;
+ sector_t end = new->be_f_offset + new->be_length;
+ struct list_head *list;
+
+ dprintk("%s enter with be=%p\n", __func__, new);
+ print_bl_extent(new);
+ list = &bl->bl_extents[bl_choose_list(new->be_state)];
+ print_elist(list);
+
+ /* Scan for proper place to insert, extending new to the left
+ * as much as possible.
+ */
+ list_for_each_entry_safe_reverse(be, tmp, list, be_node) {
+ if (new->be_f_offset >= be->be_f_offset + be->be_length)
+ break;
+ if (new->be_f_offset >= be->be_f_offset) {
+ if (end <= be->be_f_offset + be->be_length) {
+ /* new is a subset of existing be */
+ if (extents_consistent(be, new)) {
+ dprintk("%s: new is subset, ignoring\n",
+ __func__);
+ bl_put_extent(new);
+ return 0;
+ } else {
+ goto out_err;
+ }
+ } else {
+ /* |<-- be -->|
+ * |<-- new -->| */
+ if (extents_consistent(be, new)) {
+ /* extend new to fully replace be */
+ new->be_length += new->be_f_offset -
+ be->be_f_offset;
+ new->be_f_offset = be->be_f_offset;
+ new->be_v_offset = be->be_v_offset;
+ dprintk("%s: removing %p\n", __func__, be);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ } else {
+ goto out_err;
+ }
+ }
+ } else if (end >= be->be_f_offset + be->be_length) {
+ /* new fully covers the existing be */
+ if (extents_consistent(be, new)) {
+ /* extend new to fully replace be */
+ dprintk("%s: removing %p\n", __func__, be);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ } else {
+ goto out_err;
+ }
+ } else if (end > be->be_f_offset) {
+ /* |<-- be -->|
+ *|<-- new -->| */
+ if (extents_consistent(new, be)) {
+ /* extend new to fully replace be */
+ new->be_length += be->be_f_offset + be->be_length -
+ new->be_f_offset - new->be_length;
+ dprintk("%s: removing %p\n", __func__, be);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ } else {
+ goto out_err;
+ }
+ }
+ }
+ /* Note that if we never hit the above break, be will not point to a
+ * valid extent. However, in that case &be->be_node==list.
+ */
+ list_add(&new->be_node, &be->be_node);
+ dprintk("%s: inserting new\n", __func__);
+ print_elist(list);
+ /* FIXME - The per-list consistency checks have all been done,
+ * should now check cross-list consistency.
+ */
+ return 0;
+
+ out_err:
+ bl_put_extent(new);
+ return -EIO;
+}
+
+/* Returns extent, or NULL. If a second READ extent exists, it is returned
+ * in cow_read, if given.
+ *
+ * The extents are kept in two separate ordered lists, one for READ and NONE,
+ * one for READWRITE and INVALID. Within each list, we assume:
+ * 1. Extents are ordered by file offset.
+ * 2. For any given isect, there is at most one extent that matches.
+ */
+struct pnfs_block_extent *
+bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
+ struct pnfs_block_extent **cow_read)
+{
+ struct pnfs_block_extent *be, *cow, *ret;
+ int i;
+
+ dprintk("%s enter with isect %llu\n", __func__, (u64)isect);
+ cow = ret = NULL;
+ spin_lock(&bl->bl_ext_lock);
+ for (i = 0; i < EXTENT_LISTS; i++) {
+ list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) {
+ if (isect >= be->be_f_offset + be->be_length)
+ break;
+ if (isect >= be->be_f_offset) {
+ /* We have found an extent */
+ dprintk("%s Get %p (%i)\n", __func__, be,
+ atomic_read(&be->be_refcnt.refcount));
+ kref_get(&be->be_refcnt);
+ if (!ret)
+ ret = be;
+ else if (be->be_state != PNFS_BLOCK_READ_DATA)
+ bl_put_extent(be);
+ else
+ cow = be;
+ break;
+ }
+ }
+ if (ret &&
+ (!cow_read || ret->be_state != PNFS_BLOCK_INVALID_DATA))
+ break;
+ }
+ spin_unlock(&bl->bl_ext_lock);
+ if (cow_read)
+ *cow_read = cow;
+ print_bl_extent(ret);
+ return ret;
+}
+
+/* Similar to bl_find_get_extent, but called with lock held, and ignores cow */
+static struct pnfs_block_extent *
+bl_find_get_extent_locked(struct pnfs_block_layout *bl, sector_t isect)
+{
+ struct pnfs_block_extent *be, *ret = NULL;
+ int i;
+
+ dprintk("%s enter with isect %llu\n", __func__, (u64)isect);
+ for (i = 0; i < EXTENT_LISTS; i++) {
+ if (ret)
+ break;
+ list_for_each_entry_reverse(be, &bl->bl_extents[i], be_node) {
+ if (isect >= be->be_f_offset + be->be_length)
+ break;
+ if (isect >= be->be_f_offset) {
+ /* We have found an extent */
+ dprintk("%s Get %p (%i)\n", __func__, be,
+ atomic_read(&be->be_refcnt.refcount));
+ kref_get(&be->be_refcnt);
+ ret = be;
+ break;
+ }
+ }
+ }
+ print_bl_extent(ret);
+ return ret;
+}
+
+int
+encode_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *arg)
+{
+ struct pnfs_block_short_extent *lce, *save;
+ unsigned int count = 0;
+ __be32 *p, *xdr_start;
+
+ dprintk("%s enter\n", __func__);
+ /* BUG - creation of bl_commit is buggy - need to wait for
+ * entire block to be marked WRITTEN before it can be added.
+ */
+ spin_lock(&bl->bl_ext_lock);
+ /* TODO: adjust the committed ranges for a possible truncate, and
+ * clip them to the argument range.
+ */
+
+ /* XDR encode the ranges found */
+ xdr_start = xdr_reserve_space(xdr, 8);
+ if (!xdr_start)
+ goto out;
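+ /* Encode each committed range as a block extent: deviceid, file offset
+ * and length in bytes, a zero storage offset, and READWRITE state
+ * (7 words plus the deviceid).
+ */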
+ list_for_each_entry_safe(lce, save, &bl->bl_commit, bse_node) {
+ p = xdr_reserve_space(xdr, 7 * 4 + sizeof(lce->bse_devid.data));
+ if (!p)
+ break;
+ p = xdr_encode_opaque_fixed(p, lce->bse_devid.data, NFS4_DEVICEID4_SIZE);
+ p = xdr_encode_hyper(p, lce->bse_f_offset << SECTOR_SHIFT);
+ p = xdr_encode_hyper(p, lce->bse_length << SECTOR_SHIFT);
+ p = xdr_encode_hyper(p, 0LL);
+ *p++ = cpu_to_be32(PNFS_BLOCK_READWRITE_DATA);
+ list_del(&lce->bse_node);
+ list_add_tail(&lce->bse_node, &bl->bl_committing);
+ bl->bl_count--;
+ count++;
+ }
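+ /* Backfill the opaque body length (all bytes following the length
+ * word) and the number of extents encoded, reserved above.
+ */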
+ xdr_start[0] = cpu_to_be32((xdr->p - xdr_start - 1) * 4);
+ xdr_start[1] = cpu_to_be32(count);
+out:
+ spin_unlock(&bl->bl_ext_lock);
+ dprintk("%s found %i ranges\n", __func__, count);
+ return 0;
+}
+
+/* Helper for set_to_rw() that initializes a new extent */
+static void
+_prep_new_extent(struct pnfs_block_extent *new,
+ struct pnfs_block_extent *orig,
+ sector_t offset, sector_t length, int state)
+{
+ kref_init(&new->be_refcnt);
+ /* don't need to INIT_LIST_HEAD(&new->be_node) */
+ memcpy(&new->be_devid, &orig->be_devid, sizeof(struct nfs4_deviceid));
+ new->be_mdev = orig->be_mdev;
+ new->be_f_offset = offset;
+ new->be_length = length;
+ new->be_v_offset = orig->be_v_offset - orig->be_f_offset + offset;
+ new->be_state = state;
+ new->be_inval = orig->be_inval;
+}
+
+/* Tries to merge be with the extent that precedes it in the list.
+ * Frees storage if it is not used.
+ */
+static struct pnfs_block_extent *
+_front_merge(struct pnfs_block_extent *be, struct list_head *head,
+ struct pnfs_block_extent *storage)
+{
+ struct pnfs_block_extent *prev;
+
+ if (!storage)
+ goto no_merge;
+ if (&be->be_node == head || be->be_node.prev == head)
+ goto no_merge;
+ prev = list_entry(be->be_node.prev, struct pnfs_block_extent, be_node);
+ if ((prev->be_f_offset + prev->be_length != be->be_f_offset) ||
+ !extents_consistent(prev, be))
+ goto no_merge;
+ _prep_new_extent(storage, prev, prev->be_f_offset,
+ prev->be_length + be->be_length, prev->be_state);
+ list_replace(&prev->be_node, &storage->be_node);
+ bl_put_extent(prev);
+ list_del(&be->be_node);
+ bl_put_extent(be);
+ return storage;
+
+ no_merge:
+ kfree(storage);
+ return be;
+}
+
+static u64
+set_to_rw(struct pnfs_block_layout *bl, u64 offset, u64 length)
+{
+ u64 rv = offset + length;
+ struct pnfs_block_extent *be, *e1, *e2, *e3, *new, *old;
+ struct pnfs_block_extent *children[3];
+ struct pnfs_block_extent *merge1 = NULL, *merge2 = NULL;
+ int i = 0, j;
+
+ dprintk("%s(%llu, %llu)\n", __func__, offset, length);
+ /* Create storage for up to three new extents e1, e2, e3 */
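+ /* The INVALID extent found is split into up to three pieces: an
+ * INVALID head (e1), a READWRITE middle (e2) covering the converted
+ * range, and an INVALID tail (e3). When the split lands on an extent
+ * boundary, the spare piece is reused as merge storage (merge1/merge2).
+ */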
+ e1 = kmalloc(sizeof(*e1), GFP_ATOMIC);
+ e2 = kmalloc(sizeof(*e2), GFP_ATOMIC);
+ e3 = kmalloc(sizeof(*e3), GFP_ATOMIC);
+ /* BUG - on allocation failure the split is skipped, yet the range is
+ * still reported as converted */
+ if (!e1 || !e2 || !e3)
+ goto out_nosplit;
+
+ spin_lock(&bl->bl_ext_lock);
+ be = bl_find_get_extent_locked(bl, offset);
+ rv = be->be_f_offset + be->be_length;
+ if (be->be_state != PNFS_BLOCK_INVALID_DATA) {
+ spin_unlock(&bl->bl_ext_lock);
+ goto out_nosplit;
+ }
+ /* Add e* to children, bumping e*'s krefs */
+ if (be->be_f_offset != offset) {
+ _prep_new_extent(e1, be, be->be_f_offset,
+ offset - be->be_f_offset,
+ PNFS_BLOCK_INVALID_DATA);
+ children[i++] = e1;
+ print_bl_extent(e1);
+ } else
+ merge1 = e1;
+ _prep_new_extent(e2, be, offset,
+ min(length, be->be_f_offset + be->be_length - offset),
+ PNFS_BLOCK_READWRITE_DATA);
+ children[i++] = e2;
+ print_bl_extent(e2);
+ if (offset + length < be->be_f_offset + be->be_length) {
+ _prep_new_extent(e3, be, e2->be_f_offset + e2->be_length,
+ be->be_f_offset + be->be_length -
+ offset - length,
+ PNFS_BLOCK_INVALID_DATA);
+ children[i++] = e3;
+ print_bl_extent(e3);
+ } else
+ merge2 = e3;
+
+ /* Remove be from list, and insert the e* */
+ /* We don't get refs on e*, since this list is the base reference
+ * set when init'ed.
+ */
+ if (i < 3)
+ children[i] = NULL;
+ new = children[0];
+ list_replace(&be->be_node, &new->be_node);
+ bl_put_extent(be);
+ new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge1);
+ for (j = 1; j < i; j++) {
+ old = new;
+ new = children[j];
+ list_add(&new->be_node, &old->be_node);
+ }
+ if (merge2) {
+ /* This is a HACK, should just create a _back_merge function */
+ new = list_entry(new->be_node.next,
+ struct pnfs_block_extent, be_node);
+ new = _front_merge(new, &bl->bl_extents[RW_EXTENT], merge2);
+ }
+ spin_unlock(&bl->bl_ext_lock);
+
+ /* Since we removed the base reference above, be is now scheduled for
+ * destruction.
+ */
+ bl_put_extent(be);
+ dprintk("%s returns %llu after split\n", __func__, rv);
+ return rv;
+
+ out_nosplit:
+ kfree(e1);
+ kfree(e2);
+ kfree(e3);
+ dprintk("%s returns %llu without splitting\n", __func__, rv);
+ return rv;
+}
+
+void
+clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
+ const struct nfs4_layoutcommit_args *arg,
+ int status)
+{
+ struct pnfs_block_short_extent *lce, *save;
+
+ dprintk("%s status %d\n", __func__, status);
+ list_for_each_entry_safe(lce, save, &bl->bl_committing, bse_node) {
+ if (likely(!status)) {
+ u64 offset = lce->bse_f_offset;
+ u64 end = offset + lce->bse_length;
+
+ do {
+ offset = set_to_rw(bl, offset, end - offset);
+ } while (offset < end);
+ list_del(&lce->bse_node);
+
+ kfree(lce);
+ } else {
+ list_del(&lce->bse_node);
+ spin_lock(&bl->bl_ext_lock);
+ add_to_commitlist(bl, lce);
+ spin_unlock(&bl->bl_ext_lock);
+ }
+ }
+}
+
+int bl_push_one_short_extent(struct pnfs_inval_markings *marks)
+{
+ struct pnfs_block_short_extent *new;
+
+ new = kmalloc(sizeof(*new), GFP_NOFS);
+ if (unlikely(!new))
+ return -ENOMEM;
+
+ spin_lock_bh(&marks->im_lock);
+ list_add(&new->bse_node, &marks->im_extents);
+ spin_unlock_bh(&marks->im_lock);
+
+ return 0;
+}
+
+struct pnfs_block_short_extent *
+bl_pop_one_short_extent(struct pnfs_inval_markings *marks)
+{
+ struct pnfs_block_short_extent *rv = NULL;
+
+ spin_lock_bh(&marks->im_lock);
+ if (!list_empty(&marks->im_extents)) {
+ rv = list_entry((&marks->im_extents)->next,
+ struct pnfs_block_short_extent, bse_node);
+ list_del_init(&rv->bse_node);
+ }
+ spin_unlock_bh(&marks->im_lock);
+
+ return rv;
+}
+
+void bl_free_short_extents(struct pnfs_inval_markings *marks, int num_to_free)
+{
+ struct pnfs_block_short_extent *se = NULL, *tmp;
+
+ if (num_to_free <= 0)
+ return;
+
+ spin_lock_bh(&marks->im_lock);
+ list_for_each_entry_safe(se, tmp, &marks->im_extents, bse_node) {
+ list_del(&se->bse_node);
+ kfree(se);
+ if (--num_to_free == 0)
+ break;
+ }
+ spin_unlock_bh(&marks->im_lock);
+
+ BUG_ON(num_to_free > 0);
+}
diff --git a/fs/nfs/cache_lib.c b/fs/nfs/cache_lib.c
new file mode 100644
index 00000000..dded2636
--- /dev/null
+++ b/fs/nfs/cache_lib.c
@@ -0,0 +1,165 @@
+/*
+ * linux/fs/nfs/cache_lib.c
+ *
+ * Helper routines for the NFS client caches
+ *
+ * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
+ */
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/slab.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <net/net_namespace.h>
+
+#include "cache_lib.h"
+
+#define NFS_CACHE_UPCALL_PATHLEN 256
+#define NFS_CACHE_UPCALL_TIMEOUT 15
+
+static char nfs_cache_getent_prog[NFS_CACHE_UPCALL_PATHLEN] =
+ "/sbin/nfs_cache_getent";
+static unsigned long nfs_cache_getent_timeout = NFS_CACHE_UPCALL_TIMEOUT;
+
+module_param_string(cache_getent, nfs_cache_getent_prog,
+ sizeof(nfs_cache_getent_prog), 0600);
+MODULE_PARM_DESC(cache_getent, "Path to the client cache upcall program");
+module_param_named(cache_getent_timeout, nfs_cache_getent_timeout, ulong, 0600);
+MODULE_PARM_DESC(cache_getent_timeout, "Timeout (in seconds) after which "
+ "the cache upcall is assumed to have failed");
+
+int nfs_cache_upcall(struct cache_detail *cd, char *entry_name)
+{
+ static char *envp[] = { "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ NULL
+ };
+ char *argv[] = {
+ nfs_cache_getent_prog,
+ cd->name,
+ entry_name,
+ NULL
+ };
+ int ret = -EACCES;
+
+ if (nfs_cache_getent_prog[0] == '\0')
+ goto out;
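+ /* UMH_WAIT_EXEC returns once the helper has been started, without
+ * waiting for it to exit. */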
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+ /*
+ * Disable the upcall mechanism if we're getting an ENOENT or
+ * EACCES error. The admin can re-enable it on the fly by using
+ * sysfs to set the 'cache_getent' parameter once the problem
+ * has been fixed.
+ */
+ if (ret == -ENOENT || ret == -EACCES)
+ nfs_cache_getent_prog[0] = '\0';
+out:
+ return ret > 0 ? 0 : ret;
+}
+
+/*
+ * Deferred request handling
+ */
+void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq)
+{
+ if (atomic_dec_and_test(&dreq->count))
+ kfree(dreq);
+}
+
+static void nfs_dns_cache_revisit(struct cache_deferred_req *d, int toomany)
+{
+ struct nfs_cache_defer_req *dreq;
+
+ dreq = container_of(d, struct nfs_cache_defer_req, deferred_req);
+
+ complete_all(&dreq->completion);
+ nfs_cache_defer_req_put(dreq);
+}
+
+static struct cache_deferred_req *nfs_dns_cache_defer(struct cache_req *req)
+{
+ struct nfs_cache_defer_req *dreq;
+
+ dreq = container_of(req, struct nfs_cache_defer_req, req);
+ dreq->deferred_req.revisit = nfs_dns_cache_revisit;
+ atomic_inc(&dreq->count);
+
+ return &dreq->deferred_req;
+}
+
+struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void)
+{
+ struct nfs_cache_defer_req *dreq;
+
+ dreq = kzalloc(sizeof(*dreq), GFP_KERNEL);
+ if (dreq) {
+ init_completion(&dreq->completion);
+ atomic_set(&dreq->count, 1);
+ dreq->req.defer = nfs_dns_cache_defer;
+ }
+ return dreq;
+}
+
+int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq)
+{
+ if (wait_for_completion_timeout(&dreq->completion,
+ nfs_cache_getent_timeout * HZ) == 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+int nfs_cache_register_sb(struct super_block *sb, struct cache_detail *cd)
+{
+ int ret;
+ struct dentry *dir;
+
+ dir = rpc_d_lookup_sb(sb, "cache");
+ BUG_ON(dir == NULL);
+ ret = sunrpc_cache_register_pipefs(dir, cd->name, 0600, cd);
+ dput(dir);
+ return ret;
+}
+
+int nfs_cache_register_net(struct net *net, struct cache_detail *cd)
+{
+ struct super_block *pipefs_sb;
+ int ret = 0;
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (pipefs_sb) {
+ ret = nfs_cache_register_sb(pipefs_sb, cd);
+ rpc_put_sb_net(net);
+ }
+ return ret;
+}
+
+void nfs_cache_unregister_sb(struct super_block *sb, struct cache_detail *cd)
+{
+ if (cd->u.pipefs.dir)
+ sunrpc_cache_unregister_pipefs(cd);
+}
+
+void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd)
+{
+ struct super_block *pipefs_sb;
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (pipefs_sb) {
+ nfs_cache_unregister_sb(pipefs_sb, cd);
+ rpc_put_sb_net(net);
+ }
+}
+
+void nfs_cache_init(struct cache_detail *cd)
+{
+ sunrpc_init_cache_detail(cd);
+}
+
+void nfs_cache_destroy(struct cache_detail *cd)
+{
+ sunrpc_destroy_cache_detail(cd);
+}
diff --git a/fs/nfs/cache_lib.h b/fs/nfs/cache_lib.h
new file mode 100644
index 00000000..317db95e
--- /dev/null
+++ b/fs/nfs/cache_lib.h
@@ -0,0 +1,33 @@
+/*
+ * Helper routines for the NFS client caches
+ *
+ * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/atomic.h>
+
+/*
+ * Deferred request handling
+ */
+struct nfs_cache_defer_req {
+ struct cache_req req;
+ struct cache_deferred_req deferred_req;
+ struct completion completion;
+ atomic_t count;
+};
+
+extern int nfs_cache_upcall(struct cache_detail *cd, char *entry_name);
+extern struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void);
+extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
+extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);
+
+extern void nfs_cache_init(struct cache_detail *cd);
+extern void nfs_cache_destroy(struct cache_detail *cd);
+extern int nfs_cache_register_net(struct net *net, struct cache_detail *cd);
+extern void nfs_cache_unregister_net(struct net *net, struct cache_detail *cd);
+extern int nfs_cache_register_sb(struct super_block *sb,
+ struct cache_detail *cd);
+extern void nfs_cache_unregister_sb(struct super_block *sb,
+ struct cache_detail *cd);
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
new file mode 100644
index 00000000..38a44c67
--- /dev/null
+++ b/fs/nfs/callback.c
@@ -0,0 +1,412 @@
+/*
+ * linux/fs/nfs/callback.c
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFSv4 callback handling
+ */
+
+#include <linux/completion.h>
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/sunrpc/svcsock.h>
+#include <linux/nfs_fs.h>
+#include <linux/mutex.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/sunrpc/svcauth_gss.h>
+#include <linux/sunrpc/bc_xprt.h>
+
+#include <net/inet_sock.h>
+
+#include "nfs4_fs.h"
+#include "callback.h"
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_CALLBACK
+
+struct nfs_callback_data {
+ unsigned int users;
+ struct svc_serv *serv;
+ struct svc_rqst *rqst;
+ struct task_struct *task;
+};
+
+static struct nfs_callback_data nfs_callback_info[NFS4_MAX_MINOR_VERSION + 1];
+static DEFINE_MUTEX(nfs_callback_mutex);
+static struct svc_program nfs4_callback_program;
+
+unsigned int nfs_callback_set_tcpport;
+unsigned short nfs_callback_tcpport;
+unsigned short nfs_callback_tcpport6;
+#define NFS_CALLBACK_MAXPORTNR (65535U)
+
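+/* Module parameter type "portnr": an unsigned int restricted to the
+ * valid TCP port range for the callback listener.
+ */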
+static int param_set_portnr(const char *val, const struct kernel_param *kp)
+{
+ unsigned long num;
+ int ret;
+
+ if (!val)
+ return -EINVAL;
+ ret = strict_strtoul(val, 0, &num);
+ if (ret == -EINVAL || num > NFS_CALLBACK_MAXPORTNR)
+ return -EINVAL;
+ *((unsigned int *)kp->arg) = num;
+ return 0;
+}
+static struct kernel_param_ops param_ops_portnr = {
+ .set = param_set_portnr,
+ .get = param_get_uint,
+};
+#define param_check_portnr(name, p) __param_check(name, p, unsigned int);
+
+module_param_named(callback_tcpport, nfs_callback_set_tcpport, portnr, 0644);
+
+/*
+ * This is the NFSv4 callback kernel thread.
+ */
+static int
+nfs4_callback_svc(void *vrqstp)
+{
+ int err, preverr = 0;
+ struct svc_rqst *rqstp = vrqstp;
+
+ set_freezable();
+
+ while (!kthread_should_stop()) {
+ /*
+ * Listen for a request on the socket
+ */
+ err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
+ if (err == -EAGAIN || err == -EINTR) {
+ preverr = err;
+ continue;
+ }
+ if (err < 0) {
+ if (err != preverr) {
+ printk(KERN_WARNING "NFS: %s: unexpected error "
+ "from svc_recv (%d)\n", __func__, err);
+ preverr = err;
+ }
+ schedule_timeout_uninterruptible(HZ);
+ continue;
+ }
+ preverr = err;
+ svc_process(rqstp);
+ }
+ return 0;
+}
+
+/*
+ * Prepare to bring up the NFSv4 callback service
+ */
+static struct svc_rqst *
+nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
+{
+ int ret;
+
+ ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET,
+ nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
+ if (ret <= 0)
+ goto out_err;
+ nfs_callback_tcpport = ret;
+ dprintk("NFS: Callback listener port = %u (af %u)\n",
+ nfs_callback_tcpport, PF_INET);
+
+ ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6,
+ nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
+ if (ret > 0) {
+ nfs_callback_tcpport6 = ret;
+ dprintk("NFS: Callback listener port = %u (af %u)\n",
+ nfs_callback_tcpport6, PF_INET6);
+ } else if (ret == -EAFNOSUPPORT)
+ ret = 0;
+ else
+ goto out_err;
+
+ return svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
+
+out_err:
+ if (ret == 0)
+ ret = -ENOMEM;
+ return ERR_PTR(ret);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * The callback service for NFSv4.1 callbacks
+ */
+static int
+nfs41_callback_svc(void *vrqstp)
+{
+ struct svc_rqst *rqstp = vrqstp;
+ struct svc_serv *serv = rqstp->rq_server;
+ struct rpc_rqst *req;
+ int error;
+ DEFINE_WAIT(wq);
+
+ set_freezable();
+
+ while (!kthread_should_stop()) {
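+ /* Pull queued backchannel requests off sv_cb_list and hand them
+ * to bc_svc_process(); sleep when the list is empty.
+ */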
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ spin_lock_bh(&serv->sv_cb_lock);
+ if (!list_empty(&serv->sv_cb_list)) {
+ req = list_first_entry(&serv->sv_cb_list,
+ struct rpc_rqst, rq_bc_list);
+ list_del(&req->rq_bc_list);
+ spin_unlock_bh(&serv->sv_cb_lock);
+ dprintk("Invoking bc_svc_process()\n");
+ error = bc_svc_process(serv, req, rqstp);
+ dprintk("bc_svc_process() returned w/ error code= %d\n",
+ error);
+ } else {
+ spin_unlock_bh(&serv->sv_cb_lock);
+ schedule();
+ }
+ finish_wait(&serv->sv_cb_waitq, &wq);
+ }
+ return 0;
+}
+
+/*
+ * Bring up the NFSv4.1 callback service
+ */
+static struct svc_rqst *
+nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
+{
+ struct svc_rqst *rqstp;
+ int ret;
+
+ /*
+ * Create an svc_sock for the back channel service that shares the
+ * fore channel connection.
+ * Returns the input port (0) and sets the svc_serv bc_xprt on success
+ */
+ ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
+ SVC_SOCK_ANONYMOUS);
+ if (ret < 0) {
+ rqstp = ERR_PTR(ret);
+ goto out;
+ }
+
+ /*
+ * Save the svc_serv in the transport so that it can
+ * be referenced when the session backchannel is initialized
+ */
+ xprt->bc_serv = serv;
+
+ INIT_LIST_HEAD(&serv->sv_cb_list);
+ spin_lock_init(&serv->sv_cb_lock);
+ init_waitqueue_head(&serv->sv_cb_waitq);
+ rqstp = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
+ if (IS_ERR(rqstp)) {
+ svc_xprt_put(serv->sv_bc_xprt);
+ serv->sv_bc_xprt = NULL;
+ }
+out:
+ dprintk("--> %s return %ld\n", __func__,
+ IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0);
+ return rqstp;
+}
+
+static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
+ struct svc_serv *serv, struct rpc_xprt *xprt,
+ struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+{
+ if (minorversion) {
+ *rqstpp = nfs41_callback_up(serv, xprt);
+ *callback_svc = nfs41_callback_svc;
+ }
+ return minorversion;
+}
+
+static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
+ struct nfs_callback_data *cb_info)
+{
+ if (minorversion)
+ xprt->bc_serv = cb_info->serv;
+}
+#else
+static inline int nfs_minorversion_callback_svc_setup(u32 minorversion,
+ struct svc_serv *serv, struct rpc_xprt *xprt,
+ struct svc_rqst **rqstpp, int (**callback_svc)(void *vrqstp))
+{
+ return 0;
+}
+
+static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
+ struct nfs_callback_data *cb_info)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Bring up the callback thread if it is not already up.
+ */
+int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
+{
+ struct svc_serv *serv = NULL;
+ struct svc_rqst *rqstp;
+ int (*callback_svc)(void *vrqstp);
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+ char svc_name[12];
+ int ret = 0;
+ int minorversion_setup;
+ struct net *net = &init_net;
+
+ mutex_lock(&nfs_callback_mutex);
+ if (cb_info->users++ || cb_info->task != NULL) {
+ nfs_callback_bc_serv(minorversion, xprt, cb_info);
+ goto out;
+ }
+ serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
+ if (!serv) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ ret = svc_bind(serv, net);
+ if (ret < 0) {
+ printk(KERN_WARNING "NFS: bind callback service failed\n");
+ goto out_err;
+ }
+
+ minorversion_setup = nfs_minorversion_callback_svc_setup(minorversion,
+ serv, xprt, &rqstp, &callback_svc);
+ if (!minorversion_setup) {
+ /* v4.0 callback setup */
+ rqstp = nfs4_callback_up(serv, xprt);
+ callback_svc = nfs4_callback_svc;
+ }
+
+ if (IS_ERR(rqstp)) {
+ ret = PTR_ERR(rqstp);
+ goto out_err;
+ }
+
+ svc_sock_update_bufs(serv);
+
+ sprintf(svc_name, "nfsv4.%u-svc", minorversion);
+ cb_info->serv = serv;
+ cb_info->rqst = rqstp;
+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
+ if (IS_ERR(cb_info->task)) {
+ ret = PTR_ERR(cb_info->task);
+ svc_exit_thread(cb_info->rqst);
+ cb_info->rqst = NULL;
+ cb_info->task = NULL;
+ goto out_err;
+ }
+out:
+ /*
+ * svc_create creates the svc_serv with sv_nrthreads == 1, and then
+ * svc_prepare_thread increments that. So we need to call svc_destroy
+ * on both success and failure so that the refcount is 1 when the
+ * thread exits.
+ */
+ if (serv)
+ svc_destroy(serv);
+ mutex_unlock(&nfs_callback_mutex);
+ return ret;
+out_err:
+ dprintk("NFS: Couldn't create callback socket or server thread; "
+ "err = %d\n", ret);
+ cb_info->users--;
+ if (serv)
+ svc_shutdown_net(serv, net);
+ goto out;
+}
+
+/*
+ * Kill the callback thread if it's no longer being used.
+ */
+void nfs_callback_down(int minorversion)
+{
+ struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
+
+ mutex_lock(&nfs_callback_mutex);
+ cb_info->users--;
+ if (cb_info->users == 0 && cb_info->task != NULL) {
+ kthread_stop(cb_info->task);
+ svc_shutdown_net(cb_info->serv, &init_net);
+ svc_exit_thread(cb_info->rqst);
+ cb_info->serv = NULL;
+ cb_info->rqst = NULL;
+ cb_info->task = NULL;
+ }
+ mutex_unlock(&nfs_callback_mutex);
+}
+
+/* Boolean check of RPC_AUTH_GSS principal */
+int
+check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
+{
+ char *p = svc_gss_principal(rqstp);
+
+ if (rqstp->rq_authop->flavour != RPC_AUTH_GSS)
+ return 1;
+
+ /* No RPC_AUTH_GSS on NFSv4.1 back channel yet */
+ if (clp->cl_minorversion != 0)
+ return 0;
+ /*
+ * It might just be a normal user principal, in which case
+ * userspace won't bother to tell us the name at all.
+ */
+ if (p == NULL)
+ return 0;
+
+ /* Expect a GSS_C_NT_HOSTBASED_NAME like "nfs@serverhostname" */
+
+ if (memcmp(p, "nfs@", 4) != 0)
+ return 0;
+ p += 4;
+ if (strcmp(p, clp->cl_hostname) != 0)
+ return 0;
+ return 1;
+}
+
+/*
+ * pg_authenticate method for nfsv4 callback threads.
+ *
+ * The authflavor has been negotiated, so an incorrect flavor is a server
+ * bug. Drop packets with incorrect authflavor.
+ *
+ * All other checking done after NFS decoding where the nfs_client can be
+ * found in nfs4_callback_compound
+ */
+static int nfs_callback_authenticate(struct svc_rqst *rqstp)
+{
+ switch (rqstp->rq_authop->flavour) {
+ case RPC_AUTH_NULL:
+ if (rqstp->rq_proc != CB_NULL)
+ return SVC_DROP;
+ break;
+ case RPC_AUTH_GSS:
+ /* No RPC_AUTH_GSS support yet in NFSv4.1 */
+ if (svc_is_backchannel(rqstp))
+ return SVC_DROP;
+ }
+ return SVC_OK;
+}
+
+/*
+ * Define NFS4 callback program
+ */
+static struct svc_version *nfs4_callback_version[] = {
+ [1] = &nfs4_callback_version1,
+ [4] = &nfs4_callback_version4,
+};
+
+static struct svc_stat nfs4_callback_stats;
+
+static struct svc_program nfs4_callback_program = {
+ .pg_prog = NFS4_CALLBACK, /* RPC service number */
+ .pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */
+ .pg_vers = nfs4_callback_version, /* version table */
+ .pg_name = "NFSv4 callback", /* service name */
+ .pg_class = "nfs", /* authentication class */
+ .pg_stats = &nfs4_callback_stats,
+ .pg_authenticate = nfs_callback_authenticate,
+};
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
new file mode 100644
index 00000000..a5527c90
--- /dev/null
+++ b/fs/nfs/callback.h
@@ -0,0 +1,214 @@
+/*
+ * linux/fs/nfs/callback.h
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFSv4 callback definitions
+ */
+#ifndef __LINUX_FS_NFS_CALLBACK_H
+#define __LINUX_FS_NFS_CALLBACK_H
+#include <linux/sunrpc/svc.h>
+
+#define NFS4_CALLBACK 0x40000000
+#define NFS4_CALLBACK_XDRSIZE 2048
+#define NFS4_CALLBACK_BUFSIZE (1024 + NFS4_CALLBACK_XDRSIZE)
+
+enum nfs4_callback_procnum {
+ CB_NULL = 0,
+ CB_COMPOUND = 1,
+};
+
+enum nfs4_callback_opnum {
+ OP_CB_GETATTR = 3,
+ OP_CB_RECALL = 4,
+/* Callback operations new to NFSv4.1 */
+ OP_CB_LAYOUTRECALL = 5,
+ OP_CB_NOTIFY = 6,
+ OP_CB_PUSH_DELEG = 7,
+ OP_CB_RECALL_ANY = 8,
+ OP_CB_RECALLABLE_OBJ_AVAIL = 9,
+ OP_CB_RECALL_SLOT = 10,
+ OP_CB_SEQUENCE = 11,
+ OP_CB_WANTS_CANCELLED = 12,
+ OP_CB_NOTIFY_LOCK = 13,
+ OP_CB_NOTIFY_DEVICEID = 14,
+ OP_CB_ILLEGAL = 10044,
+};
+
+struct cb_process_state {
+ __be32 drc_status;
+ struct nfs_client *clp;
+ u32 slotid;
+ struct net *net;
+};
+
+struct cb_compound_hdr_arg {
+ unsigned int taglen;
+ const char *tag;
+ unsigned int minorversion;
+ unsigned int cb_ident; /* v4.0 callback identifier */
+ unsigned nops;
+};
+
+struct cb_compound_hdr_res {
+ __be32 *status;
+ unsigned int taglen;
+ const char *tag;
+ __be32 *nops;
+};
+
+struct cb_getattrargs {
+ struct sockaddr *addr;
+ struct nfs_fh fh;
+ uint32_t bitmap[2];
+};
+
+struct cb_getattrres {
+ __be32 status;
+ uint32_t bitmap[2];
+ uint64_t size;
+ uint64_t change_attr;
+ struct timespec ctime;
+ struct timespec mtime;
+};
+
+struct cb_recallargs {
+ struct sockaddr *addr;
+ struct nfs_fh fh;
+ nfs4_stateid stateid;
+ uint32_t truncate;
+};
+
+#if defined(CONFIG_NFS_V4_1)
+
+struct referring_call {
+ uint32_t rc_sequenceid;
+ uint32_t rc_slotid;
+};
+
+struct referring_call_list {
+ struct nfs4_sessionid rcl_sessionid;
+ uint32_t rcl_nrefcalls;
+ struct referring_call *rcl_refcalls;
+};
+
+struct cb_sequenceargs {
+ struct sockaddr *csa_addr;
+ struct nfs4_sessionid csa_sessionid;
+ uint32_t csa_sequenceid;
+ uint32_t csa_slotid;
+ uint32_t csa_highestslotid;
+ uint32_t csa_cachethis;
+ uint32_t csa_nrclists;
+ struct referring_call_list *csa_rclists;
+};
+
+struct cb_sequenceres {
+ __be32 csr_status;
+ struct nfs4_sessionid csr_sessionid;
+ uint32_t csr_sequenceid;
+ uint32_t csr_slotid;
+ uint32_t csr_highestslotid;
+ uint32_t csr_target_highestslotid;
+};
+
+extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
+ struct cb_sequenceres *res,
+ struct cb_process_state *cps);
+
+extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid);
+
+#define RCA4_TYPE_MASK_RDATA_DLG 0
+#define RCA4_TYPE_MASK_WDATA_DLG 1
+#define RCA4_TYPE_MASK_DIR_DLG 2
+#define RCA4_TYPE_MASK_FILE_LAYOUT 3
+#define RCA4_TYPE_MASK_BLK_LAYOUT 4
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN 8
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX 9
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15
+#define RCA4_TYPE_MASK_ALL 0xf31f
+
+struct cb_recallanyargs {
+ struct sockaddr *craa_addr;
+ uint32_t craa_objs_to_keep;
+ uint32_t craa_type_mask;
+};
+
+extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args,
+ void *dummy,
+ struct cb_process_state *cps);
+
+struct cb_recallslotargs {
+ struct sockaddr *crsa_addr;
+ uint32_t crsa_target_max_slots;
+};
+extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args,
+ void *dummy,
+ struct cb_process_state *cps);
+
+struct cb_layoutrecallargs {
+ struct sockaddr *cbl_addr;
+ uint32_t cbl_recall_type;
+ uint32_t cbl_layout_type;
+ uint32_t cbl_layoutchanged;
+ union {
+ struct {
+ struct nfs_fh cbl_fh;
+ struct pnfs_layout_range cbl_range;
+ nfs4_stateid cbl_stateid;
+ };
+ struct nfs_fsid cbl_fsid;
+ };
+};
+
+extern __be32 nfs4_callback_layoutrecall(
+ struct cb_layoutrecallargs *args,
+ void *dummy, struct cb_process_state *cps);
+
+extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
+
+struct cb_devicenotifyitem {
+ uint32_t cbd_notify_type;
+ uint32_t cbd_layout_type;
+ struct nfs4_deviceid cbd_dev_id;
+ uint32_t cbd_immediate;
+};
+
+struct cb_devicenotifyargs {
+ int ndevs;
+ struct cb_devicenotifyitem *devs;
+};
+
+extern __be32 nfs4_callback_devicenotify(
+ struct cb_devicenotifyargs *args,
+ void *dummy, struct cb_process_state *cps);
+
+#endif /* CONFIG_NFS_V4_1 */
+extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
+extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+ struct cb_getattrres *res,
+ struct cb_process_state *cps);
+extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+ struct cb_process_state *cps);
+#ifdef CONFIG_NFS_V4
+extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
+extern void nfs_callback_down(int minorversion);
+extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
+ const nfs4_stateid *stateid);
+extern int nfs4_set_callback_sessionid(struct nfs_client *clp);
+#endif /* CONFIG_NFS_V4 */
+/*
+ * nfs41: Callbacks are expected to not cause substantial latency,
+ * so we limit their concurrency to 1 by setting up the maximum number
+ * of slots for the backchannel.
+ */
+#define NFS41_BC_MIN_CALLBACKS 1
+#define NFS41_BC_MAX_CALLBACKS 1
+
+extern unsigned int nfs_callback_set_tcpport;
+extern unsigned short nfs_callback_tcpport;
+extern unsigned short nfs_callback_tcpport6;
+
+#endif /* __LINUX_FS_NFS_CALLBACK_H */
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
new file mode 100644
index 00000000..1b5d809a
--- /dev/null
+++ b/fs/nfs/callback_proc.c
@@ -0,0 +1,569 @@
+/*
+ * linux/fs/nfs/callback_proc.c
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFSv4 callback procedures
+ */
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include "nfs4_fs.h"
+#include "callback.h"
+#include "delegation.h"
+#include "internal.h"
+#include "pnfs.h"
+
+#ifdef NFS_DEBUG
+#define NFSDBG_FACILITY NFSDBG_CALLBACK
+#endif
+
+__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+ struct cb_getattrres *res,
+ struct cb_process_state *cps)
+{
+ struct nfs_delegation *delegation;
+ struct nfs_inode *nfsi;
+ struct inode *inode;
+
+ res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
+ goto out;
+
+ res->bitmap[0] = res->bitmap[1] = 0;
+ res->status = htonl(NFS4ERR_BADHANDLE);
+
+ dprintk_rcu("NFS: GETATTR callback request from %s\n",
+ rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+ inode = nfs_delegation_find_inode(cps->clp, &args->fh);
+ if (inode == NULL)
+ goto out;
+ nfsi = NFS_I(inode);
+ rcu_read_lock();
+ delegation = rcu_dereference(nfsi->delegation);
+ if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
+ goto out_iput;
+ res->size = i_size_read(inode);
+ res->change_attr = delegation->change_attr;
+ if (nfsi->npages != 0)
+ res->change_attr++;
+ res->ctime = inode->i_ctime;
+ res->mtime = inode->i_mtime;
+ res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
+ args->bitmap[0];
+ res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
+ args->bitmap[1];
+ res->status = 0;
+out_iput:
+ rcu_read_unlock();
+ iput(inode);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
+ return res->status;
+}
+
+__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+ struct cb_process_state *cps)
+{
+ struct inode *inode;
+ __be32 res;
+
+ res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
+ goto out;
+
+ dprintk_rcu("NFS: RECALL callback request from %s\n",
+ rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+ res = htonl(NFS4ERR_BADHANDLE);
+ inode = nfs_delegation_find_inode(cps->clp, &args->fh);
+ if (inode == NULL)
+ goto out;
+ /* Set up a helper thread to actually return the delegation */
+ switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
+ case 0:
+ res = 0;
+ break;
+ case -ENOENT:
+ res = htonl(NFS4ERR_BAD_STATEID);
+ break;
+ default:
+ res = htonl(NFS4ERR_RESOURCE);
+ }
+ iput(inode);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
+ return res;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * Lookup a layout by filehandle.
+ *
+ * Note: gets a refcount on the layout hdr and on its respective inode.
+ * Caller must put the layout hdr and the inode.
+ *
+ * TODO: keep track of all layouts (and delegations) in a hash table
+ * hashed by filehandle.
+ */
+static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp, struct nfs_fh *fh)
+{
+ struct nfs_server *server;
+ struct inode *ino;
+ struct pnfs_layout_hdr *lo;
+
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ list_for_each_entry(lo, &server->layouts, plh_layouts) {
+ if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
+ continue;
+ ino = igrab(lo->plh_inode);
+ if (!ino)
+ continue;
+ get_layout_hdr(lo);
+ return lo;
+ }
+ }
+
+ return NULL;
+}
+
+static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp, struct nfs_fh *fh)
+{
+ struct pnfs_layout_hdr *lo;
+
+ spin_lock(&clp->cl_lock);
+ rcu_read_lock();
+ lo = get_layout_by_fh_locked(clp, fh);
+ rcu_read_unlock();
+ spin_unlock(&clp->cl_lock);
+
+ return lo;
+}
+
+static u32 initiate_file_draining(struct nfs_client *clp,
+ struct cb_layoutrecallargs *args)
+{
+ struct inode *ino;
+ struct pnfs_layout_hdr *lo;
+ u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+ LIST_HEAD(free_me_list);
+
+ lo = get_layout_by_fh(clp, &args->cbl_fh);
+ if (!lo)
+ return NFS4ERR_NOMATCHING_LAYOUT;
+
+ ino = lo->plh_inode;
+ spin_lock(&ino->i_lock);
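+	/* Tell the server to retry (NFS4ERR_DELAY) while a bulk recall is in
+	 * progress or matching layout segments are still in use */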
+ if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+ mark_matching_lsegs_invalid(lo, &free_me_list,
+ &args->cbl_range))
+ rv = NFS4ERR_DELAY;
+ else
+ rv = NFS4ERR_NOMATCHING_LAYOUT;
+ pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
+ spin_unlock(&ino->i_lock);
+ pnfs_free_lseg_list(&free_me_list);
+ put_layout_hdr(lo);
+ iput(ino);
+ return rv;
+}
+
+static u32 initiate_bulk_draining(struct nfs_client *clp,
+ struct cb_layoutrecallargs *args)
+{
+ struct nfs_server *server;
+ struct pnfs_layout_hdr *lo;
+ struct inode *ino;
+ u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+ struct pnfs_layout_hdr *tmp;
+ LIST_HEAD(recall_list);
+ LIST_HEAD(free_me_list);
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
+
+ spin_lock(&clp->cl_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ if ((args->cbl_recall_type == RETURN_FSID) &&
+ memcmp(&server->fsid, &args->cbl_fsid,
+ sizeof(struct nfs_fsid)))
+ continue;
+
+ list_for_each_entry(lo, &server->layouts, plh_layouts) {
+ if (!igrab(lo->plh_inode))
+ continue;
+ get_layout_hdr(lo);
+ BUG_ON(!list_empty(&lo->plh_bulk_recall));
+ list_add(&lo->plh_bulk_recall, &recall_list);
+ }
+ }
+ rcu_read_unlock();
+ spin_unlock(&clp->cl_lock);
+
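+	/* All layouts and inodes on recall_list are now pinned; drain them
+	 * outside the RCU and client locks, since iput() may sleep */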
+ list_for_each_entry_safe(lo, tmp,
+ &recall_list, plh_bulk_recall) {
+ ino = lo->plh_inode;
+ spin_lock(&ino->i_lock);
+ set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+ if (mark_matching_lsegs_invalid(lo, &free_me_list, &range))
+ rv = NFS4ERR_DELAY;
+ list_del_init(&lo->plh_bulk_recall);
+ spin_unlock(&ino->i_lock);
+ pnfs_free_lseg_list(&free_me_list);
+ put_layout_hdr(lo);
+ iput(ino);
+ }
+ return rv;
+}
+
+static u32 do_callback_layoutrecall(struct nfs_client *clp,
+ struct cb_layoutrecallargs *args)
+{
+ u32 res;
+
+ dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
+ if (args->cbl_recall_type == RETURN_FILE)
+ res = initiate_file_draining(clp, args);
+ else
+ res = initiate_bulk_draining(clp, args);
+ dprintk("%s returning %i\n", __func__, res);
+ return res;
+}
+
+__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
+ void *dummy, struct cb_process_state *cps)
+{
+ u32 res;
+
+ dprintk("%s: -->\n", __func__);
+
+ if (cps->clp)
+ res = do_callback_layoutrecall(cps->clp, args);
+ else
+ res = NFS4ERR_OP_NOT_IN_SESSION;
+
+ dprintk("%s: exit with status = %d\n", __func__, res);
+ return cpu_to_be32(res);
+}
+
+static void pnfs_recall_all_layouts(struct nfs_client *clp)
+{
+ struct cb_layoutrecallargs args;
+
+ /* Pretend we got a CB_LAYOUTRECALL(ALL) */
+ memset(&args, 0, sizeof(args));
+ args.cbl_recall_type = RETURN_ALL;
+ /* FIXME we ignore errors, what should we do? */
+ do_callback_layoutrecall(clp, &args);
+}
+
+__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
+ void *dummy, struct cb_process_state *cps)
+{
+ int i;
+ __be32 res = 0;
+ struct nfs_client *clp = cps->clp;
+ struct nfs_server *server = NULL;
+
+ dprintk("%s: -->\n", __func__);
+
+ if (!clp) {
+ res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
+ goto out;
+ }
+
+ for (i = 0; i < args->ndevs; i++) {
+ struct cb_devicenotifyitem *dev = &args->devs[i];
+
+ if (!server ||
+ server->pnfs_curr_ld->id != dev->cbd_layout_type) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ if (server->pnfs_curr_ld &&
+ server->pnfs_curr_ld->id == dev->cbd_layout_type) {
+ rcu_read_unlock();
+ goto found;
+ }
+ rcu_read_unlock();
+ dprintk("%s: layout type %u not found\n",
+ __func__, dev->cbd_layout_type);
+ continue;
+ }
+
+ found:
+ if (dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE)
+ dprintk("%s: NOTIFY_DEVICEID4_CHANGE not supported, "
+ "deleting instead\n", __func__);
+ nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
+ }
+
+out:
+ kfree(args->devs);
+ dprintk("%s: exit with status = %u\n",
+ __func__, be32_to_cpu(res));
+ return res;
+}
+
+/*
+ * Validate the sequenceID sent by the server.
+ * Return success if the sequenceID is one more than what we last saw on
+ * this slot, accounting for wraparound. Increments the slot's sequence.
+ *
+ * We don't yet implement a duplicate request cache; instead we set the
+ * back channel ca_maxresponsesize_cached to zero. This is OK for now
+ * since we currently only implement idempotent callbacks anyway.
+ *
+ * We have a single slot backchannel at this time, so we don't bother
+ * checking the used_slots bit array on the table. The lower layer guarantees
+ * a single outstanding callback request at a time.
+ */
+static __be32
+validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args)
+{
+ struct nfs4_slot *slot;
+
+ dprintk("%s enter. slotid %d seqid %d\n",
+ __func__, args->csa_slotid, args->csa_sequenceid);
+
+ if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
+ return htonl(NFS4ERR_BADSLOT);
+
+ slot = tbl->slots + args->csa_slotid;
+ dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);
+
+ /* Normal */
+ if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
+ slot->seq_nr++;
+ goto out_ok;
+ }
+
+ /* Replay */
+ if (args->csa_sequenceid == slot->seq_nr) {
+ dprintk("%s seqid %d is a replay\n",
+ __func__, args->csa_sequenceid);
+ /* Signal process_op to set this error on next op */
+ if (args->csa_cachethis == 0)
+ return htonl(NFS4ERR_RETRY_UNCACHED_REP);
+
+ /* The ca_maxresponsesize_cached is 0 with no DRC */
+ else if (args->csa_cachethis == 1)
+ return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+ }
+
+ /* Wraparound */
+ if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
+ slot->seq_nr = 1;
+ goto out_ok;
+ }
+
+ /* Misordered request */
+ return htonl(NFS4ERR_SEQ_MISORDERED);
+out_ok:
+ tbl->highest_used_slotid = args->csa_slotid;
+ return htonl(NFS4_OK);
+}
+
+/*
+ * For each referring call triple, check the session's slot table for
+ * a match. If the slot is in use and the sequence numbers match, the
+ * client is still waiting for a response to the original request.
+ */
+static bool referring_call_exists(struct nfs_client *clp,
+ uint32_t nrclists,
+ struct referring_call_list *rclists)
+{
+	bool status = false;
+ int i, j;
+ struct nfs4_session *session;
+ struct nfs4_slot_table *tbl;
+ struct referring_call_list *rclist;
+ struct referring_call *ref;
+
+ /*
+ * XXX When client trunking is implemented, this becomes
+ * a session lookup from within the loop
+ */
+ session = clp->cl_session;
+ tbl = &session->fc_slot_table;
+
+ for (i = 0; i < nrclists; i++) {
+ rclist = &rclists[i];
+ if (memcmp(session->sess_id.data,
+ rclist->rcl_sessionid.data,
+ NFS4_MAX_SESSIONID_LEN) != 0)
+ continue;
+
+ for (j = 0; j < rclist->rcl_nrefcalls; j++) {
+ ref = &rclist->rcl_refcalls[j];
+
+ dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
+ "slotid %u\n", __func__,
+ ((u32 *)&rclist->rcl_sessionid.data)[0],
+ ((u32 *)&rclist->rcl_sessionid.data)[1],
+ ((u32 *)&rclist->rcl_sessionid.data)[2],
+ ((u32 *)&rclist->rcl_sessionid.data)[3],
+ ref->rc_sequenceid, ref->rc_slotid);
+
+ spin_lock(&tbl->slot_tbl_lock);
+ status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
+ tbl->slots[ref->rc_slotid].seq_nr ==
+ ref->rc_sequenceid);
+ spin_unlock(&tbl->slot_tbl_lock);
+ if (status)
+ goto out;
+ }
+ }
+
+out:
+ return status;
+}
+
+__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
+ struct cb_sequenceres *res,
+ struct cb_process_state *cps)
+{
+ struct nfs4_slot_table *tbl;
+ struct nfs_client *clp;
+ int i;
+ __be32 status = htonl(NFS4ERR_BADSESSION);
+
+ clp = nfs4_find_client_sessionid(cps->net, args->csa_addr, &args->csa_sessionid);
+ if (clp == NULL)
+ goto out;
+
+ tbl = &clp->cl_session->bc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ /* state manager is resetting the session */
+ if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+ spin_unlock(&tbl->slot_tbl_lock);
+ status = htonl(NFS4ERR_DELAY);
+ /* Return NFS4ERR_BADSESSION if we're draining the session
+ * in order to reset it.
+ */
+ if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
+ status = htonl(NFS4ERR_BADSESSION);
+ goto out;
+ }
+
+ status = validate_seqid(&clp->cl_session->bc_slot_table, args);
+ spin_unlock(&tbl->slot_tbl_lock);
+ if (status)
+ goto out;
+
+ cps->slotid = args->csa_slotid;
+
+ /*
+ * Check for pending referring calls. If a match is found, a
+ * related callback was received before the response to the original
+ * call.
+ */
+ if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
+ status = htonl(NFS4ERR_DELAY);
+ goto out;
+ }
+
+ memcpy(&res->csr_sessionid, &args->csa_sessionid,
+ sizeof(res->csr_sessionid));
+ res->csr_sequenceid = args->csa_sequenceid;
+ res->csr_slotid = args->csa_slotid;
+ res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+ res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+
+out:
+ cps->clp = clp; /* put in nfs4_callback_compound */
+ for (i = 0; i < args->csa_nrclists; i++)
+ kfree(args->csa_rclists[i].rcl_refcalls);
+ kfree(args->csa_rclists);
+
+ if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
+ cps->drc_status = status;
+ status = 0;
+ } else
+ res->csr_status = status;
+
+ dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
+ ntohl(status), ntohl(res->csr_status));
+ return status;
+}
+
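+/* A CB_RECALL_ANY mask may only contain bits defined in RCA4_TYPE_MASK_ALL */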
+static bool
+validate_bitmap_values(unsigned long mask)
+{
+ return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
+}
+
+__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
+ struct cb_process_state *cps)
+{
+ __be32 status;
+ fmode_t flags = 0;
+
+ status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
+ if (!cps->clp) /* set in cb_sequence */
+ goto out;
+
+ dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
+ rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+ status = cpu_to_be32(NFS4ERR_INVAL);
+ if (!validate_bitmap_values(args->craa_type_mask))
+ goto out;
+
+ status = cpu_to_be32(NFS4_OK);
+ if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
+ &args->craa_type_mask))
+ flags = FMODE_READ;
+ if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
+ &args->craa_type_mask))
+ flags |= FMODE_WRITE;
+ if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
+ &args->craa_type_mask))
+ pnfs_recall_all_layouts(cps->clp);
+ if (flags)
+ nfs_expire_all_delegation_types(cps->clp, flags);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+/* Reduce the fore channel's max_slots to the target value */
+__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
+ struct cb_process_state *cps)
+{
+ struct nfs4_slot_table *fc_tbl;
+ __be32 status;
+
+ status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ if (!cps->clp) /* set in cb_sequence */
+ goto out;
+
+ dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
+ rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+ args->crsa_target_max_slots);
+
+ fc_tbl = &cps->clp->cl_session->fc_slot_table;
+
+ status = htonl(NFS4ERR_BAD_HIGH_SLOT);
+ if (args->crsa_target_max_slots > fc_tbl->max_slots ||
+ args->crsa_target_max_slots < 1)
+ goto out;
+
+ status = htonl(NFS4_OK);
+ if (args->crsa_target_max_slots == fc_tbl->max_slots)
+ goto out;
+
+ fc_tbl->target_max_slots = args->crsa_target_max_slots;
+ nfs41_handle_recall_slot(cps->clp);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
new file mode 100644
index 00000000..95bfc243
--- /dev/null
+++ b/fs/nfs/callback_xdr.c
@@ -0,0 +1,998 @@
+/*
+ * linux/fs/nfs/callback_xdr.c
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFSv4 callback encode/decode procedures
+ */
+#include <linux/kernel.h>
+#include <linux/sunrpc/svc.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/sunrpc/bc_xprt.h>
+#include "nfs4_fs.h"
+#include "callback.h"
+#include "internal.h"
+
+#define CB_OP_TAGLEN_MAXSZ (512)
+#define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ)
+#define CB_OP_GETATTR_BITMAP_MAXSZ (4)
+#define CB_OP_GETATTR_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
+ CB_OP_GETATTR_BITMAP_MAXSZ + \
+ 2 + 2 + 3 + 3)
+#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+
+#if defined(CONFIG_NFS_V4_1)
+#define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#define CB_OP_DEVICENOTIFY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
+ 4 + 1 + 3)
+#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#endif /* CONFIG_NFS_V4_1 */
+
+#define NFSDBG_FACILITY NFSDBG_CALLBACK
+
+/* Internal error code */
+#define NFS4ERR_RESOURCE_HDR 11050
+
+typedef __be32 (*callback_process_op_t)(void *, void *,
+ struct cb_process_state *);
+typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
+typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
+
+
+struct callback_op {
+ callback_process_op_t process_op;
+ callback_decode_arg_t decode_args;
+ callback_encode_res_t encode_res;
+ long res_maxsize;
+};
+
+static struct callback_op callback_ops[];
+
+static __be32 nfs4_callback_null(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+ return htonl(NFS4_OK);
+}
+
+static int nfs4_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+{
+ return xdr_argsize_check(rqstp, p);
+}
+
+static int nfs4_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
+{
+ return xdr_ressize_check(rqstp, p);
+}
+
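+/* Pull nbytes from the XDR receive stream; a NULL return means the request
+ * was shorter than expected */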
+static __be32 *read_buf(struct xdr_stream *xdr, int nbytes)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, nbytes);
+ if (unlikely(p == NULL))
+ printk(KERN_WARNING "NFS: NFSv4 callback reply buffer overflowed!\n");
+ return p;
+}
+
+static __be32 decode_string(struct xdr_stream *xdr, unsigned int *len, const char **str)
+{
+ __be32 *p;
+
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ *len = ntohl(*p);
+
+ if (*len != 0) {
+ p = read_buf(xdr, *len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ *str = (const char *)p;
+ } else
+ *str = NULL;
+
+ return 0;
+}
+
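+/* Decode a counted filehandle, rejecting anything longer than NFS4_FHSIZE
+ * and zero-padding the unused tail of the buffer */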
+static __be32 decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ __be32 *p;
+
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ fh->size = ntohl(*p);
+ if (fh->size > NFS4_FHSIZE)
+ return htonl(NFS4ERR_BADHANDLE);
+ p = read_buf(xdr, fh->size);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ memcpy(&fh->data[0], p, fh->size);
+ memset(&fh->data[fh->size], 0, sizeof(fh->data) - fh->size);
+ return 0;
+}
+
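+/* Decode an attribute bitmap: a length word followed by that many bitmap
+ * words, of which only the first two are kept */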
+static __be32 decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
+{
+ __be32 *p;
+ unsigned int attrlen;
+
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ attrlen = ntohl(*p);
+ p = read_buf(xdr, attrlen << 2);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ if (likely(attrlen > 0))
+ bitmap[0] = ntohl(*p++);
+ if (attrlen > 1)
+ bitmap[1] = ntohl(*p);
+ return 0;
+}
+
+static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ __be32 *p;
+
+ p = read_buf(xdr, NFS4_STATEID_SIZE);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ memcpy(stateid, p, NFS4_STATEID_SIZE);
+ return 0;
+}
+
+static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
+{
+ __be32 *p;
+ __be32 status;
+
+ status = decode_string(xdr, &hdr->taglen, &hdr->tag);
+ if (unlikely(status != 0))
+ return status;
+ /* We do not like overly long tags! */
+ if (hdr->taglen > CB_OP_TAGLEN_MAXSZ - 12) {
+ printk("NFS: NFSv4 CALLBACK %s: client sent tag of length %u\n",
+ __func__, hdr->taglen);
+ return htonl(NFS4ERR_RESOURCE);
+ }
+ p = read_buf(xdr, 12);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ hdr->minorversion = ntohl(*p++);
+ /* Check minor version is zero or one. */
+ if (hdr->minorversion <= 1) {
+ hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */
+ } else {
+ pr_warn_ratelimited("NFS: %s: NFSv4 server callback with "
+ "illegal minor version %u!\n",
+ __func__, hdr->minorversion);
+ return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
+ }
+ hdr->nops = ntohl(*p);
+ dprintk("%s: minorversion %d nops %d\n", __func__,
+ hdr->minorversion, hdr->nops);
+ return 0;
+}
+
+static __be32 decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
+{
+ __be32 *p;
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE_HDR);
+ *op = ntohl(*p);
+ return 0;
+}
+
+static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_getattrargs *args)
+{
+ __be32 status;
+
+ status = decode_fh(xdr, &args->fh);
+ if (unlikely(status != 0))
+ goto out;
+ args->addr = svc_addr(rqstp);
+ status = decode_bitmap(xdr, args->bitmap);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallargs *args)
+{
+ __be32 *p;
+ __be32 status;
+
+ args->addr = svc_addr(rqstp);
+ status = decode_stateid(xdr, &args->stateid);
+ if (unlikely(status != 0))
+ goto out;
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_RESOURCE);
+ goto out;
+ }
+ args->truncate = ntohl(*p);
+ status = decode_fh(xdr, &args->fh);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+
+static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_layoutrecallargs *args)
+{
+ __be32 *p;
+ __be32 status = 0;
+ uint32_t iomode;
+
+ args->cbl_addr = svc_addr(rqstp);
+ p = read_buf(xdr, 4 * sizeof(uint32_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+
+ args->cbl_layout_type = ntohl(*p++);
+	/* Despite the spec's XDR, iomode really belongs in the FILE switch,
+	 * as it is unusable and ignored with the other recall types.
+	 */
+ iomode = ntohl(*p++);
+ args->cbl_layoutchanged = ntohl(*p++);
+ args->cbl_recall_type = ntohl(*p++);
+
+ if (args->cbl_recall_type == RETURN_FILE) {
+ args->cbl_range.iomode = iomode;
+ status = decode_fh(xdr, &args->cbl_fh);
+ if (unlikely(status != 0))
+ goto out;
+
+ p = read_buf(xdr, 2 * sizeof(uint64_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+ p = xdr_decode_hyper(p, &args->cbl_range.offset);
+ p = xdr_decode_hyper(p, &args->cbl_range.length);
+ status = decode_stateid(xdr, &args->cbl_stateid);
+ if (unlikely(status != 0))
+ goto out;
+ } else if (args->cbl_recall_type == RETURN_FSID) {
+ p = read_buf(xdr, 2 * sizeof(uint64_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+ p = xdr_decode_hyper(p, &args->cbl_fsid.major);
+ p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
+ } else if (args->cbl_recall_type != RETURN_ALL) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+ dprintk("%s: ltype 0x%x iomode %d changed %d recall_type %d\n",
+ __func__,
+ args->cbl_layout_type, iomode,
+ args->cbl_layoutchanged, args->cbl_recall_type);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+static
+__be32 decode_devicenotify_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_devicenotifyargs *args)
+{
+ __be32 *p;
+ __be32 status = 0;
+ u32 tmp;
+	int n, i;
+
+	args->ndevs = 0;
+
+ /* Num of device notifications */
+ p = read_buf(xdr, sizeof(uint32_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+ n = ntohl(*p++);
+ if (n <= 0)
+ goto out;
+ if (n > ULONG_MAX / sizeof(*args->devs)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto out;
+ }
+
+ args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL);
+ if (!args->devs) {
+ status = htonl(NFS4ERR_DELAY);
+ goto out;
+ }
+
+ /* Decode each dev notification */
+ for (i = 0; i < n; i++) {
+ struct cb_devicenotifyitem *dev = &args->devs[i];
+
+ p = read_buf(xdr, (4 * sizeof(uint32_t)) + NFS4_DEVICEID4_SIZE);
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto err;
+ }
+
+ tmp = ntohl(*p++); /* bitmap size */
+ if (tmp != 1) {
+ status = htonl(NFS4ERR_INVAL);
+ goto err;
+ }
+ dev->cbd_notify_type = ntohl(*p++);
+ if (dev->cbd_notify_type != NOTIFY_DEVICEID4_CHANGE &&
+ dev->cbd_notify_type != NOTIFY_DEVICEID4_DELETE) {
+ status = htonl(NFS4ERR_INVAL);
+ goto err;
+ }
+
+ tmp = ntohl(*p++); /* opaque size */
+ if (((dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE) &&
+ (tmp != NFS4_DEVICEID4_SIZE + 8)) ||
+ ((dev->cbd_notify_type == NOTIFY_DEVICEID4_DELETE) &&
+ (tmp != NFS4_DEVICEID4_SIZE + 4))) {
+ status = htonl(NFS4ERR_INVAL);
+ goto err;
+ }
+ dev->cbd_layout_type = ntohl(*p++);
+ memcpy(dev->cbd_dev_id.data, p, NFS4_DEVICEID4_SIZE);
+ p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
+
+		/* Only CHANGE notifications carry the immediate flag */
+		if (dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE) {
+ p = read_buf(xdr, sizeof(uint32_t));
+ if (unlikely(p == NULL)) {
+ status = htonl(NFS4ERR_BADXDR);
+ goto err;
+ }
+ dev->cbd_immediate = ntohl(*p++);
+ } else {
+ dev->cbd_immediate = 0;
+ }
+
+ args->ndevs++;
+
+ dprintk("%s: type %d layout 0x%x immediate %d\n",
+ __func__, dev->cbd_notify_type, dev->cbd_layout_type,
+ dev->cbd_immediate);
+ }
+out:
+ dprintk("%s: status %d ndevs %d\n",
+ __func__, ntohl(status), args->ndevs);
+ return status;
+err:
+	kfree(args->devs);
+	args->devs = NULL;	/* don't leave a dangling pointer behind */
+	goto out;
+}
+
+static __be32 decode_sessionid(struct xdr_stream *xdr,
+ struct nfs4_sessionid *sid)
+{
+ __be32 *p;
+ int len = NFS4_MAX_SESSIONID_LEN;
+
+ p = read_buf(xdr, len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ memcpy(sid->data, p, len);
+ return 0;
+}
+
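+/* Decode one referring-call list: a sessionid followed by an array of
+ * (sequenceid, slotid) pairs */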
+static __be32 decode_rc_list(struct xdr_stream *xdr,
+ struct referring_call_list *rc_list)
+{
+ __be32 *p;
+ int i;
+ __be32 status;
+
+ status = decode_sessionid(xdr, &rc_list->rcl_sessionid);
+ if (status)
+ goto out;
+
+ status = htonl(NFS4ERR_RESOURCE);
+ p = read_buf(xdr, sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+
+ rc_list->rcl_nrefcalls = ntohl(*p++);
+ if (rc_list->rcl_nrefcalls) {
+ p = read_buf(xdr,
+ rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+ rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
+ sizeof(*rc_list->rcl_refcalls),
+ GFP_KERNEL);
+ if (unlikely(rc_list->rcl_refcalls == NULL))
+ goto out;
+ for (i = 0; i < rc_list->rcl_nrefcalls; i++) {
+ rc_list->rcl_refcalls[i].rc_sequenceid = ntohl(*p++);
+ rc_list->rcl_refcalls[i].rc_slotid = ntohl(*p++);
+ }
+ }
+ status = 0;
+
+out:
+ return status;
+}
+
+static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_sequenceargs *args)
+{
+ __be32 *p;
+ int i;
+ __be32 status;
+
+ status = decode_sessionid(xdr, &args->csa_sessionid);
+ if (status)
+ goto out;
+
+ status = htonl(NFS4ERR_RESOURCE);
+ p = read_buf(xdr, 5 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ goto out;
+
+ args->csa_addr = svc_addr(rqstp);
+ args->csa_sequenceid = ntohl(*p++);
+ args->csa_slotid = ntohl(*p++);
+ args->csa_highestslotid = ntohl(*p++);
+ args->csa_cachethis = ntohl(*p++);
+ args->csa_nrclists = ntohl(*p++);
+ args->csa_rclists = NULL;
+ if (args->csa_nrclists) {
+ args->csa_rclists = kmalloc(args->csa_nrclists *
+ sizeof(*args->csa_rclists),
+ GFP_KERNEL);
+ if (unlikely(args->csa_rclists == NULL))
+ goto out;
+
+ for (i = 0; i < args->csa_nrclists; i++) {
+ status = decode_rc_list(xdr, &args->csa_rclists[i]);
+ if (status)
+ goto out_free;
+ }
+ }
+ status = 0;
+
+ dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u "
+ "highestslotid %u cachethis %d nrclists %u\n",
+ __func__,
+ ((u32 *)&args->csa_sessionid)[0],
+ ((u32 *)&args->csa_sessionid)[1],
+ ((u32 *)&args->csa_sessionid)[2],
+ ((u32 *)&args->csa_sessionid)[3],
+ args->csa_sequenceid, args->csa_slotid,
+ args->csa_highestslotid, args->csa_cachethis,
+ args->csa_nrclists);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+
+out_free:
+ for (i = 0; i < args->csa_nrclists; i++)
+ kfree(args->csa_rclists[i].rcl_refcalls);
+ kfree(args->csa_rclists);
+ goto out;
+}
+
+static __be32 decode_recallany_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_recallanyargs *args)
+{
+ uint32_t bitmap[2];
+ __be32 *p, status;
+
+ args->craa_addr = svc_addr(rqstp);
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_BADXDR);
+ args->craa_objs_to_keep = ntohl(*p++);
+ status = decode_bitmap(xdr, bitmap);
+ if (unlikely(status))
+ return status;
+ args->craa_type_mask = bitmap[0];
+
+ return 0;
+}
+
+static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct cb_recallslotargs *args)
+{
+ __be32 *p;
+
+ args->crsa_addr = svc_addr(rqstp);
+ p = read_buf(xdr, 4);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_BADXDR);
+ args->crsa_target_max_slots = ntohl(*p++);
+ return 0;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 4 + len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ xdr_encode_opaque(p, str, len);
+ return 0;
+}
+
+#define CB_SUPPORTED_ATTR0 (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE)
+#define CB_SUPPORTED_ATTR1 (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY)
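+/* Encode the reply bitmap, masked to the attributes we support, and return
+ * via @savep the location where the attribute-values length must be
+ * backfilled once the attributes themselves have been encoded */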
+static __be32 encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, __be32 **savep)
+{
+ __be32 bm[2];
+ __be32 *p;
+
+ bm[0] = htonl(bitmap[0] & CB_SUPPORTED_ATTR0);
+ bm[1] = htonl(bitmap[1] & CB_SUPPORTED_ATTR1);
+ if (bm[1] != 0) {
+ p = xdr_reserve_space(xdr, 16);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ *p++ = htonl(2);
+ *p++ = bm[0];
+ *p++ = bm[1];
+ } else if (bm[0] != 0) {
+ p = xdr_reserve_space(xdr, 12);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ *p++ = htonl(1);
+ *p++ = bm[0];
+ } else {
+ p = xdr_reserve_space(xdr, 8);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ *p++ = htonl(0);
+ }
+ *savep = p;
+ return 0;
+}
+
+static __be32 encode_attr_change(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t change)
+{
+ __be32 *p;
+
+ if (!(bitmap[0] & FATTR4_WORD0_CHANGE))
+ return 0;
+ p = xdr_reserve_space(xdr, 8);
+ if (unlikely(!p))
+ return htonl(NFS4ERR_RESOURCE);
+ p = xdr_encode_hyper(p, change);
+ return 0;
+}
+
+static __be32 encode_attr_size(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t size)
+{
+ __be32 *p;
+
+ if (!(bitmap[0] & FATTR4_WORD0_SIZE))
+ return 0;
+ p = xdr_reserve_space(xdr, 8);
+ if (unlikely(!p))
+ return htonl(NFS4ERR_RESOURCE);
+ p = xdr_encode_hyper(p, size);
+ return 0;
+}
+
+static __be32 encode_attr_time(struct xdr_stream *xdr, const struct timespec *time)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 12);
+ if (unlikely(!p))
+ return htonl(NFS4ERR_RESOURCE);
+ p = xdr_encode_hyper(p, time->tv_sec);
+ *p = htonl(time->tv_nsec);
+ return 0;
+}
+
+static __be32 encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time)
+{
+ if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA))
+ return 0;
+ return encode_attr_time(xdr,time);
+}
+
+static __be32 encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time)
+{
+ if (!(bitmap[1] & FATTR4_WORD1_TIME_MODIFY))
+ return 0;
+ return encode_attr_time(xdr,time);
+}
+
+static __be32 encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr)
+{
+ __be32 status;
+
+ hdr->status = xdr_reserve_space(xdr, 4);
+ if (unlikely(hdr->status == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ status = encode_string(xdr, hdr->taglen, hdr->tag);
+ if (unlikely(status != 0))
+ return status;
+ hdr->nops = xdr_reserve_space(xdr, 4);
+ if (unlikely(hdr->nops == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+ return 0;
+}
+
+static __be32 encode_op_hdr(struct xdr_stream *xdr, uint32_t op, __be32 res)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 8);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE_HDR);
+ *p++ = htonl(op);
+ *p = res;
+ return 0;
+}
+
+static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res)
+{
+ __be32 *savep = NULL;
+ __be32 status = res->status;
+
+ if (unlikely(status != 0))
+ goto out;
+ status = encode_attr_bitmap(xdr, res->bitmap, &savep);
+ if (unlikely(status != 0))
+ goto out;
+ status = encode_attr_change(xdr, res->bitmap, res->change_attr);
+ if (unlikely(status != 0))
+ goto out;
+ status = encode_attr_size(xdr, res->bitmap, res->size);
+ if (unlikely(status != 0))
+ goto out;
+ status = encode_attr_ctime(xdr, res->bitmap, &res->ctime);
+ if (unlikely(status != 0))
+ goto out;
+ status = encode_attr_mtime(xdr, res->bitmap, &res->mtime);
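+	/* Backfill the length of the encoded attribute values */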
+ *savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1)));
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+
+static __be32 encode_sessionid(struct xdr_stream *xdr,
+ const struct nfs4_sessionid *sid)
+{
+ __be32 *p;
+ int len = NFS4_MAX_SESSIONID_LEN;
+
+ p = xdr_reserve_space(xdr, len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ memcpy(p, sid, len);
+ return 0;
+}
+
+static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ const struct cb_sequenceres *res)
+{
+ __be32 *p;
+	__be32 status = res->csr_status;
+
+	if (unlikely(status != 0))
+		goto out;
+
+	status = encode_sessionid(xdr, &res->csr_sessionid);
+	if (unlikely(status != 0))
+		goto out;
+
+ p = xdr_reserve_space(xdr, 4 * sizeof(uint32_t));
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_RESOURCE);
+
+ *p++ = htonl(res->csr_sequenceid);
+ *p++ = htonl(res->csr_slotid);
+ *p++ = htonl(res->csr_highestslotid);
+ *p++ = htonl(res->csr_target_highestslotid);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+static __be32
+preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
+{
+ if (op_nr == OP_CB_SEQUENCE) {
+ if (nop != 0)
+ return htonl(NFS4ERR_SEQUENCE_POS);
+ } else {
+ if (nop == 0)
+ return htonl(NFS4ERR_OP_NOT_IN_SESSION);
+ }
+
+ switch (op_nr) {
+ case OP_CB_GETATTR:
+ case OP_CB_RECALL:
+ case OP_CB_SEQUENCE:
+ case OP_CB_RECALL_ANY:
+ case OP_CB_RECALL_SLOT:
+ case OP_CB_LAYOUTRECALL:
+ case OP_CB_NOTIFY_DEVICEID:
+ *op = &callback_ops[op_nr];
+ break;
+
+ case OP_CB_NOTIFY:
+ case OP_CB_PUSH_DELEG:
+ case OP_CB_RECALLABLE_OBJ_AVAIL:
+ case OP_CB_WANTS_CANCELLED:
+ case OP_CB_NOTIFY_LOCK:
+ return htonl(NFS4ERR_NOTSUPP);
+
+ default:
+ return htonl(NFS4ERR_OP_ILLEGAL);
+ }
+
+ return htonl(NFS_OK);
+}
+
+static void nfs4_callback_free_slot(struct nfs4_session *session)
+{
+ struct nfs4_slot_table *tbl = &session->bc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ /*
+	 * Let the state manager know callback processing is done.
+	 * A single slot, so the highest used slotid is either 0 or -1.
+ */
+ tbl->highest_used_slotid = NFS4_NO_SLOT;
+ nfs4_check_drain_bc_complete(session);
+ spin_unlock(&tbl->slot_tbl_lock);
+}
+
+static void nfs4_cb_free_slot(struct cb_process_state *cps)
+{
+ if (cps->slotid != NFS4_NO_SLOT)
+ nfs4_callback_free_slot(cps->clp->cl_session);
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+static __be32
+preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
+{
+ return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
+}
+
+static void nfs4_cb_free_slot(struct cb_process_state *cps)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static __be32
+preprocess_nfs4_op(unsigned int op_nr, struct callback_op **op)
+{
+ switch (op_nr) {
+ case OP_CB_GETATTR:
+ case OP_CB_RECALL:
+ *op = &callback_ops[op_nr];
+ break;
+ default:
+ return htonl(NFS4ERR_OP_ILLEGAL);
+ }
+
+ return htonl(NFS_OK);
+}
+
+static __be32 process_op(uint32_t minorversion, int nop,
+ struct svc_rqst *rqstp,
+ struct xdr_stream *xdr_in, void *argp,
+ struct xdr_stream *xdr_out, void *resp,
+ struct cb_process_state *cps)
+{
+ struct callback_op *op = &callback_ops[0];
+ unsigned int op_nr;
+ __be32 status;
+ long maxlen;
+ __be32 res;
+
+ dprintk("%s: start\n", __func__);
+ status = decode_op_hdr(xdr_in, &op_nr);
+ if (unlikely(status))
+ return status;
+
+ dprintk("%s: minorversion=%d nop=%d op_nr=%u\n",
+ __func__, minorversion, nop, op_nr);
+
+ status = minorversion ? preprocess_nfs41_op(nop, op_nr, &op) :
+ preprocess_nfs4_op(op_nr, &op);
+ if (status == htonl(NFS4ERR_OP_ILLEGAL))
+ op_nr = OP_CB_ILLEGAL;
+ if (status)
+ goto encode_hdr;
+
+ if (cps->drc_status) {
+ status = cps->drc_status;
+ goto encode_hdr;
+ }
+
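+	/* Refuse to process the op unless the reply buffer still has room */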
+ maxlen = xdr_out->end - xdr_out->p;
+ if (maxlen > 0 && maxlen < PAGE_SIZE) {
+ status = op->decode_args(rqstp, xdr_in, argp);
+ if (likely(status == 0))
+ status = op->process_op(argp, resp, cps);
+ } else
+ status = htonl(NFS4ERR_RESOURCE);
+
+encode_hdr:
+ res = encode_op_hdr(xdr_out, op_nr, status);
+ if (unlikely(res))
+ return res;
+ if (op->encode_res != NULL && status == 0)
+ status = op->encode_res(rqstp, xdr_out, resp);
+ dprintk("%s: done, status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
+/*
+ * Decode, process and encode a COMPOUND
+ */
+static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp)
+{
+ struct cb_compound_hdr_arg hdr_arg = { 0 };
+ struct cb_compound_hdr_res hdr_res = { NULL };
+ struct xdr_stream xdr_in, xdr_out;
+ __be32 *p, status;
+ struct cb_process_state cps = {
+ .drc_status = 0,
+ .clp = NULL,
+ .slotid = NFS4_NO_SLOT,
+ .net = rqstp->rq_xprt->xpt_net,
+ };
+ unsigned int nops = 0;
+
+ dprintk("%s: start\n", __func__);
+
+ xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
+
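+	/* Start encoding the reply directly after the RPC reply header that
+	 * is already present in the head iovec */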
+ p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
+ xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
+
+ status = decode_compound_hdr_arg(&xdr_in, &hdr_arg);
+ if (status == __constant_htonl(NFS4ERR_RESOURCE))
+ return rpc_garbage_args;
+
+ if (hdr_arg.minorversion == 0) {
+ cps.clp = nfs4_find_client_ident(rqstp->rq_xprt->xpt_net, hdr_arg.cb_ident);
+ if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
+ return rpc_drop_reply;
+ }
+
+ hdr_res.taglen = hdr_arg.taglen;
+ hdr_res.tag = hdr_arg.tag;
+ if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
+ return rpc_system_err;
+
+ while (status == 0 && nops != hdr_arg.nops) {
+ status = process_op(hdr_arg.minorversion, nops, rqstp,
+ &xdr_in, argp, &xdr_out, resp, &cps);
+ nops++;
+ }
+
+	/* Buffer overflow in decode_op_hdr or encode_op_hdr. Return a
+	 * resource error in the cb_compound status without returning the op */
+ if (unlikely(status == htonl(NFS4ERR_RESOURCE_HDR))) {
+ status = htonl(NFS4ERR_RESOURCE);
+ nops--;
+ }
+
+ *hdr_res.status = status;
+ *hdr_res.nops = htonl(nops);
+ nfs4_cb_free_slot(&cps);
+ nfs_put_client(cps.clp);
+ dprintk("%s: done, status = %u\n", __func__, ntohl(status));
+ return rpc_success;
+}
+
+/*
+ * Define NFS4 callback COMPOUND ops.
+ */
+static struct callback_op callback_ops[] = {
+ [0] = {
+ .res_maxsize = CB_OP_HDR_RES_MAXSZ,
+ },
+ [OP_CB_GETATTR] = {
+ .process_op = (callback_process_op_t)nfs4_callback_getattr,
+ .decode_args = (callback_decode_arg_t)decode_getattr_args,
+ .encode_res = (callback_encode_res_t)encode_getattr_res,
+ .res_maxsize = CB_OP_GETATTR_RES_MAXSZ,
+ },
+ [OP_CB_RECALL] = {
+ .process_op = (callback_process_op_t)nfs4_callback_recall,
+ .decode_args = (callback_decode_arg_t)decode_recall_args,
+ .res_maxsize = CB_OP_RECALL_RES_MAXSZ,
+ },
+#if defined(CONFIG_NFS_V4_1)
+ [OP_CB_LAYOUTRECALL] = {
+ .process_op = (callback_process_op_t)nfs4_callback_layoutrecall,
+ .decode_args =
+ (callback_decode_arg_t)decode_layoutrecall_args,
+ .res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
+ },
+ [OP_CB_NOTIFY_DEVICEID] = {
+ .process_op = (callback_process_op_t)nfs4_callback_devicenotify,
+ .decode_args =
+ (callback_decode_arg_t)decode_devicenotify_args,
+ .res_maxsize = CB_OP_DEVICENOTIFY_RES_MAXSZ,
+ },
+ [OP_CB_SEQUENCE] = {
+ .process_op = (callback_process_op_t)nfs4_callback_sequence,
+ .decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
+ .encode_res = (callback_encode_res_t)encode_cb_sequence_res,
+ .res_maxsize = CB_OP_SEQUENCE_RES_MAXSZ,
+ },
+ [OP_CB_RECALL_ANY] = {
+ .process_op = (callback_process_op_t)nfs4_callback_recallany,
+ .decode_args = (callback_decode_arg_t)decode_recallany_args,
+ .res_maxsize = CB_OP_RECALLANY_RES_MAXSZ,
+ },
+ [OP_CB_RECALL_SLOT] = {
+ .process_op = (callback_process_op_t)nfs4_callback_recallslot,
+ .decode_args = (callback_decode_arg_t)decode_recallslot_args,
+ .res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
+ },
+#endif /* CONFIG_NFS_V4_1 */
+};
+
+/*
+ * Define NFS4 callback procedures
+ */
+static struct svc_procedure nfs4_callback_procedures1[] = {
+ [CB_NULL] = {
+ .pc_func = nfs4_callback_null,
+ .pc_decode = (kxdrproc_t)nfs4_decode_void,
+ .pc_encode = (kxdrproc_t)nfs4_encode_void,
+ .pc_xdrressize = 1,
+ },
+ [CB_COMPOUND] = {
+ .pc_func = nfs4_callback_compound,
+ .pc_encode = (kxdrproc_t)nfs4_encode_void,
+ .pc_argsize = 256,
+ .pc_ressize = 256,
+ .pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
+ }
+};
+
+struct svc_version nfs4_callback_version1 = {
+ .vs_vers = 1,
+ .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
+ .vs_proc = nfs4_callback_procedures1,
+ .vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
+ .vs_dispatch = NULL,
+ .vs_hidden = 1,
+};
+
+struct svc_version nfs4_callback_version4 = {
+ .vs_vers = 4,
+ .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
+ .vs_proc = nfs4_callback_procedures1,
+ .vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
+ .vs_dispatch = NULL,
+ .vs_hidden = 1,
+};
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
new file mode 100644
index 00000000..60f7e4ec
--- /dev/null
+++ b/fs/nfs/client.c
@@ -0,0 +1,2078 @@
+/* client.c: NFS client sharing and management code
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/metrics.h>
+#include <linux/sunrpc/xprtsock.h>
+#include <linux/sunrpc/xprtrdma.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+#include <linux/lockd/bind.h>
+#include <linux/seq_file.h>
+#include <linux/mount.h>
+#include <linux/nfs_idmap.h>
+#include <linux/vfs.h>
+#include <linux/inet.h>
+#include <linux/in6.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <net/ipv6.h>
+#include <linux/nfs_xdr.h>
+#include <linux/sunrpc/bc_xprt.h>
+#include <linux/nsproxy.h>
+#include <linux/pid_namespace.h>
+
+
+#include "nfs4_fs.h"
+#include "callback.h"
+#include "delegation.h"
+#include "iostat.h"
+#include "internal.h"
+#include "fscache.h"
+#include "pnfs.h"
+#include "netns.h"
+
+#define NFSDBG_FACILITY NFSDBG_CLIENT
+
+static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
+#ifdef CONFIG_NFS_V4
+
+/*
+ * Get a unique NFSv4.0 callback identifier which will be used
+ * by the V4.0 callback service to lookup the nfs_client struct
+ */
+static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
+{
+ int ret = 0;
+ struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+
+ if (clp->rpc_ops->version != 4 || minorversion != 0)
+ return ret;
+retry:
+ if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL))
+ return -ENOMEM;
+ spin_lock(&nn->nfs_client_lock);
+ ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident);
+ spin_unlock(&nn->nfs_client_lock);
+ if (ret == -EAGAIN)
+ goto retry;
+ return ret;
+}
+#endif /* CONFIG_NFS_V4 */
+
+/*
+ * Turn off NFSv4 uid/gid mapping when using AUTH_SYS
+ */
+static bool nfs4_disable_idmapping = true;
+
+/*
+ * RPC cruft for NFS
+ */
+static const struct rpc_version *nfs_version[5] = {
+ [2] = &nfs_version2,
+#ifdef CONFIG_NFS_V3
+ [3] = &nfs_version3,
+#endif
+#ifdef CONFIG_NFS_V4
+ [4] = &nfs_version4,
+#endif
+};
+
+const struct rpc_program nfs_program = {
+ .name = "nfs",
+ .number = NFS_PROGRAM,
+ .nrvers = ARRAY_SIZE(nfs_version),
+ .version = nfs_version,
+ .stats = &nfs_rpcstat,
+ .pipe_dir_name = NFS_PIPE_DIRNAME,
+};
+
+struct rpc_stat nfs_rpcstat = {
+ .program = &nfs_program
+};
+
+
+#ifdef CONFIG_NFS_V3_ACL
+static struct rpc_stat nfsacl_rpcstat = { &nfsacl_program };
+static const struct rpc_version *nfsacl_version[] = {
+ [3] = &nfsacl_version3,
+};
+
+const struct rpc_program nfsacl_program = {
+ .name = "nfsacl",
+ .number = NFS_ACL_PROGRAM,
+ .nrvers = ARRAY_SIZE(nfsacl_version),
+ .version = nfsacl_version,
+ .stats = &nfsacl_rpcstat,
+};
+#endif /* CONFIG_NFS_V3_ACL */
+
+struct nfs_client_initdata {
+ const char *hostname;
+ const struct sockaddr *addr;
+ size_t addrlen;
+ const struct nfs_rpc_ops *rpc_ops;
+ int proto;
+ u32 minorversion;
+ struct net *net;
+};
+
+/*
+ * Allocate a shared client record
+ *
+ * Since these are allocated/deallocated very rarely, we don't
+ * bother putting them in a slab cache...
+ */
+static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
+{
+ struct nfs_client *clp;
+ struct rpc_cred *cred;
+ int err = -ENOMEM;
+
+ if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
+ goto error_0;
+
+ clp->rpc_ops = cl_init->rpc_ops;
+
+ atomic_set(&clp->cl_count, 1);
+ clp->cl_cons_state = NFS_CS_INITING;
+
+ memcpy(&clp->cl_addr, cl_init->addr, cl_init->addrlen);
+ clp->cl_addrlen = cl_init->addrlen;
+
+ if (cl_init->hostname) {
+ err = -ENOMEM;
+ clp->cl_hostname = kstrdup(cl_init->hostname, GFP_KERNEL);
+ if (!clp->cl_hostname)
+ goto error_cleanup;
+ }
+
+ INIT_LIST_HEAD(&clp->cl_superblocks);
+ clp->cl_rpcclient = ERR_PTR(-EINVAL);
+
+ clp->cl_proto = cl_init->proto;
+ clp->net = get_net(cl_init->net);
+
+#ifdef CONFIG_NFS_V4
+ err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
+ if (err)
+ goto error_cleanup;
+
+ spin_lock_init(&clp->cl_lock);
+ INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
+ rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
+ clp->cl_boot_time = CURRENT_TIME;
+ clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
+ clp->cl_minorversion = cl_init->minorversion;
+ clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
+#endif
+ cred = rpc_lookup_machine_cred("*");
+ if (!IS_ERR(cred))
+ clp->cl_machine_cred = cred;
+ nfs_fscache_get_client_cookie(clp);
+
+ return clp;
+
+error_cleanup:
+ kfree(clp);
+error_0:
+ return ERR_PTR(err);
+}
+
+#ifdef CONFIG_NFS_V4
+#ifdef CONFIG_NFS_V4_1
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+ if (nfs4_has_session(clp)) {
+ nfs4_deviceid_purge_client(clp);
+ nfs4_destroy_session(clp->cl_session);
+ }
+}
+#else /* CONFIG_NFS_V4_1 */
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Destroy the NFS4 callback service
+ */
+static void nfs4_destroy_callback(struct nfs_client *clp)
+{
+ if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
+ nfs_callback_down(clp->cl_mvops->minor_version);
+}
+
+static void nfs4_shutdown_client(struct nfs_client *clp)
+{
+ if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
+ nfs4_kill_renewd(clp);
+ nfs4_shutdown_session(clp);
+ nfs4_destroy_callback(clp);
+ if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
+ nfs_idmap_delete(clp);
+
+ rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+}
+
+/* idr_remove_all is not needed as all IDs are removed by nfs_put_client */
+void nfs_cleanup_cb_ident_idr(struct net *net)
+{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ idr_destroy(&nn->cb_ident_idr);
+}
+
+/* nfs_client_lock held */
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+ struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+
+ if (clp->cl_cb_ident)
+ idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident);
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+ rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
+}
+
+static void nfs4_destroy_server(struct nfs_server *server)
+{
+ nfs4_purge_state_owners(server);
+}
+
+#else
+static void nfs4_shutdown_client(struct nfs_client *clp)
+{
+}
+
+void nfs_cleanup_cb_ident_idr(struct net *net)
+{
+}
+
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+}
+
+#endif /* CONFIG_NFS_V4 */
+
+/*
+ * Destroy a shared client record
+ */
+static void nfs_free_client(struct nfs_client *clp)
+{
+ dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version);
+
+ nfs4_shutdown_client(clp);
+
+ nfs_fscache_release_client_cookie(clp);
+
+ /* -EIO all pending I/O */
+ if (!IS_ERR(clp->cl_rpcclient))
+ rpc_shutdown_client(clp->cl_rpcclient);
+
+ if (clp->cl_machine_cred != NULL)
+ put_rpccred(clp->cl_machine_cred);
+
+ put_net(clp->net);
+ kfree(clp->cl_hostname);
+ kfree(clp->server_scope);
+ kfree(clp->impl_id);
+ kfree(clp);
+
+ dprintk("<-- nfs_free_client()\n");
+}
+
+/*
+ * Release a reference to a shared client record
+ */
+void nfs_put_client(struct nfs_client *clp)
+{
+ struct nfs_net *nn;
+
+ if (!clp)
+ return;
+
+ dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count));
+ nn = net_generic(clp->net, nfs_net_id);
+
+ if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
+ list_del(&clp->cl_share_link);
+ nfs_cb_idr_remove_locked(clp);
+ spin_unlock(&nn->nfs_client_lock);
+
+ BUG_ON(!list_empty(&clp->cl_superblocks));
+
+ nfs_free_client(clp);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_put_client);
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+/*
+ * Test if two ip6 socket addresses refer to the same socket by
+ * comparing relevant fields. The padding bytes in particular are not
+ * compared. sin6_flowinfo is not compared because it only affects QoS
+ * and sin6_scope_id is only compared if the address is "link local"
+ * because "link local" addresses need only be unique to a specific
+ * link. Conversely, ordinary unicast addresses might have different
+ * sin6_scope_id.
+ *
+ * The caller should ensure both socket addresses are AF_INET6.
+ */
+static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
+ const struct sockaddr *sa2)
+{
+ const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1;
+ const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2;
+
+ if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
+ return 0;
+ else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ return sin1->sin6_scope_id == sin2->sin6_scope_id;
+
+ return 1;
+}
+#else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */
+static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1,
+ const struct sockaddr *sa2)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Test if two ip4 socket addresses refer to the same socket by
+ * comparing relevant fields. The padding bytes in particular are
+ * not compared.
+ *
+ * The caller should ensure both socket addresses are AF_INET.
+ */
+static int nfs_sockaddr_match_ipaddr4(const struct sockaddr *sa1,
+ const struct sockaddr *sa2)
+{
+ const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1;
+ const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2;
+
+ return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
+}
+
+static int nfs_sockaddr_cmp_ip6(const struct sockaddr *sa1,
+ const struct sockaddr *sa2)
+{
+ const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1;
+ const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2;
+
+ return nfs_sockaddr_match_ipaddr6(sa1, sa2) &&
+ (sin1->sin6_port == sin2->sin6_port);
+}
+
+static int nfs_sockaddr_cmp_ip4(const struct sockaddr *sa1,
+ const struct sockaddr *sa2)
+{
+ const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1;
+ const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2;
+
+ return nfs_sockaddr_match_ipaddr4(sa1, sa2) &&
+ (sin1->sin_port == sin2->sin_port);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Test if two socket addresses represent the same actual socket,
+ * by comparing (only) relevant fields, excluding the port number.
+ */
+static int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1,
+ const struct sockaddr *sa2)
+{
+ if (sa1->sa_family != sa2->sa_family)
+ return 0;
+
+ switch (sa1->sa_family) {
+ case AF_INET:
+ return nfs_sockaddr_match_ipaddr4(sa1, sa2);
+ case AF_INET6:
+ return nfs_sockaddr_match_ipaddr6(sa1, sa2);
+ }
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Test if two socket addresses represent the same actual socket,
+ * by comparing (only) relevant fields, including the port number.
+ */
+static int nfs_sockaddr_cmp(const struct sockaddr *sa1,
+ const struct sockaddr *sa2)
+{
+ if (sa1->sa_family != sa2->sa_family)
+ return 0;
+
+ switch (sa1->sa_family) {
+ case AF_INET:
+ return nfs_sockaddr_cmp_ip4(sa1, sa2);
+ case AF_INET6:
+ return nfs_sockaddr_cmp_ip6(sa1, sa2);
+ }
+ return 0;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/* Common match routine for v4.0 and v4.1 callback services */
+static bool nfs4_cb_match_client(const struct sockaddr *addr,
+ struct nfs_client *clp, u32 minorversion)
+{
+ struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
+
+ /* Don't match clients that failed to initialise */
+ if (!(clp->cl_cons_state == NFS_CS_READY ||
+ clp->cl_cons_state == NFS_CS_SESSION_INITING))
+ return false;
+
+ /* Match the version and minorversion */
+ if (clp->rpc_ops->version != 4 ||
+ clp->cl_minorversion != minorversion)
+ return false;
+
+ /* Match only the IP address, not the port number */
+ if (!nfs_sockaddr_match_ipaddr(addr, clap))
+ return false;
+
+ return true;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Find an nfs_client on the list that matches the initialisation data
+ * that is supplied.
+ */
+static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *data)
+{
+ struct nfs_client *clp;
+ const struct sockaddr *sap = data->addr;
+ struct nfs_net *nn = net_generic(data->net, nfs_net_id);
+
+ list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
+ const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
+ /* Don't match clients that failed to initialise properly */
+ if (clp->cl_cons_state < 0)
+ continue;
+
+ /* Different NFS versions cannot share the same nfs_client */
+ if (clp->rpc_ops != data->rpc_ops)
+ continue;
+
+ if (clp->cl_proto != data->proto)
+ continue;
+ /* Match nfsv4 minorversion */
+ if (clp->cl_minorversion != data->minorversion)
+ continue;
+ /* Match the full socket address */
+ if (!nfs_sockaddr_cmp(sap, clap))
+ continue;
+
+ atomic_inc(&clp->cl_count);
+ return clp;
+ }
+ return NULL;
+}
+
+/*
+ * Look up a client by IP address and protocol version
+ * - creates a new record if one doesn't yet exist
+ */
+static struct nfs_client *
+nfs_get_client(const struct nfs_client_initdata *cl_init,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour,
+ int noresvport)
+{
+ struct nfs_client *clp, *new = NULL;
+ int error;
+ struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
+
+ dprintk("--> nfs_get_client(%s,v%u)\n",
+ cl_init->hostname ?: "", cl_init->rpc_ops->version);
+
+ /* see if the client already exists */
+ do {
+ spin_lock(&nn->nfs_client_lock);
+
+ clp = nfs_match_client(cl_init);
+ if (clp)
+ goto found_client;
+ if (new)
+ goto install_client;
+
+ spin_unlock(&nn->nfs_client_lock);
+
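+		/* Allocate outside the lock, then loop to redo the lookup in
+		 * case a racing mount installed the same client first */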
+ new = nfs_alloc_client(cl_init);
+ } while (!IS_ERR(new));
+
+ dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new));
+ return new;
+
+ /* install a new client and return with it unready */
+install_client:
+ clp = new;
+ list_add(&clp->cl_share_link, &nn->nfs_client_list);
+ spin_unlock(&nn->nfs_client_lock);
+
+ error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr,
+ authflavour, noresvport);
+ if (error < 0) {
+ nfs_put_client(clp);
+ return ERR_PTR(error);
+ }
+ dprintk("--> nfs_get_client() = %p [new]\n", clp);
+ return clp;
+
+ /* found an existing client
+ * - make sure it's ready before returning
+ */
+found_client:
+ spin_unlock(&nn->nfs_client_lock);
+
+ if (new)
+ nfs_free_client(new);
+
+ error = wait_event_killable(nfs_client_active_wq,
+ clp->cl_cons_state < NFS_CS_INITING);
+ if (error < 0) {
+ nfs_put_client(clp);
+ return ERR_PTR(-ERESTARTSYS);
+ }
+
+ if (clp->cl_cons_state < NFS_CS_READY) {
+ error = clp->cl_cons_state;
+ nfs_put_client(clp);
+ return ERR_PTR(error);
+ }
+
+ BUG_ON(clp->cl_cons_state != NFS_CS_READY);
+
+ dprintk("--> nfs_get_client() = %p [share]\n", clp);
+ return clp;
+}
+
+/*
+ * Mark a server as ready or failed
+ */
+void nfs_mark_client_ready(struct nfs_client *clp, int state)
+{
+ clp->cl_cons_state = state;
+ wake_up_all(&nfs_client_active_wq);
+}
+
+/*
+ * With sessions, the client is not marked ready until after a
+ * successful EXCHANGE_ID and CREATE_SESSION.
+ *
+ * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
+ * other versions of NFS can be tried.
+ */
+int nfs4_check_client_ready(struct nfs_client *clp)
+{
+ if (!nfs4_has_session(clp))
+ return 0;
+ if (clp->cl_cons_state < NFS_CS_READY)
+ return -EPROTONOSUPPORT;
+ return 0;
+}
+
+/*
+ * Initialise the timeout values for a connection
+ */
+static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
+ unsigned int timeo, unsigned int retrans)
+{
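+	/* timeo is in tenths of a second; convert to jiffies */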
+ to->to_initval = timeo * HZ / 10;
+ to->to_retries = retrans;
+
+ switch (proto) {
+ case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_RDMA:
+ if (to->to_retries == 0)
+ to->to_retries = NFS_DEF_TCP_RETRANS;
+ if (to->to_initval == 0)
+ to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
+ if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
+ to->to_initval = NFS_MAX_TCP_TIMEOUT;
+ to->to_increment = to->to_initval;
+ to->to_maxval = to->to_initval + (to->to_increment * to->to_retries);
+ if (to->to_maxval > NFS_MAX_TCP_TIMEOUT)
+ to->to_maxval = NFS_MAX_TCP_TIMEOUT;
+ if (to->to_maxval < to->to_initval)
+ to->to_maxval = to->to_initval;
+ to->to_exponential = 0;
+ break;
+ case XPRT_TRANSPORT_UDP:
+ if (to->to_retries == 0)
+ to->to_retries = NFS_DEF_UDP_RETRANS;
+ if (!to->to_initval)
+ to->to_initval = NFS_DEF_UDP_TIMEO * HZ / 10;
+ if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
+ to->to_initval = NFS_MAX_UDP_TIMEOUT;
+ to->to_maxval = NFS_MAX_UDP_TIMEOUT;
+ to->to_exponential = 1;
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * Create an RPC client handle
+ */
+static int nfs_create_rpc_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ rpc_authflavor_t flavor,
+ int discrtry, int noresvport)
+{
+ struct rpc_clnt *clnt = NULL;
+ struct rpc_create_args args = {
+ .net = clp->net,
+ .protocol = clp->cl_proto,
+ .address = (struct sockaddr *)&clp->cl_addr,
+ .addrsize = clp->cl_addrlen,
+ .timeout = timeparms,
+ .servername = clp->cl_hostname,
+ .program = &nfs_program,
+ .version = clp->rpc_ops->version,
+ .authflavor = flavor,
+ };
+
+ if (discrtry)
+ args.flags |= RPC_CLNT_CREATE_DISCRTRY;
+ if (noresvport)
+ args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
+
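+ /* cl_rpcclient starts life as ERR_PTR(-EINVAL); a valid pointer
+  * means a previous call has already created the handle. */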
+ if (!IS_ERR(clp->cl_rpcclient))
+ return 0;
+
+ clnt = rpc_create(&args);
+ if (IS_ERR(clnt)) {
+ dprintk("%s: cannot create RPC client. Error = %ld\n",
+ __func__, PTR_ERR(clnt));
+ return PTR_ERR(clnt);
+ }
+
+ clp->cl_rpcclient = clnt;
+ return 0;
+}
+
+/*
+ * Version 2 or 3 client destruction
+ */
+static void nfs_destroy_server(struct nfs_server *server)
+{
+ if (!(server->flags & NFS_MOUNT_LOCAL_FLOCK) ||
+ !(server->flags & NFS_MOUNT_LOCAL_FCNTL))
+ nlmclnt_done(server->nlm_host);
+}
+
+/*
+ * Version 2 or 3 lockd setup
+ */
+static int nfs_start_lockd(struct nfs_server *server)
+{
+ struct nlm_host *host;
+ struct nfs_client *clp = server->nfs_client;
+ struct nlmclnt_initdata nlm_init = {
+ .hostname = clp->cl_hostname,
+ .address = (struct sockaddr *)&clp->cl_addr,
+ .addrlen = clp->cl_addrlen,
+ .nfs_version = clp->rpc_ops->version,
+ .noresvport = server->flags & NFS_MOUNT_NORESVPORT ?
+ 1 : 0,
+ .net = clp->net,
+ };
+
+ if (nlm_init.nfs_version > 3)
+ return 0;
+ if ((server->flags & NFS_MOUNT_LOCAL_FLOCK) &&
+ (server->flags & NFS_MOUNT_LOCAL_FCNTL))
+ return 0;
+
+ switch (clp->cl_proto) {
+ default:
+ nlm_init.protocol = IPPROTO_TCP;
+ break;
+ case XPRT_TRANSPORT_UDP:
+ nlm_init.protocol = IPPROTO_UDP;
+ }
+
+ host = nlmclnt_init(&nlm_init);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ server->nlm_host = host;
+ server->destroy = nfs_destroy_server;
+ return 0;
+}
+
+/*
+ * Initialise an NFSv3 ACL client connection
+ */
+#ifdef CONFIG_NFS_V3_ACL
+static void nfs_init_server_aclclient(struct nfs_server *server)
+{
+ if (server->nfs_client->rpc_ops->version != 3)
+ goto out_noacl;
+ if (server->flags & NFS_MOUNT_NOACL)
+ goto out_noacl;
+
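+ /* Share the existing transport, but bind it to the separate
+  * NFSACL sideband program, version 3. */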
+ server->client_acl = rpc_bind_new_program(server->client, &nfsacl_program, 3);
+ if (IS_ERR(server->client_acl))
+ goto out_noacl;
+
+ /* No errors! Assume that Sun nfsacls are supported */
+ server->caps |= NFS_CAP_ACLS;
+ return;
+
+out_noacl:
+ server->caps &= ~NFS_CAP_ACLS;
+}
+#else
+static inline void nfs_init_server_aclclient(struct nfs_server *server)
+{
+ server->flags &= ~NFS_MOUNT_NOACL;
+ server->caps &= ~NFS_CAP_ACLS;
+}
+#endif
+
+/*
+ * Create a general RPC client
+ */
+static int nfs_init_server_rpcclient(struct nfs_server *server,
+ const struct rpc_timeout *timeo,
+ rpc_authflavor_t pseudoflavour)
+{
+ struct nfs_client *clp = server->nfs_client;
+
+ server->client = rpc_clone_client(clp->cl_rpcclient);
+ if (IS_ERR(server->client)) {
+ dprintk("%s: couldn't create rpc_client!\n", __func__);
+ return PTR_ERR(server->client);
+ }
+
+ memcpy(&server->client->cl_timeout_default,
+ timeo,
+ sizeof(server->client->cl_timeout_default));
+ server->client->cl_timeout = &server->client->cl_timeout_default;
+
+ if (pseudoflavour != clp->cl_rpcclient->cl_auth->au_flavor) {
+ struct rpc_auth *auth;
+
+ auth = rpcauth_create(pseudoflavour, server->client);
+ if (IS_ERR(auth)) {
+ dprintk("%s: couldn't create credcache!\n", __func__);
+ return PTR_ERR(auth);
+ }
+ }
+ server->client->cl_softrtry = 0;
+ if (server->flags & NFS_MOUNT_SOFT)
+ server->client->cl_softrtry = 1;
+
+ return 0;
+}
+
+/*
+ * Initialise an NFS2 or NFS3 client
+ */
+int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms,
+ const char *ip_addr, rpc_authflavor_t authflavour,
+ int noresvport)
+{
+ int error;
+
+ if (clp->cl_cons_state == NFS_CS_READY) {
+ /* the client is already initialised */
+ dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp);
+ return 0;
+ }
+
+ /*
+ * Create a client RPC handle for doing FSSTAT with UNIX auth only
+ * - RFC 2623, sec 2.3.2
+ */
+ error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX,
+ 0, noresvport);
+ if (error < 0)
+ goto error;
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+ return 0;
+
+error:
+ nfs_mark_client_ready(clp, error);
+ dprintk("<-- nfs_init_client() = xerror %d\n", error);
+ return error;
+}
+
+/*
+ * Create a version 2 or 3 client
+ */
+static int nfs_init_server(struct nfs_server *server,
+ const struct nfs_parsed_mount_data *data)
+{
+ struct nfs_client_initdata cl_init = {
+ .hostname = data->nfs_server.hostname,
+ .addr = (const struct sockaddr *)&data->nfs_server.address,
+ .addrlen = data->nfs_server.addrlen,
+ .rpc_ops = &nfs_v2_clientops,
+ .proto = data->nfs_server.protocol,
+ .net = data->net,
+ };
+ struct rpc_timeout timeparms;
+ struct nfs_client *clp;
+ int error;
+
+ dprintk("--> nfs_init_server()\n");
+
+#ifdef CONFIG_NFS_V3
+ if (data->version == 3)
+ cl_init.rpc_ops = &nfs_v3_clientops;
+#endif
+
+ nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
+ data->timeo, data->retrans);
+
+ /* Allocate or find a client reference we can use */
+ clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX,
+ data->flags & NFS_MOUNT_NORESVPORT);
+ if (IS_ERR(clp)) {
+ dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
+ return PTR_ERR(clp);
+ }
+
+ server->nfs_client = clp;
+
+ /* Initialise the client representation from the mount data */
+ server->flags = data->flags;
+ server->options = data->options;
+ server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
+ NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
+ NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
+
+ if (data->rsize)
+ server->rsize = nfs_block_size(data->rsize, NULL);
+ if (data->wsize)
+ server->wsize = nfs_block_size(data->wsize, NULL);
+
+ server->acregmin = data->acregmin * HZ;
+ server->acregmax = data->acregmax * HZ;
+ server->acdirmin = data->acdirmin * HZ;
+ server->acdirmax = data->acdirmax * HZ;
+
+ /* Start lockd here, before we might error out */
+ error = nfs_start_lockd(server);
+ if (error < 0)
+ goto error;
+
+ server->port = data->nfs_server.port;
+
+ error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
+ if (error < 0)
+ goto error;
+
+ /* Preserve the values of mount_server-related mount options */
+ if (data->mount_server.addrlen) {
+ memcpy(&server->mountd_address, &data->mount_server.address,
+ data->mount_server.addrlen);
+ server->mountd_addrlen = data->mount_server.addrlen;
+ }
+ server->mountd_version = data->mount_server.version;
+ server->mountd_port = data->mount_server.port;
+ server->mountd_protocol = data->mount_server.protocol;
+
+ server->namelen = data->namlen;
+ /* Create a client RPC handle for the NFSv3 ACL management interface */
+ nfs_init_server_aclclient(server);
+ dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
+ return 0;
+
+error:
+ server->nfs_client = NULL;
+ nfs_put_client(clp);
+ dprintk("<-- nfs_init_server() = xerror %d\n", error);
+ return error;
+}
+
+/*
+ * Load up the server record from information gained in an fsinfo record
+ */
+static void nfs_server_set_fsinfo(struct nfs_server *server,
+ struct nfs_fh *mntfh,
+ struct nfs_fsinfo *fsinfo)
+{
+ unsigned long max_rpc_payload;
+
+ /* Work out the transfer size parameters, clamping them to the
+ server's preferred and maximum values */
+ if (server->rsize == 0)
+ server->rsize = nfs_block_size(fsinfo->rtpref, NULL);
+ if (server->wsize == 0)
+ server->wsize = nfs_block_size(fsinfo->wtpref, NULL);
+
+ if (fsinfo->rtmax >= 512 && server->rsize > fsinfo->rtmax)
+ server->rsize = nfs_block_size(fsinfo->rtmax, NULL);
+ if (fsinfo->wtmax >= 512 && server->wsize > fsinfo->wtmax)
+ server->wsize = nfs_block_size(fsinfo->wtmax, NULL);
+
+ max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL);
+ if (server->rsize > max_rpc_payload)
+ server->rsize = max_rpc_payload;
+ if (server->rsize > NFS_MAX_FILE_IO_SIZE)
+ server->rsize = NFS_MAX_FILE_IO_SIZE;
+ server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
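+ /* rpages is the number of pages in one full-sized read; the
+  * readahead window below is sized as a multiple of it. */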
+
+ server->backing_dev_info.name = "nfs";
+ server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
+
+ if (server->wsize > max_rpc_payload)
+ server->wsize = max_rpc_payload;
+ if (server->wsize > NFS_MAX_FILE_IO_SIZE)
+ server->wsize = NFS_MAX_FILE_IO_SIZE;
+ server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ server->pnfs_blksize = fsinfo->blksize;
+ set_pnfs_layoutdriver(server, mntfh, fsinfo->layouttype);
+
+ server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);
+
+ server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
+ if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES)
+ server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES;
+ if (server->dtsize > server->rsize)
+ server->dtsize = server->rsize;
+
+ if (server->flags & NFS_MOUNT_NOAC) {
+ server->acregmin = server->acregmax = 0;
+ server->acdirmin = server->acdirmax = 0;
+ }
+
+ server->maxfilesize = fsinfo->maxfilesize;
+
+ server->time_delta = fsinfo->time_delta;
+
+ /* We're airborne. Set the socket buffer sizes */
+ rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100);
+}
+
+/*
+ * Probe filesystem information, including the FSID on v2/v3
+ */
+static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, struct nfs_fattr *fattr)
+{
+ struct nfs_fsinfo fsinfo;
+ struct nfs_client *clp = server->nfs_client;
+ int error;
+
+ dprintk("--> nfs_probe_fsinfo()\n");
+
+ if (clp->rpc_ops->set_capabilities != NULL) {
+ error = clp->rpc_ops->set_capabilities(server, mntfh);
+ if (error < 0)
+ goto out_error;
+ }
+
+ fsinfo.fattr = fattr;
+ fsinfo.layouttype = 0;
+ error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo);
+ if (error < 0)
+ goto out_error;
+
+ nfs_server_set_fsinfo(server, mntfh, &fsinfo);
+
+ /* Get some general file system info */
+ if (server->namelen == 0) {
+ struct nfs_pathconf pathinfo;
+
+ pathinfo.fattr = fattr;
+ nfs_fattr_init(fattr);
+
+ if (clp->rpc_ops->pathconf(server, mntfh, &pathinfo) >= 0)
+ server->namelen = pathinfo.max_namelen;
+ }
+
+ dprintk("<-- nfs_probe_fsinfo() = 0\n");
+ return 0;
+
+out_error:
+ dprintk("nfs_probe_fsinfo: error = %d\n", -error);
+ return error;
+}
+
+/*
+ * Copy useful information when duplicating a server record
+ */
+static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
+{
+ target->flags = source->flags;
+ target->rsize = source->rsize;
+ target->wsize = source->wsize;
+ target->acregmin = source->acregmin;
+ target->acregmax = source->acregmax;
+ target->acdirmin = source->acdirmin;
+ target->acdirmax = source->acdirmax;
+ target->caps = source->caps;
+ target->options = source->options;
+}
+
+static void nfs_server_insert_lists(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs_net *nn = net_generic(clp->net, nfs_net_id);
+
+ spin_lock(&nn->nfs_client_lock);
+ list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
+ list_add_tail(&server->master_link, &nn->nfs_volume_list);
+ clear_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
+ spin_unlock(&nn->nfs_client_lock);
+}
+
+static void nfs_server_remove_lists(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs_net *nn;
+
+ if (clp == NULL)
+ return;
+ nn = net_generic(clp->net, nfs_net_id);
+ spin_lock(&nn->nfs_client_lock);
+ list_del_rcu(&server->client_link);
+ if (list_empty(&clp->cl_superblocks))
+ set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
+ list_del(&server->master_link);
+ spin_unlock(&nn->nfs_client_lock);
+
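+ /* Wait for RCU walkers of cl_superblocks and the volume list to
+  * finish before the caller is allowed to free the server. */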
+ synchronize_rcu();
+}
+
+/*
+ * Allocate and initialise a server record
+ */
+static struct nfs_server *nfs_alloc_server(void)
+{
+ struct nfs_server *server;
+
+ server = kzalloc(sizeof(struct nfs_server), GFP_KERNEL);
+ if (!server)
+ return NULL;
+
+ server->client = server->client_acl = ERR_PTR(-EINVAL);
+
+ /* Zero out the NFS state stuff */
+ INIT_LIST_HEAD(&server->client_link);
+ INIT_LIST_HEAD(&server->master_link);
+ INIT_LIST_HEAD(&server->delegations);
+ INIT_LIST_HEAD(&server->layouts);
+ INIT_LIST_HEAD(&server->state_owners_lru);
+
+ atomic_set(&server->active, 0);
+
+ server->io_stats = nfs_alloc_iostats();
+ if (!server->io_stats) {
+ kfree(server);
+ return NULL;
+ }
+
+ if (bdi_init(&server->backing_dev_info)) {
+ nfs_free_iostats(server->io_stats);
+ kfree(server);
+ return NULL;
+ }
+
+ ida_init(&server->openowner_id);
+ ida_init(&server->lockowner_id);
+ pnfs_init_server(server);
+
+ return server;
+}
+
+/*
+ * Free up a server record
+ */
+void nfs_free_server(struct nfs_server *server)
+{
+ dprintk("--> nfs_free_server()\n");
+
+ nfs_server_remove_lists(server);
+ unset_pnfs_layoutdriver(server);
+
+ if (server->destroy != NULL)
+ server->destroy(server);
+
+ if (!IS_ERR(server->client_acl))
+ rpc_shutdown_client(server->client_acl);
+ if (!IS_ERR(server->client))
+ rpc_shutdown_client(server->client);
+
+ nfs_put_client(server->nfs_client);
+
+ ida_destroy(&server->lockowner_id);
+ ida_destroy(&server->openowner_id);
+ nfs_free_iostats(server->io_stats);
+ bdi_destroy(&server->backing_dev_info);
+ kfree(server);
+ nfs_release_automount_timer();
+ dprintk("<-- nfs_free_server()\n");
+}
+
+/*
+ * Create a version 2 or 3 volume record
+ * - keyed on server and FSID
+ */
+struct nfs_server *nfs_create_server(const struct nfs_parsed_mount_data *data,
+ struct nfs_fh *mntfh)
+{
+ struct nfs_server *server;
+ struct nfs_fattr *fattr;
+ int error;
+
+ server = nfs_alloc_server();
+ if (!server)
+ return ERR_PTR(-ENOMEM);
+
+ error = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto error;
+
+ /* Get a client representation */
+ error = nfs_init_server(server, data);
+ if (error < 0)
+ goto error;
+
+ BUG_ON(!server->nfs_client);
+ BUG_ON(!server->nfs_client->rpc_ops);
+ BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
+
+ /* Probe the root fh to retrieve its FSID */
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
+ if (error < 0)
+ goto error;
+ if (server->nfs_client->rpc_ops->version == 3) {
+ if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
+ server->namelen = NFS3_MAXNAMLEN;
+ if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
+ server->caps |= NFS_CAP_READDIRPLUS;
+ } else {
+ if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
+ server->namelen = NFS2_MAXNAMLEN;
+ }
+
+ if (!(fattr->valid & NFS_ATTR_FATTR)) {
+ error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
+ if (error < 0) {
+ dprintk("nfs_create_server: getattr error = %d\n", -error);
+ goto error;
+ }
+ }
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
+
+ dprintk("Server FSID: %llx:%llx\n",
+ (unsigned long long) server->fsid.major,
+ (unsigned long long) server->fsid.minor);
+
+ nfs_server_insert_lists(server);
+ server->mount_time = jiffies;
+ nfs_free_fattr(fattr);
+ return server;
+
+error:
+ nfs_free_fattr(fattr);
+ nfs_free_server(server);
+ return ERR_PTR(error);
+}
+
+#ifdef CONFIG_NFS_V4
+/*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by callback identifier
+ */
+struct nfs_client *
+nfs4_find_client_ident(struct net *net, int cb_ident)
+{
+ struct nfs_client *clp;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ spin_lock(&nn->nfs_client_lock);
+ clp = idr_find(&nn->cb_ident_idr, cb_ident);
+ if (clp)
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+ return clp;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * NFSv4.1 callback thread helper
+ * For CB_COMPOUND calls, find a client by IP address, protocol version,
+ * minorversion, and sessionID
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
+ struct nfs4_sessionid *sid)
+{
+ struct nfs_client *clp;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ spin_lock(&nn->nfs_client_lock);
+ list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
+ if (!nfs4_cb_match_client(addr, clp, 1))
+ continue;
+
+ if (!nfs4_has_session(clp))
+ continue;
+
+ /* Match the sessionid */
+ if (memcmp(clp->cl_session->sess_id.data,
+ sid->data, NFS4_MAX_SESSIONID_LEN) != 0)
+ continue;
+
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+ return clp;
+ }
+ spin_unlock(&nn->nfs_client_lock);
+ return NULL;
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+struct nfs_client *
+nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
+ struct nfs4_sessionid *sid)
+{
+ return NULL;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * Initialize the NFS4 callback service
+ */
+static int nfs4_init_callback(struct nfs_client *clp)
+{
+ int error;
+
+ if (clp->rpc_ops->version == 4) {
+ struct rpc_xprt *xprt;
+
+ xprt = rcu_dereference_raw(clp->cl_rpcclient->cl_xprt);
+
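+ /* NFSv4.1 callbacks arrive over the fore channel connection, so
+  * backchannel resources must be preallocated on that transport. */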
+ if (nfs4_has_session(clp)) {
+ error = xprt_setup_backchannel(xprt,
+ NFS41_BC_MIN_CALLBACKS);
+ if (error < 0)
+ return error;
+ }
+
+ error = nfs_callback_up(clp->cl_mvops->minor_version, xprt);
+ if (error < 0) {
+ dprintk("%s: failed to start callback. Error = %d\n",
+ __func__, error);
+ return error;
+ }
+ __set_bit(NFS_CS_CALLBACK, &clp->cl_res_state);
+ }
+ return 0;
+}
+
+/*
+ * Initialize the minor version specific parts of an NFS4 client record
+ */
+static int nfs4_init_client_minor_version(struct nfs_client *clp)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (clp->cl_mvops->minor_version) {
+ struct nfs4_session *session = NULL;
+ /*
+ * Create the session and mark it expired.
+ * When a SEQUENCE operation encounters the expired session
+ * it will do session recovery to initialize it.
+ */
+ session = nfs4_alloc_session(clp);
+ if (!session)
+ return -ENOMEM;
+
+ clp->cl_session = session;
+ /*
+ * The create session reply races with the server back
+ * channel probe. Mark the client NFS_CS_SESSION_INITING
+ * so that the client back channel can find the
+ * nfs_client struct.
+ */
+ clp->cl_cons_state = NFS_CS_SESSION_INITING;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+
+ return nfs4_init_callback(clp);
+}
+
+/*
+ * Initialise an NFS4 client record
+ */
+int nfs4_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour,
+ int noresvport)
+{
+ char buf[INET6_ADDRSTRLEN + 1];
+ int error;
+
+ if (clp->cl_cons_state == NFS_CS_READY) {
+ /* the client is initialised already */
+ dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp);
+ return 0;
+ }
+
+ /* Check NFS protocol revision and initialize RPC op vector */
+ clp->rpc_ops = &nfs_v4_clientops;
+
+ error = nfs_create_rpc_client(clp, timeparms, authflavour,
+ 1, noresvport);
+ if (error < 0)
+ goto error;
+
+ /* If no clientaddr= option was specified, find a usable cb address */
+ if (ip_addr == NULL) {
+ struct sockaddr_storage cb_addr;
+ struct sockaddr *sap = (struct sockaddr *)&cb_addr;
+
+ error = rpc_localaddr(clp->cl_rpcclient, sap, sizeof(cb_addr));
+ if (error < 0)
+ goto error;
+ error = rpc_ntop(sap, buf, sizeof(buf));
+ if (error < 0)
+ goto error;
+ ip_addr = (const char *)buf;
+ }
+ strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
+
+ error = nfs_idmap_new(clp);
+ if (error < 0) {
+ dprintk("%s: failed to create idmapper. Error = %d\n",
+ __func__, error);
+ goto error;
+ }
+ __set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
+
+ error = nfs4_init_client_minor_version(clp);
+ if (error < 0)
+ goto error;
+
+ if (!nfs4_has_session(clp))
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+ return 0;
+
+error:
+ nfs_mark_client_ready(clp, error);
+ dprintk("<-- nfs4_init_client() = xerror %d\n", error);
+ return error;
+}
+
+/*
+ * Set up an NFS4 client
+ */
+static int nfs4_set_client(struct nfs_server *server,
+ const char *hostname,
+ const struct sockaddr *addr,
+ const size_t addrlen,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour,
+ int proto, const struct rpc_timeout *timeparms,
+ u32 minorversion, struct net *net)
+{
+ struct nfs_client_initdata cl_init = {
+ .hostname = hostname,
+ .addr = addr,
+ .addrlen = addrlen,
+ .rpc_ops = &nfs_v4_clientops,
+ .proto = proto,
+ .minorversion = minorversion,
+ .net = net,
+ };
+ struct nfs_client *clp;
+ int error;
+
+ dprintk("--> nfs4_set_client()\n");
+
+ /* Allocate or find a client reference we can use */
+ clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour,
+ server->flags & NFS_MOUNT_NORESVPORT);
+ if (IS_ERR(clp)) {
+ error = PTR_ERR(clp);
+ goto error;
+ }
+
+ /*
+ * Query for the lease time on clientid setup or renewal
+ *
+ * Note that this bit will also be set on nfs_clients that were
+ * originally created for the DS role only and thus never set it,
+ * but will now serve a dual role.
+ */
+ set_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state);
+
+ server->nfs_client = clp;
+ dprintk("<-- nfs4_set_client() = 0 [new %p]\n", clp);
+ return 0;
+error:
+ dprintk("<-- nfs4_set_client() = xerror %d\n", error);
+ return error;
+}
+
+/*
+ * Set up a pNFS Data Server client.
+ *
+ * Return any existing nfs_client that matches the server address, port,
+ * version and minorversion.
+ *
+ * For a new nfs_client, use a soft mount (default), a low retrans and a
+ * low timeout interval so that if a connection is lost, we retry through
+ * the MDS.
+ */
+struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
+ const struct sockaddr *ds_addr,
+ int ds_addrlen, int ds_proto)
+{
+ struct nfs_client_initdata cl_init = {
+ .addr = ds_addr,
+ .addrlen = ds_addrlen,
+ .rpc_ops = &nfs_v4_clientops,
+ .proto = ds_proto,
+ .minorversion = mds_clp->cl_minorversion,
+ .net = mds_clp->net,
+ };
+ struct rpc_timeout ds_timeout = {
+ .to_initval = 15 * HZ,
+ .to_maxval = 15 * HZ,
+ .to_retries = 1,
+ .to_exponential = 1,
+ };
+ struct nfs_client *clp;
+
+ /*
+ * Set an authflavor equal to the MDS value. Use the MDS nfs_client
+ * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
+ * (RFC 5661, section 13.1).
+ */
+ clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
+ mds_clp->cl_rpcclient->cl_auth->au_flavor, 0);
+
+ dprintk("<-- %s %p\n", __func__, clp);
+ return clp;
+}
+EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
+
+/*
+ * Session has been established, and the client marked ready.
+ * Set the mount rsize and wsize with the negotiated fore channel
+ * attributes, which will be bounds-checked in nfs_server_set_fsinfo.
+ */
+static void nfs4_session_set_rwsize(struct nfs_server *server)
+{
+#ifdef CONFIG_NFS_V4_1
+ struct nfs4_session *sess;
+ u32 server_resp_sz;
+ u32 server_rqst_sz;
+
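+ /* The negotiated sizes cover a whole COMPOUND; subtract the protocol
+  * overhead to obtain the usable READ/WRITE payload sizes. */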
+ if (!nfs4_has_session(server->nfs_client))
+ return;
+ sess = server->nfs_client->cl_session;
+ server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
+ server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
+
+ if (server->rsize > server_resp_sz)
+ server->rsize = server_resp_sz;
+ if (server->wsize > server_rqst_sz)
+ server->wsize = server_rqst_sz;
+#endif /* CONFIG_NFS_V4_1 */
+}
+
+static int nfs4_server_common_setup(struct nfs_server *server,
+ struct nfs_fh *mntfh)
+{
+ struct nfs_fattr *fattr;
+ int error;
+
+ BUG_ON(!server->nfs_client);
+ BUG_ON(!server->nfs_client->rpc_ops);
+ BUG_ON(!server->nfs_client->rpc_ops->file_inode_ops);
+
+ /* data servers support only a subset of NFSv4.1 */
+ if (is_ds_only_client(server->nfs_client))
+ return -EPROTONOSUPPORT;
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ return -ENOMEM;
+
+ /* We must ensure the session is initialised first */
+ error = nfs4_init_session(server);
+ if (error < 0)
+ goto out;
+
+ /* Probe the root fh to retrieve its FSID and filehandle */
+ error = nfs4_get_rootfh(server, mntfh);
+ if (error < 0)
+ goto out;
+
+ dprintk("Server FSID: %llx:%llx\n",
+ (unsigned long long) server->fsid.major,
+ (unsigned long long) server->fsid.minor);
+ dprintk("Mount FH: %d\n", mntfh->size);
+
+ nfs4_session_set_rwsize(server);
+
+ error = nfs_probe_fsinfo(server, mntfh, fattr);
+ if (error < 0)
+ goto out;
+
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
+ nfs_server_insert_lists(server);
+ server->mount_time = jiffies;
+ server->destroy = nfs4_destroy_server;
+out:
+ nfs_free_fattr(fattr);
+ return error;
+}
+
+/*
+ * Create a version 4 volume record
+ */
+static int nfs4_init_server(struct nfs_server *server,
+ const struct nfs_parsed_mount_data *data)
+{
+ struct rpc_timeout timeparms;
+ int error;
+
+ dprintk("--> nfs4_init_server()\n");
+
+ nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
+ data->timeo, data->retrans);
+
+ /* Initialise the client representation from the mount data */
+ server->flags = data->flags;
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR|NFS_CAP_POSIX_LOCK;
+ if (!(data->flags & NFS_MOUNT_NORDIRPLUS))
+ server->caps |= NFS_CAP_READDIRPLUS;
+ server->options = data->options;
+
+ /* Get a client record */
+ error = nfs4_set_client(server,
+ data->nfs_server.hostname,
+ (const struct sockaddr *)&data->nfs_server.address,
+ data->nfs_server.addrlen,
+ data->client_address,
+ data->auth_flavors[0],
+ data->nfs_server.protocol,
+ &timeparms,
+ data->minorversion,
+ data->net);
+ if (error < 0)
+ goto error;
+
+ /*
+ * Don't use NFS uid/gid mapping if we're using AUTH_SYS or lower
+ * authentication.
+ */
+ if (nfs4_disable_idmapping && data->auth_flavors[0] == RPC_AUTH_UNIX)
+ server->caps |= NFS_CAP_UIDGID_NOMAP;
+
+ if (data->rsize)
+ server->rsize = nfs_block_size(data->rsize, NULL);
+ if (data->wsize)
+ server->wsize = nfs_block_size(data->wsize, NULL);
+
+ server->acregmin = data->acregmin * HZ;
+ server->acregmax = data->acregmax * HZ;
+ server->acdirmin = data->acdirmin * HZ;
+ server->acdirmax = data->acdirmax * HZ;
+
+ server->port = data->nfs_server.port;
+
+ error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
+
+error:
+ /* Done */
+ dprintk("<-- nfs4_init_server() = %d\n", error);
+ return error;
+}
+
+/*
+ * Create a version 4 volume record
+ * - keyed on server and FSID
+ */
+struct nfs_server *nfs4_create_server(const struct nfs_parsed_mount_data *data,
+ struct nfs_fh *mntfh)
+{
+ struct nfs_server *server;
+ int error;
+
+ dprintk("--> nfs4_create_server()\n");
+
+ server = nfs_alloc_server();
+ if (!server)
+ return ERR_PTR(-ENOMEM);
+
+ /* set up the general RPC client */
+ error = nfs4_init_server(server, data);
+ if (error < 0)
+ goto error;
+
+ error = nfs4_server_common_setup(server, mntfh);
+ if (error < 0)
+ goto error;
+
+ dprintk("<-- nfs4_create_server() = %p\n", server);
+ return server;
+
+error:
+ nfs_free_server(server);
+ dprintk("<-- nfs4_create_server() = error %d\n", error);
+ return ERR_PTR(error);
+}
+
+/*
+ * Create an NFS4 referral server record
+ */
+struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
+ struct nfs_fh *mntfh)
+{
+ struct nfs_client *parent_client;
+ struct nfs_server *server, *parent_server;
+ int error;
+
+ dprintk("--> nfs4_create_referral_server()\n");
+
+ server = nfs_alloc_server();
+ if (!server)
+ return ERR_PTR(-ENOMEM);
+
+ parent_server = NFS_SB(data->sb);
+ parent_client = parent_server->nfs_client;
+
+ /* Initialise the client representation from the parent server */
+ nfs_server_copy_userdata(server, parent_server);
+ server->caps |= NFS_CAP_ATOMIC_OPEN|NFS_CAP_CHANGE_ATTR;
+
+ /* Get a client representation.
+ * Note: NFSv4 always uses TCP. */
+ error = nfs4_set_client(server, data->hostname,
+ data->addr,
+ data->addrlen,
+ parent_client->cl_ipaddr,
+ data->authflavor,
+ rpc_protocol(parent_server->client),
+ parent_server->client->cl_timeout,
+ parent_client->cl_mvops->minor_version,
+ parent_client->net);
+ if (error < 0)
+ goto error;
+
+ error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
+ if (error < 0)
+ goto error;
+
+ error = nfs4_server_common_setup(server, mntfh);
+ if (error < 0)
+ goto error;
+
+ dprintk("<-- nfs_create_referral_server() = %p\n", server);
+ return server;
+
+error:
+ nfs_free_server(server);
+ dprintk("<-- nfs4_create_referral_server() = error %d\n", error);
+ return ERR_PTR(error);
+}
+
+#endif /* CONFIG_NFS_V4 */
+
+/*
+ * Clone an NFS2, NFS3 or NFS4 server record
+ */
+struct nfs_server *nfs_clone_server(struct nfs_server *source,
+ struct nfs_fh *fh,
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t flavor)
+{
+ struct nfs_server *server;
+ struct nfs_fattr *fattr_fsinfo;
+ int error;
+
+ dprintk("--> nfs_clone_server(,%llx:%llx,)\n",
+ (unsigned long long) fattr->fsid.major,
+ (unsigned long long) fattr->fsid.minor);
+
+ server = nfs_alloc_server();
+ if (!server)
+ return ERR_PTR(-ENOMEM);
+
+ error = -ENOMEM;
+ fattr_fsinfo = nfs_alloc_fattr();
+ if (fattr_fsinfo == NULL)
+ goto out_free_server;
+
+ /* Copy data from the source */
+ server->nfs_client = source->nfs_client;
+ server->destroy = source->destroy;
+ atomic_inc(&server->nfs_client->cl_count);
+ nfs_server_copy_userdata(server, source);
+
+ server->fsid = fattr->fsid;
+
+ error = nfs_init_server_rpcclient(server,
+ source->client->cl_timeout,
+ flavor);
+ if (error < 0)
+ goto out_free_server;
+ if (!IS_ERR(source->client_acl))
+ nfs_init_server_aclclient(server);
+
+ /* probe the filesystem info for this server filesystem */
+ error = nfs_probe_fsinfo(server, fh, fattr_fsinfo);
+ if (error < 0)
+ goto out_free_server;
+
+ if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
+ server->namelen = NFS4_MAXNAMLEN;
+
+ dprintk("Cloned FSID: %llx:%llx\n",
+ (unsigned long long) server->fsid.major,
+ (unsigned long long) server->fsid.minor);
+
+ error = nfs_start_lockd(server);
+ if (error < 0)
+ goto out_free_server;
+
+ nfs_server_insert_lists(server);
+ server->mount_time = jiffies;
+
+ nfs_free_fattr(fattr_fsinfo);
+ dprintk("<-- nfs_clone_server() = %p\n", server);
+ return server;
+
+out_free_server:
+ nfs_free_fattr(fattr_fsinfo);
+ nfs_free_server(server);
+ dprintk("<-- nfs_clone_server() = error %d\n", error);
+ return ERR_PTR(error);
+}
+
+void nfs_clients_init(struct net *net)
+{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ INIT_LIST_HEAD(&nn->nfs_client_list);
+ INIT_LIST_HEAD(&nn->nfs_volume_list);
+#ifdef CONFIG_NFS_V4
+ idr_init(&nn->cb_ident_idr);
+#endif
+ spin_lock_init(&nn->nfs_client_lock);
+}
+
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *proc_fs_nfs;
+
+static int nfs_server_list_open(struct inode *inode, struct file *file);
+static void *nfs_server_list_start(struct seq_file *p, loff_t *pos);
+static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos);
+static void nfs_server_list_stop(struct seq_file *p, void *v);
+static int nfs_server_list_show(struct seq_file *m, void *v);
+
+static const struct seq_operations nfs_server_list_ops = {
+ .start = nfs_server_list_start,
+ .next = nfs_server_list_next,
+ .stop = nfs_server_list_stop,
+ .show = nfs_server_list_show,
+};
+
+static const struct file_operations nfs_server_list_fops = {
+ .open = nfs_server_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .owner = THIS_MODULE,
+};
+
+static int nfs_volume_list_open(struct inode *inode, struct file *file);
+static void *nfs_volume_list_start(struct seq_file *p, loff_t *pos);
+static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos);
+static void nfs_volume_list_stop(struct seq_file *p, void *v);
+static int nfs_volume_list_show(struct seq_file *m, void *v);
+
+static const struct seq_operations nfs_volume_list_ops = {
+ .start = nfs_volume_list_start,
+ .next = nfs_volume_list_next,
+ .stop = nfs_volume_list_stop,
+ .show = nfs_volume_list_show,
+};
+
+static const struct file_operations nfs_volume_list_fops = {
+ .open = nfs_volume_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * open "/proc/fs/nfsfs/servers" which provides a summary of servers with which
+ * we're dealing
+ */
+static int nfs_server_list_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ int ret;
+ struct pid_namespace *pid_ns = file->f_dentry->d_sb->s_fs_info;
+ struct net *net = pid_ns->child_reaper->nsproxy->net_ns;
+
+ ret = seq_open(file, &nfs_server_list_ops);
+ if (ret < 0)
+ return ret;
+
+ m = file->private_data;
+ m->private = net;
+
+ return 0;
+}
+
+/*
+ * set up the iterator to start reading from the server list and return the first item
+ */
+static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos)
+{
+ struct nfs_net *nn = net_generic(m->private, nfs_net_id);
+
+ /* lock the list against modification */
+ spin_lock(&nn->nfs_client_lock);
+ return seq_list_start_head(&nn->nfs_client_list, *_pos);
+}
+
+/*
+ * move to next server
+ */
+static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ struct nfs_net *nn = net_generic(p->private, nfs_net_id);
+
+ return seq_list_next(v, &nn->nfs_client_list, pos);
+}
+
+/*
+ * clean up after reading from the server list
+ */
+static void nfs_server_list_stop(struct seq_file *p, void *v)
+{
+ struct nfs_net *nn = net_generic(p->private, nfs_net_id);
+
+ spin_unlock(&nn->nfs_client_lock);
+}
+
+/*
+ * display a header line followed by one line per server
+ */
+static int nfs_server_list_show(struct seq_file *m, void *v)
+{
+ struct nfs_client *clp;
+ struct nfs_net *nn = net_generic(m->private, nfs_net_id);
+
+ /* display header on line 1 */
+ if (v == &nn->nfs_client_list) {
+ seq_puts(m, "NV SERVER PORT USE HOSTNAME\n");
+ return 0;
+ }
+
+ /* display one server per line on subsequent lines */
+ clp = list_entry(v, struct nfs_client, cl_share_link);
+
+ /* Check if the client is initialized */
+ if (clp->cl_cons_state != NFS_CS_READY)
+ return 0;
+
+ rcu_read_lock();
+ seq_printf(m, "v%u %s %s %3d %s\n",
+ clp->rpc_ops->version,
+ rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
+ rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
+ atomic_read(&clp->cl_count),
+ clp->cl_hostname);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/*
+ * open "/proc/fs/nfsfs/volumes" which provides a summary of extant volumes
+ */
+static int nfs_volume_list_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ int ret;
+ struct pid_namespace *pid_ns = file->f_dentry->d_sb->s_fs_info;
+ struct net *net = pid_ns->child_reaper->nsproxy->net_ns;
+
+ ret = seq_open(file, &nfs_volume_list_ops);
+ if (ret < 0)
+ return ret;
+
+ m = file->private_data;
+ m->private = net;
+
+ return 0;
+}
+
+/*
+ * set up the iterator to start reading from the volume list and return the first item
+ */
+static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos)
+{
+ struct nfs_net *nn = net_generic(m->private, nfs_net_id);
+
+ /* lock the list against modification */
+ spin_lock(&nn->nfs_client_lock);
+ return seq_list_start_head(&nn->nfs_volume_list, *_pos);
+}
+
+/*
+ * move to next volume
+ */
+static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos)
+{
+ struct nfs_net *nn = net_generic(p->private, nfs_net_id);
+
+ return seq_list_next(v, &nn->nfs_volume_list, pos);
+}
+
+/*
+ * clean up after reading from the volume list
+ */
+static void nfs_volume_list_stop(struct seq_file *p, void *v)
+{
+ struct nfs_net *nn = net_generic(p->private, nfs_net_id);
+
+ spin_unlock(&nn->nfs_client_lock);
+}
+
+/*
+ * display a header line followed by one line per volume
+ */
+static int nfs_volume_list_show(struct seq_file *m, void *v)
+{
+ struct nfs_server *server;
+ struct nfs_client *clp;
+ char dev[8], fsid[17];
+ struct nfs_net *nn = net_generic(m->private, nfs_net_id);
+
+ /* display header on line 1 */
+ if (v == &nn->nfs_volume_list) {
+ seq_puts(m, "NV SERVER PORT DEV FSID FSC\n");
+ return 0;
+ }
+ /* display one volume per line on subsequent lines */
+ server = list_entry(v, struct nfs_server, master_link);
+ clp = server->nfs_client;
+
+ snprintf(dev, 8, "%u:%u",
+ MAJOR(server->s_dev), MINOR(server->s_dev));
+
+ snprintf(fsid, 17, "%llx:%llx",
+ (unsigned long long) server->fsid.major,
+ (unsigned long long) server->fsid.minor);
+
+ rcu_read_lock();
+ seq_printf(m, "v%u %s %s %-7s %-17s %s\n",
+ clp->rpc_ops->version,
+ rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
+ rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
+ dev,
+ fsid,
+ nfs_server_fscache_state(server));
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/*
+ * initialise the /proc/fs/nfsfs/ directory
+ */
+int __init nfs_fs_proc_init(void)
+{
+ struct proc_dir_entry *p;
+
+ proc_fs_nfs = proc_mkdir("fs/nfsfs", NULL);
+ if (!proc_fs_nfs)
+ goto error_0;
+
+ /* a file of servers with which we're dealing */
+ p = proc_create("servers", S_IFREG|S_IRUGO,
+ proc_fs_nfs, &nfs_server_list_fops);
+ if (!p)
+ goto error_1;
+
+ /* a file of volumes that we have mounted */
+ p = proc_create("volumes", S_IFREG|S_IRUGO,
+ proc_fs_nfs, &nfs_volume_list_fops);
+ if (!p)
+ goto error_2;
+ return 0;
+
+error_2:
+ remove_proc_entry("servers", proc_fs_nfs);
+error_1:
+ remove_proc_entry("fs/nfsfs", NULL);
+error_0:
+ return -ENOMEM;
+}
+
+/*
+ * clean up the /proc/fs/nfsfs/ directory
+ */
+void nfs_fs_proc_exit(void)
+{
+ remove_proc_entry("volumes", proc_fs_nfs);
+ remove_proc_entry("servers", proc_fs_nfs);
+ remove_proc_entry("fs/nfsfs", NULL);
+}
+
+#endif /* CONFIG_PROC_FS */
+
+module_param(nfs4_disable_idmapping, bool, 0644);
+MODULE_PARM_DESC(nfs4_disable_idmapping,
+ "Turn off NFSv4 idmapping when using 'sec=sys'");
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
new file mode 100644
index 00000000..89af1d26
--- /dev/null
+++ b/fs/nfs/delegation.c
@@ -0,0 +1,705 @@
+/*
+ * linux/fs/nfs/delegation.c
+ *
+ * Copyright (C) 2004 Trond Myklebust
+ *
+ * NFS file delegation management
+ *
+ */
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_xdr.h>
+
+#include "nfs4_fs.h"
+#include "delegation.h"
+#include "internal.h"
+
+static void nfs_free_delegation(struct nfs_delegation *delegation)
+{
+ if (delegation->cred) {
+ put_rpccred(delegation->cred);
+ delegation->cred = NULL;
+ }
+ kfree_rcu(delegation, rcu);
+}
+
+/**
+ * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
+ * @delegation: delegation to process
+ *
+ */
+void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
+{
+ set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
+}
+
+/**
+ * nfs_have_delegation - check if inode has a delegation
+ * @inode: inode to check
+ * @flags: delegation types to check for
+ *
+ * Returns one if inode has the indicated delegation, otherwise zero.
+ */
+int nfs_have_delegation(struct inode *inode, fmode_t flags)
+{
+ struct nfs_delegation *delegation;
+ int ret = 0;
+
+ flags &= FMODE_READ|FMODE_WRITE;
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (delegation != NULL && (delegation->type & flags) == flags) {
+ nfs_mark_delegation_referenced(delegation);
+ ret = 1;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
+{
+ struct inode *inode = state->inode;
+ struct file_lock *fl;
+ int status = 0;
+
+ if (inode->i_flock == NULL)
+ goto out;
+
+ /* Protect inode->i_flock using the file locks lock */
+ lock_flocks();
+ for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
+ if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
+ continue;
+ if (nfs_file_open_context(fl->fl_file) != ctx)
+ continue;
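+ /* nfs4_lock_delegation_recall() may sleep, so the file locks
+  * lock must be dropped around the call. */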
+ unlock_flocks();
+ status = nfs4_lock_delegation_recall(state, fl);
+ if (status < 0)
+ goto out;
+ lock_flocks();
+ }
+ unlock_flocks();
+out:
+ return status;
+}
+
+static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_open_context *ctx;
+ struct nfs4_state *state;
+ int err;
+
+again:
+ spin_lock(&inode->i_lock);
+ list_for_each_entry(ctx, &nfsi->open_files, list) {
+ state = ctx->state;
+ if (state == NULL)
+ continue;
+ if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
+ continue;
+ if (!nfs4_stateid_match(&state->stateid, stateid))
+ continue;
+ get_nfs_open_context(ctx);
+ spin_unlock(&inode->i_lock);
+ err = nfs4_open_delegation_recall(ctx, state, stateid);
+ if (err >= 0)
+ err = nfs_delegation_claim_locks(ctx, state);
+ put_nfs_open_context(ctx);
+ if (err != 0)
+ return err;
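+ /* The open_files list may have changed while i_lock was
+  * dropped, so restart the scan from the beginning. */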
+ goto again;
+ }
+ spin_unlock(&inode->i_lock);
+ return 0;
+}
+
+/**
+ * nfs_inode_reclaim_delegation - process a delegation reclaim request
+ * @inode: inode to process
+ * @cred: credential to use for request
+ * @res: new delegation state from server
+ *
+ */
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+ struct nfs_openres *res)
+{
+ struct nfs_delegation *delegation;
+ struct rpc_cred *oldcred = NULL;
+
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (delegation != NULL) {
+ spin_lock(&delegation->lock);
+ if (delegation->inode != NULL) {
+ nfs4_stateid_copy(&delegation->stateid, &res->delegation);
+ delegation->type = res->delegation_type;
+ delegation->maxsize = res->maxsize;
+ oldcred = delegation->cred;
+ delegation->cred = get_rpccred(cred);
+ clear_bit(NFS_DELEGATION_NEED_RECLAIM,
+ &delegation->flags);
+ NFS_I(inode)->delegation_state = delegation->type;
+ spin_unlock(&delegation->lock);
+ put_rpccred(oldcred);
+ rcu_read_unlock();
+ } else {
+ /* We appear to have raced with a delegation return. */
+ spin_unlock(&delegation->lock);
+ rcu_read_unlock();
+ nfs_inode_set_delegation(inode, cred, res);
+ }
+ } else {
+ rcu_read_unlock();
+ }
+}
+
+static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
+{
+ int res = 0;
+
+ res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
+ nfs_free_delegation(delegation);
+ return res;
+}
+
+static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
+{
+ struct inode *inode = NULL;
+
+ spin_lock(&delegation->lock);
+ if (delegation->inode != NULL)
+ inode = igrab(delegation->inode);
+ spin_unlock(&delegation->lock);
+ return inode;
+}
+
+static struct nfs_delegation *
+nfs_detach_delegation_locked(struct nfs_inode *nfsi,
+ struct nfs_server *server)
+{
+ struct nfs_delegation *delegation =
+ rcu_dereference_protected(nfsi->delegation,
+ lockdep_is_held(&server->nfs_client->cl_lock));
+
+ if (delegation == NULL)
+ goto nomatch;
+
+ spin_lock(&delegation->lock);
+ list_del_rcu(&delegation->super_list);
+ delegation->inode = NULL;
+ nfsi->delegation_state = 0;
+ rcu_assign_pointer(nfsi->delegation, NULL);
+ spin_unlock(&delegation->lock);
+ return delegation;
+nomatch:
+ return NULL;
+}
+
+static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
+ struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs_delegation *delegation;
+
+ spin_lock(&clp->cl_lock);
+ delegation = nfs_detach_delegation_locked(nfsi, server);
+ spin_unlock(&clp->cl_lock);
+ return delegation;
+}
+
+/**
+ * nfs_inode_set_delegation - set up a delegation on an inode
+ * @inode: inode to which delegation applies
+ * @cred: cred to use for subsequent delegation processing
+ * @res: new delegation state from server
+ *
+ * Returns zero on success, or a negative errno value.
+ */
+int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_delegation *delegation, *old_delegation;
+ struct nfs_delegation *freeme = NULL;
+ int status = 0;
+
+ delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
+ if (delegation == NULL)
+ return -ENOMEM;
+ nfs4_stateid_copy(&delegation->stateid, &res->delegation);
+ delegation->type = res->delegation_type;
+ delegation->maxsize = res->maxsize;
+ delegation->change_attr = inode->i_version;
+ delegation->cred = get_rpccred(cred);
+ delegation->inode = inode;
+ delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
+ spin_lock_init(&delegation->lock);
+
+ spin_lock(&clp->cl_lock);
+ old_delegation = rcu_dereference_protected(nfsi->delegation,
+ lockdep_is_held(&clp->cl_lock));
+ if (old_delegation != NULL) {
+ if (nfs4_stateid_match(&delegation->stateid,
+ &old_delegation->stateid) &&
+ delegation->type == old_delegation->type) {
+ goto out;
+ }
+ /*
+ * Deal with broken servers that hand out two
+ * delegations for the same file.
+ * Allow for upgrades to a WRITE delegation, but
+ * nothing else.
+ */
+ dfprintk(FILE, "%s: server %s handed out "
+ "a duplicate delegation!\n",
+ __func__, clp->cl_hostname);
+ if (delegation->type == old_delegation->type ||
+ !(delegation->type & FMODE_WRITE)) {
+ freeme = delegation;
+ delegation = NULL;
+ goto out;
+ }
+ freeme = nfs_detach_delegation_locked(nfsi, server);
+ }
+ list_add_rcu(&delegation->super_list, &server->delegations);
+ nfsi->delegation_state = delegation->type;
+ rcu_assign_pointer(nfsi->delegation, delegation);
+ delegation = NULL;
+
+ /* Ensure we revalidate the attributes and page cache! */
+ spin_lock(&inode->i_lock);
+ nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
+ spin_unlock(&inode->i_lock);
+
+out:
+ spin_unlock(&clp->cl_lock);
+ if (delegation != NULL)
+ nfs_free_delegation(delegation);
+ if (freeme != NULL)
+ nfs_do_return_delegation(inode, freeme, 0);
+ return status;
+}
+
+/*
+ * Basic procedure for returning a delegation to the server
+ */
+static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ int err;
+
+ /*
+ * Guard against new delegated open/lock/unlock calls and against
+ * state recovery
+ */
+ down_write(&nfsi->rwsem);
+ err = nfs_delegation_claim_opens(inode, &delegation->stateid);
+ up_write(&nfsi->rwsem);
+ if (err)
+ goto out;
+
+ err = nfs_do_return_delegation(inode, delegation, issync);
+out:
+ return err;
+}
+
+/**
+ * nfs_client_return_marked_delegations - return previously marked delegations
+ * @clp: nfs_client to process
+ *
+ * Returns zero on success, or a negative errno value.
+ */
+int nfs_client_return_marked_delegations(struct nfs_client *clp)
+{
+ struct nfs_delegation *delegation;
+ struct nfs_server *server;
+ struct inode *inode;
+ int err = 0;
+
+restart:
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ list_for_each_entry_rcu(delegation, &server->delegations,
+ super_list) {
+ if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
+ &delegation->flags))
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+ continue;
+ delegation = nfs_detach_delegation(NFS_I(inode),
+ server);
+ rcu_read_unlock();
+
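+ /* The RCU read lock was dropped above because returning the
+  * delegation may sleep; rescan the list from the top afterwards. */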
+ if (delegation != NULL) {
+ filemap_flush(inode->i_mapping);
+ err = __nfs_inode_return_delegation(inode,
+ delegation, 0);
+ }
+ iput(inode);
+ if (!err)
+ goto restart;
+ set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+ return err;
+ }
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+/**
+ * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
+ * @inode: inode to process
+ *
+ * Does not protect against delegation reclaims, therefore really only safe
+ * to be called from nfs4_clear_inode().
+ */
+void nfs_inode_return_delegation_noreclaim(struct inode *inode)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_delegation *delegation;
+
+ if (rcu_access_pointer(nfsi->delegation) != NULL) {
+ delegation = nfs_detach_delegation(nfsi, server);
+ if (delegation != NULL)
+ nfs_do_return_delegation(inode, delegation, 0);
+ }
+}
+
+/**
+ * nfs_inode_return_delegation - synchronously return a delegation
+ * @inode: inode to process
+ *
+ * Returns zero on success, or a negative errno value.
+ */
+int nfs_inode_return_delegation(struct inode *inode)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_delegation *delegation;
+ int err = 0;
+
+ if (rcu_access_pointer(nfsi->delegation) != NULL) {
+ delegation = nfs_detach_delegation(nfsi, server);
+ if (delegation != NULL) {
+ nfs_wb_all(inode);
+ err = __nfs_inode_return_delegation(inode, delegation, 1);
+ }
+ }
+ return err;
+}
+
+static void nfs_mark_return_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation)
+{
+ set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+}
+
+/**
+ * nfs_super_return_all_delegations - return delegations for one superblock
+ * @sb: sb to process
+ *
+ */
+void nfs_super_return_all_delegations(struct super_block *sb)
+{
+ struct nfs_server *server = NFS_SB(sb);
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs_delegation *delegation;
+
+ if (clp == NULL)
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ spin_lock(&delegation->lock);
+ set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ spin_unlock(&delegation->lock);
+ }
+ rcu_read_unlock();
+
+ if (nfs_client_return_marked_delegations(clp) != 0)
+ nfs4_schedule_state_manager(clp);
+}
+
+static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
+ fmode_t flags)
+{
+ struct nfs_delegation *delegation;
+
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
+ continue;
+ if (delegation->type & flags)
+ nfs_mark_return_delegation(server, delegation);
+ }
+}
+
+static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
+ fmode_t flags)
+{
+ struct nfs_server *server;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs_mark_return_all_delegation_types(server, flags);
+ rcu_read_unlock();
+}
+
+static void nfs_delegation_run_state_manager(struct nfs_client *clp)
+{
+ if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
+ nfs4_schedule_state_manager(clp);
+}
+
+void nfs_remove_bad_delegation(struct inode *inode)
+{
+ struct nfs_delegation *delegation;
+
+ delegation = nfs_detach_delegation(NFS_I(inode), NFS_SERVER(inode));
+ if (delegation) {
+ nfs_inode_find_state_and_recover(inode, &delegation->stateid);
+ nfs_free_delegation(delegation);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
+
+/**
+ * nfs_expire_all_delegation_types
+ * @clp: client to process
+ * @flags: delegation types to expire
+ *
+ */
+void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
+{
+ nfs_client_mark_return_all_delegation_types(clp, flags);
+ nfs_delegation_run_state_manager(clp);
+}
+
+/**
+ * nfs_expire_all_delegations
+ * @clp: client to process
+ *
+ */
+void nfs_expire_all_delegations(struct nfs_client *clp)
+{
+ nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
+}
+
+static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
+{
+ struct nfs_delegation *delegation;
+
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
+ continue;
+ nfs_mark_return_delegation(server, delegation);
+ }
+}
+
+/**
+ * nfs_expire_unreferenced_delegations - Eliminate unused delegations
+ * @clp: nfs_client to process
+ *
+ */
+void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs_mark_return_unreferenced_delegations(server);
+ rcu_read_unlock();
+
+ nfs_delegation_run_state_manager(clp);
+}
+
+/**
+ * nfs_async_inode_return_delegation - asynchronously return a delegation
+ * @inode: inode to process
+ * @stateid: state ID information
+ *
+ * Returns zero on success, or a negative errno value.
+ */
+int nfs_async_inode_return_delegation(struct inode *inode,
+ const nfs4_stateid *stateid)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs_delegation *delegation;
+
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+
+ /* The delegation may already have been detached by a concurrent
+  * return, so guard against a NULL pointer here. */
+ if (delegation == NULL ||
+     !clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+ nfs_mark_return_delegation(server, delegation);
+ rcu_read_unlock();
+
+ nfs_delegation_run_state_manager(clp);
+ return 0;
+}
+
+static struct inode *
+nfs_delegation_find_inode_server(struct nfs_server *server,
+ const struct nfs_fh *fhandle)
+{
+ struct nfs_delegation *delegation;
+ struct inode *res = NULL;
+
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
+ spin_lock(&delegation->lock);
+ if (delegation->inode != NULL &&
+ nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
+ res = igrab(delegation->inode);
+ }
+ spin_unlock(&delegation->lock);
+ if (res != NULL)
+ break;
+ }
+ return res;
+}
+
+/**
+ * nfs_delegation_find_inode - retrieve the inode associated with a delegation
+ * @clp: client state handle
+ * @fhandle: filehandle from a delegation recall
+ *
+ * Returns pointer to inode matching "fhandle," or NULL if a matching inode
+ * cannot be found.
+ */
+struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
+ const struct nfs_fh *fhandle)
+{
+ struct nfs_server *server;
+ struct inode *res = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ res = nfs_delegation_find_inode_server(server, fhandle);
+ if (res != NULL)
+ break;
+ }
+ rcu_read_unlock();
+ return res;
+}
+
+static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
+{
+ struct nfs_delegation *delegation;
+
+ list_for_each_entry_rcu(delegation, &server->delegations, super_list)
+ set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+}
+
+/**
+ * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
+ * @clp: nfs_client to process
+ *
+ */
+void nfs_delegation_mark_reclaim(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs_delegation_mark_reclaim_server(server);
+ rcu_read_unlock();
+}
+
+/**
+ * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
+ * @clp: nfs_client to process
+ *
+ */
+void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
+{
+ struct nfs_delegation *delegation;
+ struct nfs_server *server;
+ struct inode *inode;
+
+restart:
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ list_for_each_entry_rcu(delegation, &server->delegations,
+ super_list) {
+ if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
+ &delegation->flags) == 0)
+ continue;
+ inode = nfs_delegation_grab_inode(delegation);
+ if (inode == NULL)
+ continue;
+ delegation = nfs_detach_delegation(NFS_I(inode),
+ server);
+ rcu_read_unlock();
+
+ if (delegation != NULL)
+ nfs_free_delegation(delegation);
+ iput(inode);
+ goto restart;
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * nfs_delegations_present - check for existence of delegations
+ * @clp: client state handle
+ *
+ * Returns one if there are any nfs_delegation structures attached
+ * to this nfs_client.
+ */
+int nfs_delegations_present(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ int ret = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ if (!list_empty(&server->delegations)) {
+ ret = 1;
+ break;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * nfs4_copy_delegation_stateid - Copy inode's state ID information
+ * @dst: stateid data structure to fill in
+ * @inode: inode to check
+ * @flags: delegation type requirement
+ *
+ * Returns "true" and fills in "dst->data" * if inode had a delegation,
+ * otherwise "false" is returned.
+ */
+bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
+ fmode_t flags)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_delegation *delegation;
+ bool ret;
+
+ flags &= FMODE_READ|FMODE_WRITE;
+ rcu_read_lock();
+ delegation = rcu_dereference(nfsi->delegation);
+ ret = (delegation != NULL && (delegation->type & flags) == flags);
+ if (ret) {
+ nfs4_stateid_copy(dst, &delegation->stateid);
+ nfs_mark_delegation_referenced(delegation);
+ }
+ rcu_read_unlock();
+ return ret;
+}
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
new file mode 100644
index 00000000..cd6a7a8d
--- /dev/null
+++ b/fs/nfs/delegation.h
@@ -0,0 +1,79 @@
+/*
+ * linux/fs/nfs/delegation.h
+ *
+ * Copyright (c) Trond Myklebust
+ *
+ * Definitions pertaining to NFS delegated files
+ */
+#ifndef FS_NFS_DELEGATION_H
+#define FS_NFS_DELEGATION_H
+
+#if defined(CONFIG_NFS_V4)
+/*
+ * NFSv4 delegation
+ */
+struct nfs_delegation {
+ struct list_head super_list;
+ struct rpc_cred *cred;
+ struct inode *inode;
+ nfs4_stateid stateid;
+ fmode_t type;
+ loff_t maxsize;
+ __u64 change_attr;
+ unsigned long flags;
+ spinlock_t lock;
+ struct rcu_head rcu;
+};
+
+enum {
+ NFS_DELEGATION_NEED_RECLAIM = 0,
+ NFS_DELEGATION_RETURN,
+ NFS_DELEGATION_REFERENCED,
+};
+
+int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
+int nfs_inode_return_delegation(struct inode *inode);
+int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
+void nfs_inode_return_delegation_noreclaim(struct inode *inode);
+
+struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
+void nfs_super_return_all_delegations(struct super_block *sb);
+void nfs_expire_all_delegations(struct nfs_client *clp);
+void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags);
+void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
+int nfs_client_return_marked_delegations(struct nfs_client *clp);
+int nfs_delegations_present(struct nfs_client *clp);
+void nfs_remove_bad_delegation(struct inode *inode);
+
+void nfs_delegation_mark_reclaim(struct nfs_client *clp);
+void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
+
+/* NFSv4 delegation-related procedures */
+int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
+int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl);
+bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
+
+void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
+int nfs_have_delegation(struct inode *inode, fmode_t flags);
+
+#else
+static inline int nfs_have_delegation(struct inode *inode, fmode_t flags)
+{
+ return 0;
+}
+
+static inline int nfs_inode_return_delegation(struct inode *inode)
+{
+ return 0;
+}
+#endif
+
+static inline int nfs_have_delegated_attributes(struct inode *inode)
+{
+ return nfs_have_delegation(inode, FMODE_READ) &&
+ !(NFS_I(inode)->cache_validity & NFS_INO_REVAL_FORCED);
+}
+
+#endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
new file mode 100644
index 00000000..8789210c
--- /dev/null
+++ b/fs/nfs/dir.c
@@ -0,0 +1,2362 @@
+/*
+ * linux/fs/nfs/dir.c
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ *
+ * nfs directory handling functions
+ *
+ * 10 Apr 1996 Added silly rename for unlink --okir
+ * 28 Sep 1996 Improved directory cache --okir
+ * 23 Aug 1997 Claus Heine claus@momo.math.rwth-aachen.de
+ * Re-implemented silly rename for unlink, newly implemented
+ * silly rename for nfs_rename() following the suggestions
+ * of Olaf Kirch (okir) found in this file.
+ * Following Linus comments on my original hack, this version
+ * depends only on the dcache stuff and doesn't touch the inode
+ * layer (iput() and friends).
+ * 6 Jun 1999 Cache readdir lookups in the page cache. -DaveM
+ */
+
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/stat.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/pagemap.h>
+#include <linux/pagevec.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/sched.h>
+#include <linux/kmemleak.h>
+#include <linux/xattr.h>
+
+#include "delegation.h"
+#include "iostat.h"
+#include "internal.h"
+#include "fscache.h"
+
+/* #define NFS_DEBUG_VERBOSE 1 */
+
+static int nfs_opendir(struct inode *, struct file *);
+static int nfs_closedir(struct inode *, struct file *);
+static int nfs_readdir(struct file *, void *, filldir_t);
+static struct dentry *nfs_lookup(struct inode *, struct dentry *, struct nameidata *);
+static int nfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
+static int nfs_mkdir(struct inode *, struct dentry *, umode_t);
+static int nfs_rmdir(struct inode *, struct dentry *);
+static int nfs_unlink(struct inode *, struct dentry *);
+static int nfs_symlink(struct inode *, struct dentry *, const char *);
+static int nfs_link(struct dentry *, struct inode *, struct dentry *);
+static int nfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
+static int nfs_rename(struct inode *, struct dentry *,
+ struct inode *, struct dentry *);
+static int nfs_fsync_dir(struct file *, loff_t, loff_t, int);
+static loff_t nfs_llseek_dir(struct file *, loff_t, int);
+static void nfs_readdir_clear_array(struct page*);
+
+const struct file_operations nfs_dir_operations = {
+ .llseek = nfs_llseek_dir,
+ .read = generic_read_dir,
+ .readdir = nfs_readdir,
+ .open = nfs_opendir,
+ .release = nfs_closedir,
+ .fsync = nfs_fsync_dir,
+};
+
+const struct inode_operations nfs_dir_inode_operations = {
+ .create = nfs_create,
+ .lookup = nfs_lookup,
+ .link = nfs_link,
+ .unlink = nfs_unlink,
+ .symlink = nfs_symlink,
+ .mkdir = nfs_mkdir,
+ .rmdir = nfs_rmdir,
+ .mknod = nfs_mknod,
+ .rename = nfs_rename,
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+};
+
+const struct address_space_operations nfs_dir_aops = {
+ .freepage = nfs_readdir_clear_array,
+};
+
+#ifdef CONFIG_NFS_V3
+const struct inode_operations nfs3_dir_inode_operations = {
+ .create = nfs_create,
+ .lookup = nfs_lookup,
+ .link = nfs_link,
+ .unlink = nfs_unlink,
+ .symlink = nfs_symlink,
+ .mkdir = nfs_mkdir,
+ .rmdir = nfs_rmdir,
+ .mknod = nfs_mknod,
+ .rename = nfs_rename,
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+ .listxattr = nfs3_listxattr,
+ .getxattr = nfs3_getxattr,
+ .setxattr = nfs3_setxattr,
+ .removexattr = nfs3_removexattr,
+};
+#endif /* CONFIG_NFS_V3 */
+
+#ifdef CONFIG_NFS_V4
+
+static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
+static int nfs_open_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd);
+const struct inode_operations nfs4_dir_inode_operations = {
+ .create = nfs_open_create,
+ .lookup = nfs_atomic_lookup,
+ .link = nfs_link,
+ .unlink = nfs_unlink,
+ .symlink = nfs_symlink,
+ .mkdir = nfs_mkdir,
+ .rmdir = nfs_rmdir,
+ .mknod = nfs_mknod,
+ .rename = nfs_rename,
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+ .getxattr = generic_getxattr,
+ .setxattr = generic_setxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
+};
+
+#endif /* CONFIG_NFS_V4 */
+
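+/*
+ * Allocate a per-open directory context, seeded with the directory's
+ * current attribute generation count so that later readdir calls can
+ * detect attribute cache invalidation.
+ */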
+static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir, struct rpc_cred *cred)
+{
+ struct nfs_open_dir_context *ctx;
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (ctx != NULL) {
+ ctx->duped = 0;
+ ctx->attr_gencount = NFS_I(dir)->attr_gencount;
+ ctx->dir_cookie = 0;
+ ctx->dup_cookie = 0;
+ ctx->cred = get_rpccred(cred);
+ return ctx;
+ }
+ return ERR_PTR(-ENOMEM);
+}
+
+static void put_nfs_open_dir_context(struct nfs_open_dir_context *ctx)
+{
+ put_rpccred(ctx->cred);
+ kfree(ctx);
+}
+
+/*
+ * Open file
+ */
+static int
+nfs_opendir(struct inode *inode, struct file *filp)
+{
+ int res = 0;
+ struct nfs_open_dir_context *ctx;
+ struct rpc_cred *cred;
+
+ dfprintk(FILE, "NFS: open dir(%s/%s)\n",
+ filp->f_path.dentry->d_parent->d_name.name,
+ filp->f_path.dentry->d_name.name);
+
+ nfs_inc_stats(inode, NFSIOS_VFSOPEN);
+
+ cred = rpc_lookup_cred();
+ if (IS_ERR(cred))
+ return PTR_ERR(cred);
+ ctx = alloc_nfs_open_dir_context(inode, cred);
+ if (IS_ERR(ctx)) {
+ res = PTR_ERR(ctx);
+ goto out;
+ }
+ filp->private_data = ctx;
+ if (filp->f_path.dentry == filp->f_path.mnt->mnt_root) {
+ /* This is a mountpoint, so d_revalidate will never
+ * have been called, so we need to refresh the
+ * inode (for close-open consistency) ourselves.
+ */
+ __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ }
+out:
+ put_rpccred(cred);
+ return res;
+}
+
+static int
+nfs_closedir(struct inode *inode, struct file *filp)
+{
+ put_nfs_open_dir_context(filp->private_data);
+ return 0;
+}
+
+struct nfs_cache_array_entry {
+ u64 cookie;
+ u64 ino;
+ struct qstr string;
+ unsigned char d_type;
+};
+
+struct nfs_cache_array {
+ int size;
+ int eof_index;
+ u64 last_cookie;
+ struct nfs_cache_array_entry array[0];
+};
+
+typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int);
+typedef struct {
+ struct file *file;
+ struct page *page;
+ unsigned long page_index;
+ u64 *dir_cookie;
+ u64 last_cookie;
+ loff_t current_index;
+ decode_dirent_t decode;
+
+ unsigned long timestamp;
+ unsigned long gencount;
+ unsigned int cache_entry_index;
+ unsigned int plus:1;
+ unsigned int eof:1;
+} nfs_readdir_descriptor_t;
+
+/*
+ * The caller is responsible for calling nfs_readdir_release_array(page)
+ */
+static
+struct nfs_cache_array *nfs_readdir_get_array(struct page *page)
+{
+ void *ptr;
+ if (page == NULL)
+ return ERR_PTR(-EIO);
+ ptr = kmap(page);
+ if (ptr == NULL)
+ return ERR_PTR(-ENOMEM);
+ return ptr;
+}
+
+static
+void nfs_readdir_release_array(struct page *page)
+{
+ kunmap(page);
+}
+
+/*
+ * we are freeing strings created by nfs_add_to_readdir_array()
+ */
+static
+void nfs_readdir_clear_array(struct page *page)
+{
+ struct nfs_cache_array *array;
+ int i;
+
+ array = kmap_atomic(page);
+ for (i = 0; i < array->size; i++)
+ kfree(array->array[i].string.name);
+ kunmap_atomic(array);
+}
+
+/*
+ * the caller is responsible for freeing qstr.name
+ * when called by nfs_readdir_add_to_array, the strings will be freed in
+ * nfs_clear_readdir_array()
+ */
+static
+int nfs_readdir_make_qstr(struct qstr *string, const char *name, unsigned int len)
+{
+ string->len = len;
+ string->name = kmemdup(name, len, GFP_KERNEL);
+ if (string->name == NULL)
+ return -ENOMEM;
+ /*
+ * Avoid a kmemleak false positive. The pointer to the name is stored
+ * in a page cache page which kmemleak does not scan.
+ */
+ kmemleak_not_leak(string->name);
+ string->hash = full_name_hash(name, len);
+ return 0;
+}
+
+static
+int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
+{
+ struct nfs_cache_array *array = nfs_readdir_get_array(page);
+ struct nfs_cache_array_entry *cache_entry;
+ int ret;
+
+ if (IS_ERR(array))
+ return PTR_ERR(array);
+
+ cache_entry = &array->array[array->size];
+
+ /* Check that this entry lies within the page bounds */
+ ret = -ENOSPC;
+ if ((char *)&cache_entry[1] - (char *)page_address(page) > PAGE_SIZE)
+ goto out;
+
+ cache_entry->cookie = entry->prev_cookie;
+ cache_entry->ino = entry->ino;
+ cache_entry->d_type = entry->d_type;
+ ret = nfs_readdir_make_qstr(&cache_entry->string, entry->name, entry->len);
+ if (ret)
+ goto out;
+ array->last_cookie = entry->cookie;
+ array->size++;
+ if (entry->eof != 0)
+ array->eof_index = array->size;
+out:
+ nfs_readdir_release_array(page);
+ return ret;
+}
+
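+/*
+ * Map the current file position onto an index in this page's entry
+ * array and load the matching cookie into *desc->dir_cookie.
+ * Returns -EAGAIN if the position lies beyond this page.
+ */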
+static
+int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
+{
+ loff_t diff = desc->file->f_pos - desc->current_index;
+ unsigned int index;
+
+ if (diff < 0)
+ goto out_eof;
+ if (diff >= array->size) {
+ if (array->eof_index >= 0)
+ goto out_eof;
+ return -EAGAIN;
+ }
+
+ index = (unsigned int)diff;
+ *desc->dir_cookie = array->array[index].cookie;
+ desc->cache_entry_index = index;
+ return 0;
+out_eof:
+ desc->eof = 1;
+ return -EBADCOOKIE;
+}
+
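+/*
+ * Scan this page's entry array for *desc->dir_cookie. Also detects
+ * readdir loops caused by a server handing out duplicate cookies.
+ */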
+static
+int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_descriptor_t *desc)
+{
+ int i;
+ loff_t new_pos;
+ int status = -EAGAIN;
+
+ for (i = 0; i < array->size; i++) {
+ if (array->array[i].cookie == *desc->dir_cookie) {
+ struct nfs_inode *nfsi = NFS_I(desc->file->f_path.dentry->d_inode);
+ struct nfs_open_dir_context *ctx = desc->file->private_data;
+
+ new_pos = desc->current_index + i;
+ if (ctx->attr_gencount != nfsi->attr_gencount
+ || (nfsi->cache_validity & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))) {
+ ctx->duped = 0;
+ ctx->attr_gencount = nfsi->attr_gencount;
+ } else if (new_pos < desc->file->f_pos) {
+ if (ctx->duped > 0
+ && ctx->dup_cookie == *desc->dir_cookie) {
+ if (printk_ratelimit()) {
+ pr_notice("NFS: directory %s/%s contains a readdir loop. "
+ "Please contact your server vendor. "
+ "The file: %s has duplicate cookie %llu\n",
+ desc->file->f_dentry->d_parent->d_name.name,
+ desc->file->f_dentry->d_name.name,
+ array->array[i].string.name,
+ *desc->dir_cookie);
+ }
+ status = -ELOOP;
+ goto out;
+ }
+ ctx->dup_cookie = *desc->dir_cookie;
+ ctx->duped = -1;
+ }
+ desc->file->f_pos = new_pos;
+ desc->cache_entry_index = i;
+ return 0;
+ }
+ }
+ if (array->eof_index >= 0) {
+ status = -EBADCOOKIE;
+ if (*desc->dir_cookie == array->last_cookie)
+ desc->eof = 1;
+ }
+out:
+ return status;
+}
+
+static
+int nfs_readdir_search_array(nfs_readdir_descriptor_t *desc)
+{
+ struct nfs_cache_array *array;
+ int status;
+
+ array = nfs_readdir_get_array(desc->page);
+ if (IS_ERR(array)) {
+ status = PTR_ERR(array);
+ goto out;
+ }
+
+ if (*desc->dir_cookie == 0)
+ status = nfs_readdir_search_for_pos(array, desc);
+ else
+ status = nfs_readdir_search_for_cookie(array, desc);
+
+ if (status == -EAGAIN) {
+ desc->last_cookie = array->last_cookie;
+ desc->current_index += array->size;
+ desc->page_index++;
+ }
+ nfs_readdir_release_array(desc->page);
+out:
+ return status;
+}
+
+/* Fill a page with xdr information before transferring to the cache page */
+static
+int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
+ struct nfs_entry *entry, struct file *file, struct inode *inode)
+{
+ struct nfs_open_dir_context *ctx = file->private_data;
+ struct rpc_cred *cred = ctx->cred;
+ unsigned long timestamp, gencount;
+ int error;
+
+ again:
+ timestamp = jiffies;
+ gencount = nfs_inc_attr_generation_counter();
+ error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages,
+ NFS_SERVER(inode)->dtsize, desc->plus);
+ if (error < 0) {
+ /* We requested READDIRPLUS, but the server doesn't grok it */
+ if (error == -ENOTSUPP && desc->plus) {
+ NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
+ clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
+ desc->plus = 0;
+ goto again;
+ }
+ goto error;
+ }
+ desc->timestamp = timestamp;
+ desc->gencount = gencount;
+error:
+ return error;
+}
+
+static int xdr_decode(nfs_readdir_descriptor_t *desc,
+ struct nfs_entry *entry, struct xdr_stream *xdr)
+{
+ int error;
+
+ error = desc->decode(xdr, entry, desc->plus);
+ if (error)
+ return error;
+ entry->fattr->time_start = desc->timestamp;
+ entry->fattr->gencount = desc->gencount;
+ return 0;
+}
+
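+/* Return 1 if the dentry's inode matches the filehandle in @entry */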
+static
+int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
+{
+ if (dentry->d_inode == NULL)
+ goto different;
+ if (nfs_compare_fh(entry->fh, NFS_FH(dentry->d_inode)) != 0)
+ goto different;
+ return 1;
+different:
+ return 0;
+}
+
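+/*
+ * Use a readdirplus entry to refresh or instantiate the matching
+ * dentry, priming the dcache before the VFS issues a real lookup.
+ */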
+static
+void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+{
+ struct qstr filename = {
+ .len = entry->len,
+ .name = entry->name,
+ };
+ struct dentry *dentry;
+ struct dentry *alias;
+ struct inode *dir = parent->d_inode;
+ struct inode *inode;
+
+ if (filename.name[0] == '.') {
+ if (filename.len == 1)
+ return;
+ if (filename.len == 2 && filename.name[1] == '.')
+ return;
+ }
+ filename.hash = full_name_hash(filename.name, filename.len);
+
+ dentry = d_lookup(parent, &filename);
+ if (dentry != NULL) {
+ if (nfs_same_file(dentry, entry)) {
+ nfs_refresh_inode(dentry->d_inode, entry->fattr);
+ goto out;
+ } else {
+ d_drop(dentry);
+ dput(dentry);
+ }
+ }
+
+ dentry = d_alloc(parent, &filename);
+ if (dentry == NULL)
+ return;
+
+ inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
+ if (IS_ERR(inode))
+ goto out;
+
+ alias = d_materialise_unique(dentry, inode);
+ if (IS_ERR(alias))
+ goto out;
+ else if (alias) {
+ nfs_set_verifier(alias, nfs_save_change_attribute(dir));
+ dput(alias);
+ } else
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+
+out:
+ dput(dentry);
+}
+
+/* Perform conversion from xdr to cache array */
+static
+int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
+ struct page **xdr_pages, struct page *page, unsigned int buflen)
+{
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ struct page *scratch;
+ struct nfs_cache_array *array;
+ unsigned int count = 0;
+ int status;
+
+ scratch = alloc_page(GFP_KERNEL);
+ if (scratch == NULL)
+ return -ENOMEM;
+
+ xdr_init_decode_pages(&stream, &buf, xdr_pages, buflen);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ do {
+ status = xdr_decode(desc, entry, &stream);
+ if (status != 0) {
+ if (status == -EAGAIN)
+ status = 0;
+ break;
+ }
+
+ count++;
+
+ if (desc->plus != 0)
+ nfs_prime_dcache(desc->file->f_path.dentry, entry);
+
+ status = nfs_readdir_add_to_array(entry, page);
+ if (status != 0)
+ break;
+ } while (!entry->eof);
+
+ if (count == 0 || (status == -EBADCOOKIE && entry->eof != 0)) {
+ array = nfs_readdir_get_array(page);
+ if (!IS_ERR(array)) {
+ array->eof_index = array->size;
+ status = 0;
+ nfs_readdir_release_array(page);
+ } else
+ status = PTR_ERR(array);
+ }
+
+ put_page(scratch);
+ return status;
+}
+
+static
+void nfs_readdir_free_pagearray(struct page **pages, unsigned int npages)
+{
+ unsigned int i;
+ for (i = 0; i < npages; i++)
+ put_page(pages[i]);
+}
+
+static
+void nfs_readdir_free_large_page(void *ptr, struct page **pages,
+ unsigned int npages)
+{
+ nfs_readdir_free_pagearray(pages, npages);
+}
+
+/*
+ * nfs_readdir_large_page will allocate pages that must be freed with a call
+ * to nfs_readdir_free_large_page
+ */
+static
+int nfs_readdir_large_page(struct page **pages, unsigned int npages)
+{
+ unsigned int i;
+
+ for (i = 0; i < npages; i++) {
+ struct page *page = alloc_page(GFP_KERNEL);
+ if (page == NULL)
+ goto out_freepages;
+ pages[i] = page;
+ }
+ return 0;
+
+out_freepages:
+ nfs_readdir_free_pagearray(pages, i);
+ return -ENOMEM;
+}
+
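+/*
+ * Fill one cache page: keep issuing READDIR requests and decoding the
+ * results into the page's entry array until the array is full
+ * (-ENOSPC) or the server signals end-of-directory.
+ */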
+static
+int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, struct inode *inode)
+{
+ struct page *pages[NFS_MAX_READDIR_PAGES];
+ void *pages_ptr = NULL;
+ struct nfs_entry entry;
+ struct file *file = desc->file;
+ struct nfs_cache_array *array;
+ int status = -ENOMEM;
+ unsigned int array_size = ARRAY_SIZE(pages);
+
+ entry.prev_cookie = 0;
+ entry.cookie = desc->last_cookie;
+ entry.eof = 0;
+ entry.fh = nfs_alloc_fhandle();
+ entry.fattr = nfs_alloc_fattr();
+ entry.server = NFS_SERVER(inode);
+ if (entry.fh == NULL || entry.fattr == NULL)
+ goto out;
+
+ array = nfs_readdir_get_array(page);
+ if (IS_ERR(array)) {
+ status = PTR_ERR(array);
+ goto out;
+ }
+ memset(array, 0, sizeof(struct nfs_cache_array));
+ array->eof_index = -1;
+
+ status = nfs_readdir_large_page(pages, array_size);
+ if (status < 0)
+ goto out_release_array;
+ do {
+ unsigned int pglen;
+ status = nfs_readdir_xdr_filler(pages, desc, &entry, file, inode);
+
+ if (status < 0)
+ break;
+ pglen = status;
+ status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
+ if (status < 0) {
+ if (status == -ENOSPC)
+ status = 0;
+ break;
+ }
+ } while (array->eof_index < 0);
+
+ nfs_readdir_free_large_page(pages_ptr, pages, array_size);
+out_release_array:
+ nfs_readdir_release_array(page);
+out:
+ nfs_free_fattr(entry.fattr);
+ nfs_free_fhandle(entry.fh);
+ return status;
+}
+
+/*
+ * Now we cache directories properly, by converting xdr information
+ * to an array that can be used for lookups later. This results in
+ * fewer cache pages, since we can store more information on each page.
+ * We only need to convert from xdr once, so future lookups are much simpler.
+ */
+static
+int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page)
+{
+ struct inode *inode = desc->file->f_path.dentry->d_inode;
+ int ret;
+
+ ret = nfs_readdir_xdr_to_array(desc, page, inode);
+ if (ret < 0)
+ goto error;
+ SetPageUptodate(page);
+
+ if (invalidate_inode_pages2_range(inode->i_mapping, page->index + 1, -1) < 0) {
+ /* Should never happen */
+ nfs_zap_mapping(inode, inode->i_mapping);
+ }
+ unlock_page(page);
+ return 0;
+ error:
+ unlock_page(page);
+ return ret;
+}
+
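+/*
+ * Drop our reference on the page. An anonymous page (no mapping, as
+ * used by uncached_readdir) never hits ->freepage, so free its entry
+ * names here.
+ */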
+static
+void cache_page_release(nfs_readdir_descriptor_t *desc)
+{
+ if (!desc->page->mapping)
+ nfs_readdir_clear_array(desc->page);
+ page_cache_release(desc->page);
+ desc->page = NULL;
+}
+
+static
+struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
+{
+ return read_cache_page(desc->file->f_path.dentry->d_inode->i_mapping,
+ desc->page_index, (filler_t *)nfs_readdir_filler, desc);
+}
+
+/*
+ * Returns 0 if desc->dir_cookie was found on page desc->page_index
+ */
+static
+int find_cache_page(nfs_readdir_descriptor_t *desc)
+{
+ int res;
+
+ desc->page = get_cache_page(desc);
+ if (IS_ERR(desc->page))
+ return PTR_ERR(desc->page);
+
+ res = nfs_readdir_search_array(desc);
+ if (res != 0)
+ cache_page_release(desc);
+ return res;
+}
+
+/* Search for desc->dir_cookie from the beginning of the page cache */
+static inline
+int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
+{
+ int res;
+
+ if (desc->page_index == 0) {
+ desc->current_index = 0;
+ desc->last_cookie = 0;
+ }
+ do {
+ res = find_cache_page(desc);
+ } while (res == -EAGAIN);
+ return res;
+}
+
+/*
+ * Once we've found the start of the dirent within a page: fill 'er up...
+ */
+static
+int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
+ filldir_t filldir)
+{
+ struct file *file = desc->file;
+ int i = 0;
+ int res = 0;
+ struct nfs_cache_array *array = NULL;
+ struct nfs_open_dir_context *ctx = file->private_data;
+
+ array = nfs_readdir_get_array(desc->page);
+ if (IS_ERR(array)) {
+ res = PTR_ERR(array);
+ goto out;
+ }
+
+ for (i = desc->cache_entry_index; i < array->size; i++) {
+ struct nfs_cache_array_entry *ent;
+
+ ent = &array->array[i];
+ if (filldir(dirent, ent->string.name, ent->string.len,
+ file->f_pos, nfs_compat_user_ino64(ent->ino),
+ ent->d_type) < 0) {
+ desc->eof = 1;
+ break;
+ }
+ file->f_pos++;
+ if (i < (array->size-1))
+ *desc->dir_cookie = array->array[i+1].cookie;
+ else
+ *desc->dir_cookie = array->last_cookie;
+ if (ctx->duped != 0)
+ ctx->duped = 1;
+ }
+ if (array->eof_index >= 0)
+ desc->eof = 1;
+
+ nfs_readdir_release_array(desc->page);
+out:
+ cache_page_release(desc);
+ dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
+ (unsigned long long)*desc->dir_cookie, res);
+ return res;
+}
+
+/*
+ * If we cannot find a cookie in our cache, we suspect that this is
+ * because it points to a deleted file, so we ask the server to return
+ * whatever it thinks is the next entry. We then feed this to filldir.
+ * If all goes well, we should then be able to find our way round the
+ * cache on the next call to readdir_search_pagecache();
+ *
+ * NOTE: we cannot add the anonymous page to the pagecache because
+ * the data it contains might not be page aligned. Besides,
+ * we should already have a complete representation of the
+ * directory in the page cache by the time we get here.
+ */
+static inline
+int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
+ filldir_t filldir)
+{
+ struct page *page = NULL;
+ int status;
+ struct inode *inode = desc->file->f_path.dentry->d_inode;
+ struct nfs_open_dir_context *ctx = desc->file->private_data;
+
+ dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n",
+ (unsigned long long)*desc->dir_cookie);
+
+ page = alloc_page(GFP_HIGHUSER);
+ if (!page) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ desc->page_index = 0;
+ desc->last_cookie = *desc->dir_cookie;
+ desc->page = page;
+ ctx->duped = 0;
+
+ status = nfs_readdir_xdr_to_array(desc, page, inode);
+ if (status < 0)
+ goto out_release;
+
+ status = nfs_do_filldir(desc, dirent, filldir);
+
+ out:
+ dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
+ __func__, status);
+ return status;
+ out_release:
+ cache_page_release(desc);
+ goto out;
+}
+
+/*
+ * The file offset position represents the dirent entry number. A
+ * last cookie cache takes care of the common case of reading the
+ * whole directory.
+ */
+static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ nfs_readdir_descriptor_t my_desc,
+ *desc = &my_desc;
+ struct nfs_open_dir_context *dir_ctx = filp->private_data;
+ int res;
+
+ dfprintk(FILE, "NFS: readdir(%s/%s) starting at cookie %llu\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (long long)filp->f_pos);
+ nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);
+
+ /*
+ * filp->f_pos points to the dirent entry number.
+ * *desc->dir_cookie has the cookie for the next entry. We have
+ * to either find the entry with the appropriate number or
+ * revalidate the cookie.
+ */
+ memset(desc, 0, sizeof(*desc));
+
+ desc->file = filp;
+ desc->dir_cookie = &dir_ctx->dir_cookie;
+ desc->decode = NFS_PROTO(inode)->decode_dirent;
+ desc->plus = NFS_USE_READDIRPLUS(inode);
+
+ nfs_block_sillyrename(dentry);
+ res = nfs_revalidate_mapping(inode, filp->f_mapping);
+ if (res < 0)
+ goto out;
+
+ do {
+ res = readdir_search_pagecache(desc);
+
+ if (res == -EBADCOOKIE) {
+ res = 0;
+ /* This means either end of directory */
+ if (*desc->dir_cookie && desc->eof == 0) {
+ /* Or that the server has 'lost' a cookie */
+ res = uncached_readdir(desc, dirent, filldir);
+ if (res == 0)
+ continue;
+ }
+ break;
+ }
+ if (res == -ETOOSMALL && desc->plus) {
+ clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
+ nfs_zap_caches(inode);
+ desc->page_index = 0;
+ desc->plus = 0;
+ desc->eof = 0;
+ continue;
+ }
+ if (res < 0)
+ break;
+
+ res = nfs_do_filldir(desc, dirent, filldir);
+ if (res < 0)
+ break;
+ } while (!desc->eof);
+out:
+ nfs_unblock_sillyrename(dentry);
+ if (res > 0)
+ res = 0;
+ dfprintk(FILE, "NFS: readdir(%s/%s) returns %d\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ res);
+ return res;
+}
+
+static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ struct nfs_open_dir_context *dir_ctx = filp->private_data;
+
+ dfprintk(FILE, "NFS: llseek dir(%s/%s, %lld, %d)\n",
+ dentry->d_parent->d_name.name,
+ dentry->d_name.name,
+ offset, origin);
+
+ mutex_lock(&inode->i_mutex);
+ switch (origin) {
+ case 1:
+ offset += filp->f_pos;
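+ /* fall through to validate the new offset */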
+ case 0:
+ if (offset >= 0)
+ break;
+ default:
+ offset = -EINVAL;
+ goto out;
+ }
+ if (offset != filp->f_pos) {
+ filp->f_pos = offset;
+ dir_ctx->dir_cookie = 0;
+ dir_ctx->duped = 0;
+ }
+out:
+ mutex_unlock(&inode->i_mutex);
+ return offset;
+}
+
+/*
+ * All directory operations under NFS are synchronous, so fsync()
+ * is a dummy operation.
+ */
+static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
+ int datasync)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+
+ dfprintk(FILE, "NFS: fsync dir(%s/%s) datasync %d\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ datasync);
+
+ mutex_lock(&inode->i_mutex);
+ nfs_inc_stats(dentry->d_inode, NFSIOS_VFSFSYNC);
+ mutex_unlock(&inode->i_mutex);
+ return 0;
+}
+
+/**
+ * nfs_force_lookup_revalidate - Mark the directory as having changed
+ * @dir: pointer to directory inode
+ *
+ * This forces the revalidation code in nfs_lookup_revalidate() to do a
+ * full lookup on all child dentries of 'dir' whenever a change occurs
+ * on the server that might have invalidated our dcache.
+ *
+ * The caller should be holding dir->i_lock
+ */
+void nfs_force_lookup_revalidate(struct inode *dir)
+{
+ NFS_I(dir)->cache_change_attribute++;
+}
+
+/*
+ * A check for whether or not the parent directory has changed.
+ * In the case it has, we assume that the dentries are untrustworthy
+ * and may need to be looked up again.
+ */
+static int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
+{
+ if (IS_ROOT(dentry))
+ return 1;
+ if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
+ return 0;
+ if (!nfs_verify_change_attribute(dir, dentry->d_time))
+ return 0;
+ /* Revalidate nfsi->cache_change_attribute before we declare a match */
+ if (nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
+ return 0;
+ if (!nfs_verify_change_attribute(dir, dentry->d_time))
+ return 0;
+ return 1;
+}
+
+/*
+ * Return the intent data that applies to this particular path component
+ *
+ * Note that the current set of intents only apply to the very last
+ * component of the path and none of them is set before that last
+ * component.
+ */
+static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd,
+ unsigned int mask)
+{
+ return nd->flags & mask;
+}
+
+/*
+ * Use intent information to check whether or not we're going to do
+ * an O_EXCL create using this path component.
+ */
+static int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd)
+{
+ if (NFS_PROTO(dir)->version == 2)
+ return 0;
+ return nd && nfs_lookup_check_intent(nd, LOOKUP_EXCL);
+}
+
+/*
+ * Inode and filehandle revalidation for lookups.
+ *
+ * We force revalidation in the cases where the VFS sets LOOKUP_REVAL,
+ * or if the intent information indicates that we're about to open this
+ * particular file and the "nocto" mount flag is not set.
+ *
+ */
+static inline
+int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+
+ if (IS_AUTOMOUNT(inode))
+ return 0;
+ if (nd != NULL) {
+ /* VFS wants an on-the-wire revalidation */
+ if (nd->flags & LOOKUP_REVAL)
+ goto out_force;
+ /* This is an open(2) */
+ if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 &&
+ !(server->flags & NFS_MOUNT_NOCTO) &&
+ (S_ISREG(inode->i_mode) ||
+ S_ISDIR(inode->i_mode)))
+ goto out_force;
+ return 0;
+ }
+ return nfs_revalidate_inode(server, inode);
+out_force:
+ return __nfs_revalidate_inode(server, inode);
+}
+
+/*
+ * We judge how long we want to trust negative
+ * dentries by looking at the parent inode mtime.
+ *
+ * If parent mtime has changed, we revalidate, else we wait for a
+ * period corresponding to the parent's attribute cache timeout value.
+ */
+static inline
+int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
+{
+ /* Don't revalidate a negative dentry if we're creating a new file */
+ if (nd != NULL && nfs_lookup_check_intent(nd, LOOKUP_CREATE) != 0)
+ return 0;
+ if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG)
+ return 1;
+ return !nfs_check_verifier(dir, dentry);
+}
+
+/*
+ * This is called every time the dcache has a lookup hit,
+ * and we should check whether we can really trust that
+ * lookup.
+ *
+ * NOTE! The hit can be a negative hit too, don't assume
+ * we have an inode!
+ *
+ * If the parent directory is seen to have changed, we throw out the
+ * cached dentry and do a new lookup.
+ */
+static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ struct inode *dir;
+ struct inode *inode;
+ struct dentry *parent;
+ struct nfs_fh *fhandle = NULL;
+ struct nfs_fattr *fattr = NULL;
+ int error;
+
+ if (nd->flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ parent = dget_parent(dentry);
+ dir = parent->d_inode;
+ nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
+ inode = dentry->d_inode;
+
+ if (!inode) {
+ if (nfs_neg_need_reval(dir, dentry, nd))
+ goto out_bad;
+ goto out_valid;
+ }
+
+ if (is_bad_inode(inode)) {
+ dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
+ __func__, dentry->d_parent->d_name.name,
+ dentry->d_name.name);
+ goto out_bad;
+ }
+
+ if (nfs_have_delegation(inode, FMODE_READ))
+ goto out_set_verifier;
+
+ /* Force a full look up iff the parent directory has changed */
+ if (!nfs_is_exclusive_create(dir, nd) && nfs_check_verifier(dir, dentry)) {
+ if (nfs_lookup_verify_inode(inode, nd))
+ goto out_zap_parent;
+ goto out_valid;
+ }
+
+ if (NFS_STALE(inode))
+ goto out_bad;
+
+ error = -ENOMEM;
+ fhandle = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ if (fhandle == NULL || fattr == NULL)
+ goto out_error;
+
+ error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+ if (error)
+ goto out_bad;
+ if (nfs_compare_fh(NFS_FH(inode), fhandle))
+ goto out_bad;
+ if ((error = nfs_refresh_inode(inode, fattr)) != 0)
+ goto out_bad;
+
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
+out_set_verifier:
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ out_valid:
+ dput(parent);
+ dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
+ __func__, dentry->d_parent->d_name.name,
+ dentry->d_name.name);
+ return 1;
+out_zap_parent:
+ nfs_zap_caches(dir);
+ out_bad:
+ nfs_mark_for_revalidate(dir);
+ if (inode && S_ISDIR(inode->i_mode)) {
+ /* Purge readdir caches. */
+ nfs_zap_caches(inode);
+ /* If we have submounts, don't unhash ! */
+ if (have_submounts(dentry))
+ goto out_valid;
+ if (dentry->d_flags & DCACHE_DISCONNECTED)
+ goto out_valid;
+ shrink_dcache_parent(dentry);
+ }
+ d_drop(dentry);
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
+ dput(parent);
+ dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
+ __func__, dentry->d_parent->d_name.name,
+ dentry->d_name.name);
+ return 0;
+out_error:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
+ dput(parent);
+ dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) lookup returned error %d\n",
+ __func__, dentry->d_parent->d_name.name,
+ dentry->d_name.name, error);
+ return error;
+}
+
+/*
+ * This is called from dput() when d_count is going to 0.
+ */
+static int nfs_dentry_delete(const struct dentry *dentry)
+{
+ dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ dentry->d_flags);
+
+ /* Unhash any dentry with a stale inode */
+ if (dentry->d_inode != NULL && NFS_STALE(dentry->d_inode))
+ return 1;
+
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ /* Unhash it, so that ->d_iput() would be called */
+ return 1;
+ }
+ if (!(dentry->d_sb->s_flags & MS_ACTIVE)) {
+ /* Unhash it, so that ancestors of killed async unlink
+ * files will be cleaned up during umount */
+ return 1;
+ }
+ return 0;
+
+}
+
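+/* Decrement i_nlink under i_lock, but never below zero */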
+static void nfs_drop_nlink(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ if (inode->i_nlink > 0)
+ drop_nlink(inode);
+ spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Called when the dentry loses inode.
+ * We use it to clean up silly-renamed files.
+ */
+static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
+{
+ if (S_ISDIR(inode->i_mode))
+ /* drop any readdir cache as it could easily be old */
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
+
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ drop_nlink(inode);
+ nfs_complete_unlink(dentry, inode);
+ }
+ iput(inode);
+}
+
+static void nfs_d_release(struct dentry *dentry)
+{
+ /* free cached devname value, if it survived that far */
+ if (unlikely(dentry->d_fsdata)) {
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ WARN_ON(1);
+ else
+ kfree(dentry->d_fsdata);
+ }
+}
+
+const struct dentry_operations nfs_dentry_operations = {
+ .d_revalidate = nfs_lookup_revalidate,
+ .d_delete = nfs_dentry_delete,
+ .d_iput = nfs_dentry_iput,
+ .d_automount = nfs_d_automount,
+ .d_release = nfs_d_release,
+};
+
+static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
+{
+ struct dentry *res;
+ struct dentry *parent;
+ struct inode *inode = NULL;
+ struct nfs_fh *fhandle = NULL;
+ struct nfs_fattr *fattr = NULL;
+ int error;
+
+ dfprintk(VFS, "NFS: lookup(%s/%s)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+ nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);
+
+ res = ERR_PTR(-ENAMETOOLONG);
+ if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
+ goto out;
+
+ /*
+ * If we're doing an exclusive create, optimize away the lookup
+ * but don't hash the dentry.
+ */
+ if (nfs_is_exclusive_create(dir, nd)) {
+ d_instantiate(dentry, NULL);
+ res = NULL;
+ goto out;
+ }
+
+ res = ERR_PTR(-ENOMEM);
+ fhandle = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ if (fhandle == NULL || fattr == NULL)
+ goto out;
+
+ parent = dentry->d_parent;
+ /* Protect against concurrent sillydeletes */
+ nfs_block_sillyrename(parent);
+ error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+ if (error == -ENOENT)
+ goto no_entry;
+ if (error < 0) {
+ res = ERR_PTR(error);
+ goto out_unblock_sillyrename;
+ }
+ inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
+ res = ERR_CAST(inode);
+ if (IS_ERR(res))
+ goto out_unblock_sillyrename;
+
+no_entry:
+ res = d_materialise_unique(dentry, inode);
+ if (res != NULL) {
+ if (IS_ERR(res))
+ goto out_unblock_sillyrename;
+ dentry = res;
+ }
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+out_unblock_sillyrename:
+ nfs_unblock_sillyrename(parent);
+out:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
+ return res;
+}
+
+#ifdef CONFIG_NFS_V4
+static int nfs_open_revalidate(struct dentry *, struct nameidata *);
+
+const struct dentry_operations nfs4_dentry_operations = {
+ .d_revalidate = nfs_open_revalidate,
+ .d_delete = nfs_dentry_delete,
+ .d_iput = nfs_dentry_iput,
+ .d_automount = nfs_d_automount,
+ .d_release = nfs_d_release,
+};
+
+/*
+ * Use intent information to determine whether we need to substitute
+ * the NFSv4-style stateful OPEN for the LOOKUP call
+ */
+static int is_atomic_open(struct nameidata *nd)
+{
+ if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_OPEN) == 0)
+ return 0;
+ /* NFS does not (yet) have a stateful open for directories */
+ if (nd->flags & LOOKUP_DIRECTORY)
+ return 0;
+ /* Are we trying to write to a read only partition? */
+ if (__mnt_is_readonly(nd->path.mnt) &&
+ (nd->intent.open.flags & (O_CREAT|O_TRUNC|O_ACCMODE)))
+ return 0;
+ return 1;
+}
+
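+/* Translate open(2) flags into the fmode_t used by the NFS open context */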
+static fmode_t flags_to_mode(int flags)
+{
+ fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
+ if ((flags & O_ACCMODE) != O_WRONLY)
+ res |= FMODE_READ;
+ if ((flags & O_ACCMODE) != O_RDONLY)
+ res |= FMODE_WRITE;
+ return res;
+}
+
+static struct nfs_open_context *create_nfs_open_context(struct dentry *dentry, int open_flags)
+{
+ return alloc_nfs_open_context(dentry, flags_to_mode(open_flags));
+}
+
+static int do_open(struct inode *inode, struct file *filp)
+{
+ nfs_fscache_set_inode_cookie(inode, filp);
+ return 0;
+}
+
+static int nfs_intent_set_file(struct nameidata *nd, struct nfs_open_context *ctx)
+{
+ struct file *filp;
+ int ret = 0;
+
+ /* If the open_intent is for execute, we have an extra check to make */
+ if (ctx->mode & FMODE_EXEC) {
+ ret = nfs_may_open(ctx->dentry->d_inode,
+ ctx->cred,
+ nd->intent.open.flags);
+ if (ret < 0)
+ goto out;
+ }
+ filp = lookup_instantiate_filp(nd, ctx->dentry, do_open);
+ if (IS_ERR(filp))
+ ret = PTR_ERR(filp);
+ else
+ nfs_file_set_open_context(filp, ctx);
+out:
+ put_nfs_open_context(ctx);
+ return ret;
+}
+
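+/*
+ * Combined lookup and open: send an NFSv4 OPEN instead of a plain
+ * LOOKUP when the nameidata intent allows it, falling back to
+ * nfs_lookup() otherwise.
+ */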
+static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+{
+ struct nfs_open_context *ctx;
+ struct iattr attr;
+ struct dentry *res = NULL;
+ struct inode *inode;
+ int open_flags;
+ int err;
+
+ dfprintk(VFS, "NFS: atomic_lookup(%s/%ld), %s\n",
+ dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+
+ /* Check that we are indeed trying to open this file */
+ if (!is_atomic_open(nd))
+ goto no_open;
+
+ if (dentry->d_name.len > NFS_SERVER(dir)->namelen) {
+ res = ERR_PTR(-ENAMETOOLONG);
+ goto out;
+ }
+
+ /* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash
+ * the dentry. */
+ if (nd->flags & LOOKUP_EXCL) {
+ d_instantiate(dentry, NULL);
+ goto out;
+ }
+
+ open_flags = nd->intent.open.flags;
+ attr.ia_valid = ATTR_OPEN;
+
+ ctx = create_nfs_open_context(dentry, open_flags);
+ res = ERR_CAST(ctx);
+ if (IS_ERR(ctx))
+ goto out;
+
+ if (nd->flags & LOOKUP_CREATE) {
+ attr.ia_mode = nd->intent.open.create_mode;
+ attr.ia_valid |= ATTR_MODE;
+ attr.ia_mode &= ~current_umask();
+ } else
+ open_flags &= ~(O_EXCL | O_CREAT);
+
+ if (open_flags & O_TRUNC) {
+ attr.ia_valid |= ATTR_SIZE;
+ attr.ia_size = 0;
+ }
+
+ /* Open the file on the server */
+ nfs_block_sillyrename(dentry->d_parent);
+ inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr);
+ if (IS_ERR(inode)) {
+ nfs_unblock_sillyrename(dentry->d_parent);
+ put_nfs_open_context(ctx);
+ switch (PTR_ERR(inode)) {
+ /* Make a negative dentry */
+ case -ENOENT:
+ d_add(dentry, NULL);
+ res = NULL;
+ goto out;
+ /* This turned out not to be a regular file */
+ case -EISDIR:
+ case -ENOTDIR:
+ goto no_open;
+ case -ELOOP:
+ if (!(nd->intent.open.flags & O_NOFOLLOW))
+ goto no_open;
+ /* case -EINVAL: */
+ default:
+ res = ERR_CAST(inode);
+ goto out;
+ }
+ }
+ res = d_add_unique(dentry, inode);
+ nfs_unblock_sillyrename(dentry->d_parent);
+ if (res != NULL) {
+ dput(ctx->dentry);
+ ctx->dentry = dget(res);
+ dentry = res;
+ }
+ err = nfs_intent_set_file(nd, ctx);
+ if (err < 0) {
+ if (res != NULL)
+ dput(res);
+ return ERR_PTR(err);
+ }
+out:
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ return res;
+no_open:
+ return nfs_lookup(dir, dentry, nd);
+}
+
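+/*
+ * Revalidate an already-cached dentry by re-opening the file on the
+ * server; falls back to nfs_lookup_revalidate() when no atomic open
+ * is possible.
+ */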
+static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ struct dentry *parent = NULL;
+ struct inode *inode;
+ struct inode *dir;
+ struct nfs_open_context *ctx;
+ struct iattr attr;
+ int openflags, ret = 0;
+
+ if (nd->flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ inode = dentry->d_inode;
+ if (!is_atomic_open(nd) || d_mountpoint(dentry))
+ goto no_open;
+
+ parent = dget_parent(dentry);
+ dir = parent->d_inode;
+
+ /* We can't create new files in nfs_open_revalidate(), so we
+ * optimize away revalidation of negative dentries.
+ */
+ if (inode == NULL) {
+ if (!nfs_neg_need_reval(dir, dentry, nd))
+ ret = 1;
+ goto out;
+ }
+
+ /* NFS only supports OPEN on regular files */
+ if (!S_ISREG(inode->i_mode))
+ goto no_open_dput;
+ openflags = nd->intent.open.flags;
+ /* We cannot do exclusive creation on a positive dentry */
+ if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
+ goto no_open_dput;
+ /* We can't create new files here */
+ openflags &= ~(O_CREAT|O_EXCL);
+
+ ctx = create_nfs_open_context(dentry, openflags);
+ ret = PTR_ERR(ctx);
+ if (IS_ERR(ctx))
+ goto out;
+
+ attr.ia_valid = ATTR_OPEN;
+ if (openflags & O_TRUNC) {
+ attr.ia_valid |= ATTR_SIZE;
+ attr.ia_size = 0;
+ nfs_wb_all(inode);
+ }
+
+ /*
+ * Note: we're not holding inode->i_mutex and so may be racing with
+ * operations that change the directory. We therefore save the
+ * change attribute *before* we do the RPC call.
+ */
+ inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ switch (ret) {
+ case -EPERM:
+ case -EACCES:
+ case -EDQUOT:
+ case -ENOSPC:
+ case -EROFS:
+ goto out_put_ctx;
+ default:
+ goto out_drop;
+ }
+ }
+ iput(inode);
+ if (inode != dentry->d_inode)
+ goto out_drop;
+
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ ret = nfs_intent_set_file(nd, ctx);
+ if (ret >= 0)
+ ret = 1;
+out:
+ dput(parent);
+ return ret;
+out_drop:
+ d_drop(dentry);
+ ret = 0;
+out_put_ctx:
+ put_nfs_open_context(ctx);
+ goto out;
+
+no_open_dput:
+ dput(parent);
+no_open:
+ return nfs_lookup_revalidate(dentry, nd);
+}
+
+static int nfs_open_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, struct nameidata *nd)
+{
+ struct nfs_open_context *ctx = NULL;
+ struct iattr attr;
+ int error;
+ int open_flags = O_CREAT|O_EXCL;
+
+ dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
+ dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+
+ attr.ia_mode = mode;
+ attr.ia_valid = ATTR_MODE;
+
+ if (nd)
+ open_flags = nd->intent.open.flags;
+
+ ctx = create_nfs_open_context(dentry, open_flags);
+ error = PTR_ERR(ctx);
+ if (IS_ERR(ctx))
+ goto out_err_drop;
+
+ error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, ctx);
+ if (error != 0)
+ goto out_put_ctx;
+ if (nd) {
+ error = nfs_intent_set_file(nd, ctx);
+ if (error < 0)
+ goto out_err;
+ } else {
+ put_nfs_open_context(ctx);
+ }
+ return 0;
+out_put_ctx:
+ put_nfs_open_context(ctx);
+out_err_drop:
+ d_drop(dentry);
+out_err:
+ return error;
+}
+
+#endif /* CONFIG_NFS_V4 */
+
+/*
+ * Code common to create, mkdir, and mknod.
+ */
+int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr)
+{
+ struct dentry *parent = dget_parent(dentry);
+ struct inode *dir = parent->d_inode;
+ struct inode *inode;
+ int error = -EACCES;
+
+ d_drop(dentry);
+
+ /* We may have been initialized further down */
+ if (dentry->d_inode)
+ goto out;
+ if (fhandle->size == 0) {
+ error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr);
+ if (error)
+ goto out_error;
+ }
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ if (!(fattr->valid & NFS_ATTR_FATTR)) {
+ struct nfs_server *server = NFS_SB(dentry->d_sb);
+ error = server->nfs_client->rpc_ops->getattr(server, fhandle, fattr);
+ if (error < 0)
+ goto out_error;
+ }
+ inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
+ error = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto out_error;
+ d_add(dentry, inode);
+out:
+ dput(parent);
+ return 0;
+out_error:
+ nfs_mark_for_revalidate(dir);
+ dput(parent);
+ return error;
+}
+
+/*
+ * Following a failed create operation, we drop the dentry rather
+ * than retain a negative dentry. This avoids a problem in the event
+ * that the operation succeeded on the server, but an error in the
+ * reply path made it appear to have failed.
+ */
+static int nfs_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode, struct nameidata *nd)
+{
+ struct iattr attr;
+ int error;
+ int open_flags = O_CREAT|O_EXCL;
+
+ dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
+ dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+
+ attr.ia_mode = mode;
+ attr.ia_valid = ATTR_MODE;
+
+ if (nd)
+ open_flags = nd->intent.open.flags;
+
+ error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL);
+ if (error != 0)
+ goto out_err;
+ return 0;
+out_err:
+ d_drop(dentry);
+ return error;
+}
+
+/*
+ * See comments for nfs_proc_create regarding failed operations.
+ */
+static int
+nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev)
+{
+ struct iattr attr;
+ int status;
+
+ dfprintk(VFS, "NFS: mknod(%s/%ld), %s\n",
+ dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+
+ if (!new_valid_dev(rdev))
+ return -EINVAL;
+
+ attr.ia_mode = mode;
+ attr.ia_valid = ATTR_MODE;
+
+ status = NFS_PROTO(dir)->mknod(dir, dentry, &attr, rdev);
+ if (status != 0)
+ goto out_err;
+ return 0;
+out_err:
+ d_drop(dentry);
+ return status;
+}
+
+/*
+ * See comments for nfs_proc_create regarding failed operations.
+ */
+static int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct iattr attr;
+ int error;
+
+ dfprintk(VFS, "NFS: mkdir(%s/%ld), %s\n",
+ dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+
+ attr.ia_valid = ATTR_MODE;
+ attr.ia_mode = mode | S_IFDIR;
+
+ error = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
+ if (error != 0)
+ goto out_err;
+ return 0;
+out_err:
+ d_drop(dentry);
+ return error;
+}
+
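+/* After the server reports ENOENT, make the VFS drop the stale positive dentry */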
+static void nfs_dentry_handle_enoent(struct dentry *dentry)
+{
+ if (dentry->d_inode != NULL && !d_unhashed(dentry))
+ d_delete(dentry);
+}
+
+static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ int error;
+
+ dfprintk(VFS, "NFS: rmdir(%s/%ld), %s\n",
+ dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
+
+ error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
+ /* Ensure the VFS deletes this inode */
+ if (error == 0 && dentry->d_inode != NULL)
+ clear_nlink(dentry->d_inode);
+ else if (error == -ENOENT)
+ nfs_dentry_handle_enoent(dentry);
+
+ return error;
+}
+
+/*
+ * Remove a file after making sure there are no pending writes,
+ * and after checking that the file has only one user.
+ *
+ * We invalidate the attribute cache and free the inode prior to the operation
+ * to avoid possible races if the server reuses the inode.
+ */
+static int nfs_safe_remove(struct dentry *dentry)
+{
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct inode *inode = dentry->d_inode;
+ int error = -EBUSY;
+
+ dfprintk(VFS, "NFS: safe_remove(%s/%s)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
+ /* If the dentry was sillyrenamed, we simply call d_delete() */
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ error = 0;
+ goto out;
+ }
+
+ if (inode != NULL) {
+ nfs_inode_return_delegation(inode);
+ error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
+ /* The VFS may want to delete this inode */
+ if (error == 0)
+ nfs_drop_nlink(inode);
+ nfs_mark_for_revalidate(inode);
+ } else
+ error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
+ if (error == -ENOENT)
+ nfs_dentry_handle_enoent(dentry);
+out:
+ return error;
+}
+
+/* We do silly rename. In case sillyrename() returns -EBUSY, the inode
+ * belongs to an active ".nfs..." file and we return -EBUSY.
+ *
+ * If sillyrename() returns 0, we do nothing, otherwise we unlink.
+ */
+static int nfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ int error;
+ int need_rehash = 0;
+
+ dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
+ dir->i_ino, dentry->d_name.name);
+
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_count > 1) {
+ spin_unlock(&dentry->d_lock);
+ /* Start asynchronous writeout of the inode */
+ write_inode_now(dentry->d_inode, 0);
+ error = nfs_sillyrename(dir, dentry);
+ return error;
+ }
+ if (!d_unhashed(dentry)) {
+ __d_drop(dentry);
+ need_rehash = 1;
+ }
+ spin_unlock(&dentry->d_lock);
+ error = nfs_safe_remove(dentry);
+ if (!error || error == -ENOENT) {
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ } else if (need_rehash)
+ d_rehash(dentry);
+ return error;
+}
+
+/*
+ * To create a symbolic link, most file systems instantiate a new inode,
+ * add a page to it containing the path, then write it out to the disk
+ * using prepare_write/commit_write.
+ *
+ * Unfortunately the NFS client can't create the in-core inode first
+ * because it needs a file handle to create an in-core inode (see
+ * fs/nfs/inode.c:nfs_fhget). We only have a file handle *after* the
+ * symlink request has completed on the server.
+ *
+ * So instead we allocate a raw page, copy the symname into it, then do
+ * the SYMLINK request with the page as the buffer. If it succeeds, we
+ * now have a new file handle and can instantiate an in-core NFS inode
+ * and move the raw page into its mapping.
+ */
+static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
+{
+ struct pagevec lru_pvec;
+ struct page *page;
+ char *kaddr;
+ struct iattr attr;
+ unsigned int pathlen = strlen(symname);
+ int error;
+
+ dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s)\n", dir->i_sb->s_id,
+ dir->i_ino, dentry->d_name.name, symname);
+
+ if (pathlen > PAGE_SIZE)
+ return -ENAMETOOLONG;
+
+ attr.ia_mode = S_IFLNK | S_IRWXUGO;
+ attr.ia_valid = ATTR_MODE;
+
+ page = alloc_page(GFP_HIGHUSER);
+ if (!page)
+ return -ENOMEM;
+
+ kaddr = kmap_atomic(page);
+ memcpy(kaddr, symname, pathlen);
+ if (pathlen < PAGE_SIZE)
+ memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
+ kunmap_atomic(kaddr);
+
+ error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
+ if (error != 0) {
+ dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s) error %d\n",
+ dir->i_sb->s_id, dir->i_ino,
+ dentry->d_name.name, symname, error);
+ d_drop(dentry);
+ __free_page(page);
+ return error;
+ }
+
+ /*
+ * No big deal if we can't add this page to the page cache here.
+ * READLINK will get the missing page from the server if needed.
+ */
+ pagevec_init(&lru_pvec, 0);
+ if (!add_to_page_cache(page, dentry->d_inode->i_mapping, 0,
+ GFP_KERNEL)) {
+ pagevec_add(&lru_pvec, page);
+ pagevec_lru_add_file(&lru_pvec);
+ SetPageUptodate(page);
+ unlock_page(page);
+ } else
+ __free_page(page);
+
+ return 0;
+}
+
+static int
+nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = old_dentry->d_inode;
+ int error;
+
+ dfprintk(VFS, "NFS: link(%s/%s -> %s/%s)\n",
+ old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
+ nfs_inode_return_delegation(inode);
+
+ d_drop(dentry);
+ error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
+ if (error == 0) {
+ ihold(inode);
+ d_add(dentry, inode);
+ }
+ return error;
+}
+
+/*
+ * RENAME
+ * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
+ * different file handle for the same inode after a rename (e.g. when
+ * moving to a different directory). A fail-safe method to do so would
+ * be to look up old_dir/old_name, create a link to new_dir/new_name and
+ * rename the old file using the sillyrename stuff. This way, the original
+ * file in old_dir will go away when the last process iput()s the inode.
+ *
+ * FIXED.
+ *
+ * It actually works quite well. One needs to have the possibility for
+ * at least one ".nfs..." file in each directory the file ever gets
+ * moved or linked to, which happens automagically with the new
+ * implementation that only depends on the dcache stuff instead of
+ * using the inode layer.
+ *
+ * Unfortunately, things are a little more complicated than indicated
+ * above. For a cross-directory move, we want to make sure we can get
+ * rid of the old inode after the operation. This means there must be
+ * no pending writes (if it's a file), and the use count must be 1.
+ * If these conditions are met, we can drop the dentries before doing
+ * the rename.
+ */
+static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct inode *old_inode = old_dentry->d_inode;
+ struct inode *new_inode = new_dentry->d_inode;
+ struct dentry *dentry = NULL, *rehash = NULL;
+ int error = -EBUSY;
+
+ dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
+ old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
+ new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
+ new_dentry->d_count);
+
+ /*
+ * For non-directories, check whether the target is busy and if so,
+ * make a copy of the dentry and then do a silly-rename. If the
+ * silly-rename succeeds, the copied dentry is hashed and becomes
+ * the new target.
+ */
+ if (new_inode && !S_ISDIR(new_inode->i_mode)) {
+ /*
+ * To prevent any new references to the target during the
+ * rename, we unhash the dentry in advance.
+ */
+ if (!d_unhashed(new_dentry)) {
+ d_drop(new_dentry);
+ rehash = new_dentry;
+ }
+
+ if (new_dentry->d_count > 2) {
+ int err;
+
+ /* copy the target dentry's name */
+ dentry = d_alloc(new_dentry->d_parent,
+ &new_dentry->d_name);
+ if (!dentry)
+ goto out;
+
+ /* silly-rename the existing target ... */
+ err = nfs_sillyrename(new_dir, new_dentry);
+ if (err)
+ goto out;
+
+ new_dentry = dentry;
+ rehash = NULL;
+ new_inode = NULL;
+ }
+ }
+
+ nfs_inode_return_delegation(old_inode);
+ if (new_inode != NULL)
+ nfs_inode_return_delegation(new_inode);
+
+ error = NFS_PROTO(old_dir)->rename(old_dir, &old_dentry->d_name,
+ new_dir, &new_dentry->d_name);
+ nfs_mark_for_revalidate(old_inode);
+out:
+ if (rehash)
+ d_rehash(rehash);
+ if (!error) {
+ if (new_inode != NULL)
+ nfs_drop_nlink(new_inode);
+ d_move(old_dentry, new_dentry);
+ nfs_set_verifier(new_dentry,
+ nfs_save_change_attribute(new_dir));
+ } else if (error == -ENOENT)
+ nfs_dentry_handle_enoent(old_dentry);
+
+ /* new dentry created? */
+ if (dentry)
+ dput(dentry);
+ return error;
+}
+
+static DEFINE_SPINLOCK(nfs_access_lru_lock);
+static LIST_HEAD(nfs_access_lru_list);
+static atomic_long_t nfs_access_nr_entries;
+
+static void nfs_access_free_entry(struct nfs_access_entry *entry)
+{
+ put_rpccred(entry->cred);
+ kfree(entry);
+ smp_mb__before_atomic_dec();
+ atomic_long_dec(&nfs_access_nr_entries);
+ smp_mb__after_atomic_dec();
+}
+
+static void nfs_access_free_list(struct list_head *head)
+{
+ struct nfs_access_entry *cache;
+
+ while (!list_empty(head)) {
+ cache = list_entry(head->next, struct nfs_access_entry, lru);
+ list_del(&cache->lru);
+ nfs_access_free_entry(cache);
+ }
+}
+
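+/*
+ * Memory-pressure callback: reap the oldest cached ACCESS result from
+ * each inode on the global LRU, scanning up to nr_to_scan entries.
+ */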
+int nfs_access_cache_shrinker(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ LIST_HEAD(head);
+ struct nfs_inode *nfsi, *next;
+ struct nfs_access_entry *cache;
+ int nr_to_scan = sc->nr_to_scan;
+ gfp_t gfp_mask = sc->gfp_mask;
+
+ if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
+ return (nr_to_scan == 0) ? 0 : -1;
+
+ spin_lock(&nfs_access_lru_lock);
+ list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
+ struct inode *inode;
+
+ if (nr_to_scan-- == 0)
+ break;
+ inode = &nfsi->vfs_inode;
+ spin_lock(&inode->i_lock);
+ if (list_empty(&nfsi->access_cache_entry_lru))
+ goto remove_lru_entry;
+ cache = list_entry(nfsi->access_cache_entry_lru.next,
+ struct nfs_access_entry, lru);
+ list_move(&cache->lru, &head);
+ rb_erase(&cache->rb_node, &nfsi->access_cache);
+ if (!list_empty(&nfsi->access_cache_entry_lru))
+ list_move_tail(&nfsi->access_cache_inode_lru,
+ &nfs_access_lru_list);
+ else {
+remove_lru_entry:
+ list_del_init(&nfsi->access_cache_inode_lru);
+ smp_mb__before_clear_bit();
+ clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
+ smp_mb__after_clear_bit();
+ }
+ spin_unlock(&inode->i_lock);
+ }
+ spin_unlock(&nfs_access_lru_lock);
+ nfs_access_free_list(&head);
+ return (atomic_long_read(&nfs_access_nr_entries) / 100) * sysctl_vfs_cache_pressure;
+}
+
+static void __nfs_access_zap_cache(struct nfs_inode *nfsi, struct list_head *head)
+{
+ struct rb_root *root_node = &nfsi->access_cache;
+ struct rb_node *n;
+ struct nfs_access_entry *entry;
+
+ /* Unhook entries from the cache */
+ while ((n = rb_first(root_node)) != NULL) {
+ entry = rb_entry(n, struct nfs_access_entry, rb_node);
+ rb_erase(n, root_node);
+ list_move(&entry->lru, head);
+ }
+ nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS;
+}
+
+void nfs_access_zap_cache(struct inode *inode)
+{
+ LIST_HEAD(head);
+
+ if (test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags) == 0)
+ return;
+ /* Remove from the global LRU list */
+ spin_lock(&nfs_access_lru_lock);
+ if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
+ list_del_init(&NFS_I(inode)->access_cache_inode_lru);
+
+ spin_lock(&inode->i_lock);
+ __nfs_access_zap_cache(NFS_I(inode), &head);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&nfs_access_lru_lock);
+ nfs_access_free_list(&head);
+}
+
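+/*
+ * The per-inode access cache is a red-black tree keyed by the raw
+ * struct rpc_cred pointer, so each credential gets at most one entry
+ * per inode and lookups reduce to pointer comparisons.
+ */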
+static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, struct rpc_cred *cred)
+{
+ struct rb_node *n = NFS_I(inode)->access_cache.rb_node;
+ struct nfs_access_entry *entry;
+
+ while (n != NULL) {
+ entry = rb_entry(n, struct nfs_access_entry, rb_node);
+
+ if (cred < entry->cred)
+ n = n->rb_left;
+ else if (cred > entry->cred)
+ n = n->rb_right;
+ else
+ return entry;
+ }
+ return NULL;
+}
+
+static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_access_entry *cache;
+ int err = -ENOENT;
+
+ spin_lock(&inode->i_lock);
+ if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
+ goto out_zap;
+ cache = nfs_access_search_rbtree(inode, cred);
+ if (cache == NULL)
+ goto out;
+ if (!nfs_have_delegated_attributes(inode) &&
+ !time_in_range_open(jiffies, cache->jiffies, cache->jiffies + nfsi->attrtimeo))
+ goto out_stale;
+ res->jiffies = cache->jiffies;
+ res->cred = cache->cred;
+ res->mask = cache->mask;
+ list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
+ err = 0;
+out:
+ spin_unlock(&inode->i_lock);
+ return err;
+out_stale:
+ rb_erase(&cache->rb_node, &nfsi->access_cache);
+ list_del(&cache->lru);
+ spin_unlock(&inode->i_lock);
+ nfs_access_free_entry(cache);
+ return -ENOENT;
+out_zap:
+ spin_unlock(&inode->i_lock);
+ nfs_access_zap_cache(inode);
+ return -ENOENT;
+}
+
+static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct rb_root *root_node = &nfsi->access_cache;
+ struct rb_node **p = &root_node->rb_node;
+ struct rb_node *parent = NULL;
+ struct nfs_access_entry *entry;
+
+ spin_lock(&inode->i_lock);
+ while (*p != NULL) {
+ parent = *p;
+ entry = rb_entry(parent, struct nfs_access_entry, rb_node);
+
+ if (set->cred < entry->cred)
+ p = &parent->rb_left;
+ else if (set->cred > entry->cred)
+ p = &parent->rb_right;
+ else
+ goto found;
+ }
+ rb_link_node(&set->rb_node, parent, p);
+ rb_insert_color(&set->rb_node, root_node);
+ list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
+ spin_unlock(&inode->i_lock);
+ return;
+found:
+ rb_replace_node(parent, &set->rb_node, root_node);
+ list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
+ list_del(&entry->lru);
+ spin_unlock(&inode->i_lock);
+ nfs_access_free_entry(entry);
+}
+
+static void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
+{
+ struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL);
+ if (cache == NULL)
+ return;
+ RB_CLEAR_NODE(&cache->rb_node);
+ cache->jiffies = set->jiffies;
+ cache->cred = get_rpccred(set->cred);
+ cache->mask = set->mask;
+
+ nfs_access_add_rbtree(inode, cache);
+
+ /* Update accounting */
+ smp_mb__before_atomic_inc();
+ atomic_long_inc(&nfs_access_nr_entries);
+ smp_mb__after_atomic_inc();
+
+ /* Add inode to global LRU list */
+ if (!test_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
+ spin_lock(&nfs_access_lru_lock);
+ if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags))
+ list_add_tail(&NFS_I(inode)->access_cache_inode_lru,
+ &nfs_access_lru_list);
+ spin_unlock(&nfs_access_lru_lock);
+ }
+}
+
+static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
+{
+ struct nfs_access_entry cache;
+ int status;
+
+ status = nfs_access_get_cached(inode, cred, &cache);
+ if (status == 0)
+ goto out;
+
+ /* Be clever: ask server to check for all possible rights */
+ cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ;
+ cache.cred = cred;
+ cache.jiffies = jiffies;
+ status = NFS_PROTO(inode)->access(inode, &cache);
+ if (status != 0) {
+ if (status == -ESTALE) {
+ nfs_zap_caches(inode);
+ if (!S_ISDIR(inode->i_mode))
+ set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+ }
+ return status;
+ }
+ nfs_access_add_cache(inode, &cache);
+out:
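+ /*
+ * Example: if the server granted MAY_READ|MAY_EXEC and the caller
+ * asked for MAY_READ, mask & ~cache.mask has no rights bits set and
+ * access is granted; a MAY_WRITE request would leave MAY_WRITE set
+ * and fail with -EACCES.
+ */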
+ if ((mask & ~cache.mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
+ return 0;
+ return -EACCES;
+}
+
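+/*
+ * Map open(2) flags to access bits: O_RDONLY yields MAY_READ, O_WRONLY
+ * MAY_WRITE, O_RDWR both, and an exec-style open (__FMODE_EXEC) adds
+ * MAY_EXEC.
+ */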
+static int nfs_open_permission_mask(int openflags)
+{
+ int mask = 0;
+
+ if ((openflags & O_ACCMODE) != O_WRONLY)
+ mask |= MAY_READ;
+ if ((openflags & O_ACCMODE) != O_RDONLY)
+ mask |= MAY_WRITE;
+ if (openflags & __FMODE_EXEC)
+ mask |= MAY_EXEC;
+ return mask;
+}
+
+int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
+{
+ return nfs_do_access(inode, cred, nfs_open_permission_mask(openflags));
+}
+
+int nfs_permission(struct inode *inode, int mask)
+{
+ struct rpc_cred *cred;
+ int res = 0;
+
+ if (mask & MAY_NOT_BLOCK)
+ return -ECHILD;
+
+ nfs_inc_stats(inode, NFSIOS_VFSACCESS);
+
+ if ((mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
+ goto out;
+ /* Is this sys_access() ? */
+ if (mask & (MAY_ACCESS | MAY_CHDIR))
+ goto force_lookup;
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFLNK:
+ goto out;
+ case S_IFREG:
+ /* NFSv4 has atomic_open... */
+ if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
+ && (mask & MAY_OPEN)
+ && !(mask & MAY_EXEC))
+ goto out;
+ break;
+ case S_IFDIR:
+ /*
+ * Optimize away all write operations, since the server
+ * will check permissions when we perform the op.
+ */
+ if ((mask & MAY_WRITE) && !(mask & MAY_READ))
+ goto out;
+ }
+
+force_lookup:
+ if (!NFS_PROTO(inode)->access)
+ goto out_notsup;
+
+ cred = rpc_lookup_cred();
+ if (!IS_ERR(cred)) {
+ res = nfs_do_access(inode, cred, mask);
+ put_rpccred(cred);
+ } else
+ res = PTR_ERR(cred);
+out:
+ if (!res && (mask & MAY_EXEC) && !execute_ok(inode))
+ res = -EACCES;
+
+ dfprintk(VFS, "NFS: permission(%s/%ld), mask=0x%x, res=%d\n",
+ inode->i_sb->s_id, inode->i_ino, mask, res);
+ return res;
+out_notsup:
+ res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (res == 0)
+ res = generic_permission(inode, mask);
+ goto out;
+}
+
+/*
+ * Local variables:
+ * version-control: t
+ * kept-new-versions: 5
+ * End:
+ */
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
new file mode 100644
index 00000000..481be7f7
--- /dev/null
+++ b/fs/nfs/direct.c
@@ -0,0 +1,1032 @@
+/*
+ * linux/fs/nfs/direct.c
+ *
+ * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
+ *
+ * High-performance uncached I/O for the Linux NFS client
+ *
+ * There are important applications whose performance or correctness
+ * depends on uncached access to file data. Database clusters
+ * (multiple copies of the same instance running on separate hosts)
+ * implement their own cache coherency protocol that subsumes file
+ * system cache protocols. Applications that process datasets
+ * considerably larger than the client's memory do not always benefit
+ * from a local cache. A streaming video server, for instance, has no
+ * need to cache the contents of a file.
+ *
+ * When an application requests uncached I/O, all read and write requests
+ * are made directly to the server; data stored or fetched via these
+ * requests is not cached in the Linux page cache. The client does not
+ * correct unaligned requests from applications. All requested bytes are
+ * held on permanent storage before a direct write system call returns to
+ * an application.
+ *
+ * Solaris implements an uncached I/O facility called directio() that
+ * is used for backups and sequential I/O to very large files. Solaris
+ * also supports uncaching whole NFS partitions with "-o forcedirectio,"
+ * an undocumented mount option.
+ *
+ * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
+ * help from Andrew Morton.
+ *
+ * 18 Dec 2001 Initial implementation for 2.4 --cel
+ * 08 Jul 2002 Version for 2.4.19, with bug fixes --trondmy
+ * 08 Jun 2003 Port to 2.5 APIs --cel
+ * 31 Mar 2004 Handle direct I/O without VFS support --cel
+ * 15 Sep 2004 Parallel async reads --cel
+ * 04 May 2005 support O_DIRECT with aio --cel
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/task_io_accounting_ops.h>
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/sunrpc/clnt.h>
+
+#include <asm/uaccess.h>
+#include <linux/atomic.h>
+
+#include "internal.h"
+#include "iostat.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+static struct kmem_cache *nfs_direct_cachep;
+
+/*
+ * This represents a set of asynchronous requests that we're waiting on
+ */
+struct nfs_direct_req {
+ struct kref kref; /* release manager */
+
+ /* I/O parameters */
+ struct nfs_open_context *ctx; /* file open context info */
+ struct nfs_lock_context *l_ctx; /* Lock context info */
+ struct kiocb * iocb; /* controlling i/o request */
+ struct inode * inode; /* target file of i/o */
+
+ /* completion state */
+ atomic_t io_count; /* i/os we're waiting for */
+ spinlock_t lock; /* protect completion state */
+ ssize_t count, /* bytes actually processed */
+ error; /* any reported error */
+ struct completion completion; /* wait for i/o completion */
+
+ /* commit state */
+ struct list_head rewrite_list; /* saved nfs_write_data structs */
+ struct nfs_write_data * commit_data; /* special write_data for commits */
+ int flags;
+#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
+#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
+ struct nfs_writeverf verf; /* unstable write verifier */
+};
+
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
+static const struct rpc_call_ops nfs_write_direct_ops;
+
+static inline void get_dreq(struct nfs_direct_req *dreq)
+{
+ atomic_inc(&dreq->io_count);
+}
+
+static inline int put_dreq(struct nfs_direct_req *dreq)
+{
+ return atomic_dec_and_test(&dreq->io_count);
+}
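+
+/*
+ * dreq->io_count holds one reference for the scheduling code plus one
+ * per in-flight RPC: the scheduler calls get_dreq() once up front and
+ * once per dispatched request, each RPC release path does put_dreq(),
+ * and whoever drops the count to zero completes the request.
+ */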
+
+/**
+ * nfs_direct_IO - NFS address space operation for direct I/O
+ * @rw: direction (read or write)
+ * @iocb: target I/O control block
+ * @iov: array of vectors that define I/O buffer
+ * @pos: offset in file to begin the operation
+ * @nr_segs: size of iovec array
+ *
+ * The presence of this routine in the address space ops vector means
+ * the NFS client supports direct I/O. However, we shunt off direct
+ * read and write requests before the VFS gets them, so this method
+ * should never be called.
+ */
+ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+{
+ dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
+ iocb->ki_filp->f_path.dentry->d_name.name,
+ (long long) pos, nr_segs);
+
+ return -EINVAL;
+}
+
+static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
+{
+ unsigned int npages;
+ unsigned int i;
+
+ if (count == 0)
+ return;
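+ /*
+ * Example with 4k pages: pgbase = 0x1100, count = 0x2000 skips one
+ * whole page (pgbase >> PAGE_SHIFT) and then dirties
+ * (0x2000 + 0x100 + 0xfff) >> 12 = 3 pages.
+ */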
+ pages += (pgbase >> PAGE_SHIFT);
+ npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ struct page *page = pages[i];
+ if (!PageCompound(page))
+ set_page_dirty(page);
+ }
+}
+
+static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
+{
+ unsigned int i;
+ for (i = 0; i < npages; i++)
+ page_cache_release(pages[i]);
+}
+
+static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
+{
+ struct nfs_direct_req *dreq;
+
+ dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL);
+ if (!dreq)
+ return NULL;
+
+ kref_init(&dreq->kref);
+ kref_get(&dreq->kref);
+ init_completion(&dreq->completion);
+ INIT_LIST_HEAD(&dreq->rewrite_list);
+ dreq->iocb = NULL;
+ dreq->ctx = NULL;
+ dreq->l_ctx = NULL;
+ spin_lock_init(&dreq->lock);
+ atomic_set(&dreq->io_count, 0);
+ dreq->count = 0;
+ dreq->error = 0;
+ dreq->flags = 0;
+
+ return dreq;
+}
+
+static void nfs_direct_req_free(struct kref *kref)
+{
+ struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
+
+ if (dreq->l_ctx != NULL)
+ nfs_put_lock_context(dreq->l_ctx);
+ if (dreq->ctx != NULL)
+ put_nfs_open_context(dreq->ctx);
+ kmem_cache_free(nfs_direct_cachep, dreq);
+}
+
+static void nfs_direct_req_release(struct nfs_direct_req *dreq)
+{
+ kref_put(&dreq->kref, nfs_direct_req_free);
+}
+
+/*
+ * Collects and returns the final error value/byte-count.  For async
+ * (aio) requests this returns -EIOCBQUEUED; the result is delivered
+ * later via aio_complete() in nfs_direct_complete().
+ */
+static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
+{
+ ssize_t result = -EIOCBQUEUED;
+
+ /* Async requests don't wait here */
+ if (dreq->iocb)
+ goto out;
+
+ result = wait_for_completion_killable(&dreq->completion);
+
+ if (!result)
+ result = dreq->error;
+ if (!result)
+ result = dreq->count;
+
+out:
+ return (ssize_t) result;
+}
+
+/*
+ * Synchronous I/O uses a stack-allocated iocb, so we can't trust that
+ * the iocb is still valid here if this was a synchronous request.
+ */
+static void nfs_direct_complete(struct nfs_direct_req *dreq)
+{
+ if (dreq->iocb) {
+ long res = (long) dreq->error;
+ if (!res)
+ res = (long) dreq->count;
+ aio_complete(dreq->iocb, res, 0);
+ }
+ complete_all(&dreq->completion);
+
+ nfs_direct_req_release(dreq);
+}
+
+/*
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete. This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
+ */
+static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+
+ nfs_readpage_result(task, data);
+}
+
+static void nfs_direct_read_release(void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+ struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+ int status = data->task.tk_status;
+
+ spin_lock(&dreq->lock);
+ if (unlikely(status < 0)) {
+ dreq->error = status;
+ spin_unlock(&dreq->lock);
+ } else {
+ dreq->count += data->res.count;
+ spin_unlock(&dreq->lock);
+ nfs_direct_dirty_pages(data->pagevec,
+ data->args.pgbase,
+ data->res.count);
+ }
+ nfs_direct_release_pages(data->pagevec, data->npages);
+
+ if (put_dreq(dreq))
+ nfs_direct_complete(dreq);
+ nfs_readdata_free(data);
+}
+
+static const struct rpc_call_ops nfs_read_direct_ops = {
+ .rpc_call_prepare = nfs_read_prepare,
+ .rpc_call_done = nfs_direct_read_result,
+ .rpc_release = nfs_direct_read_release,
+};
+
+/*
+ * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
+ * operation. If nfs_readdata_alloc() or get_user_pages() fails,
+ * bail and stop sending more reads. Read length accounting is
+ * handled automatically by nfs_direct_read_result(). Otherwise, if
+ * no requests have been sent, just return an error.
+ */
+static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
+ const struct iovec *iov,
+ loff_t pos)
+{
+ struct nfs_open_context *ctx = dreq->ctx;
+ struct inode *inode = ctx->dentry->d_inode;
+ unsigned long user_addr = (unsigned long)iov->iov_base;
+ size_t count = iov->iov_len;
+ size_t rsize = NFS_SERVER(inode)->rsize;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_cred = ctx->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = NFS_CLIENT(inode),
+ .rpc_message = &msg,
+ .callback_ops = &nfs_read_direct_ops,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+ unsigned int pgbase;
+ int result;
+ ssize_t started = 0;
+
+ do {
+ struct nfs_read_data *data;
+ size_t bytes;
+
+ pgbase = user_addr & ~PAGE_MASK;
+ bytes = min(rsize, count);
+
+ result = -ENOMEM;
+ data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes));
+ if (unlikely(!data))
+ break;
+
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, user_addr,
+ data->npages, 1, 0, data->pagevec, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (result < 0) {
+ nfs_readdata_free(data);
+ break;
+ }
+ if ((unsigned)result < data->npages) {
+ bytes = result * PAGE_SIZE;
+ if (bytes <= pgbase) {
+ nfs_direct_release_pages(data->pagevec, result);
+ nfs_readdata_free(data);
+ break;
+ }
+ bytes -= pgbase;
+ data->npages = result;
+ }
+
+ get_dreq(dreq);
+
+ data->req = (struct nfs_page *) dreq;
+ data->inode = inode;
+ data->cred = msg.rpc_cred;
+ data->args.fh = NFS_FH(inode);
+ data->args.context = ctx;
+ data->args.lock_context = dreq->l_ctx;
+ data->args.offset = pos;
+ data->args.pgbase = pgbase;
+ data->args.pages = data->pagevec;
+ data->args.count = bytes;
+ data->res.fattr = &data->fattr;
+ data->res.eof = 0;
+ data->res.count = bytes;
+ nfs_fattr_init(&data->fattr);
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+
+ task_setup_data.task = &data->task;
+ task_setup_data.callback_data = data;
+ NFS_PROTO(inode)->read_setup(data, &msg);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ break;
+ rpc_put_task(task);
+
+ dprintk("NFS: %5u initiated direct read call "
+ "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
+ data->task.tk_pid,
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ bytes,
+ (unsigned long long)data->args.offset);
+
+ started += bytes;
+ user_addr += bytes;
+ pos += bytes;
+ /* FIXME: Remove this unnecessary math from final patch */
+ pgbase += bytes;
+ pgbase &= ~PAGE_MASK;
+ BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
+
+ count -= bytes;
+ } while (count != 0);
+
+ if (started)
+ return started;
+ return result < 0 ? (ssize_t) result : -EFAULT;
+}
+
+static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
+ const struct iovec *iov,
+ unsigned long nr_segs,
+ loff_t pos)
+{
+ ssize_t result = -EINVAL;
+ size_t requested_bytes = 0;
+ unsigned long seg;
+
+ get_dreq(dreq);
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ const struct iovec *vec = &iov[seg];
+ result = nfs_direct_read_schedule_segment(dreq, vec, pos);
+ if (result < 0)
+ break;
+ requested_bytes += result;
+ if ((size_t)result < vec->iov_len)
+ break;
+ pos += vec->iov_len;
+ }
+
+ /*
+ * If no bytes were started, return the error, and let the
+ * generic layer handle the completion.
+ */
+ if (requested_bytes == 0) {
+ nfs_direct_req_release(dreq);
+ return result < 0 ? result : -EIO;
+ }
+
+ if (put_dreq(dreq))
+ nfs_direct_complete(dreq);
+ return 0;
+}
+
+static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ ssize_t result = -ENOMEM;
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct nfs_direct_req *dreq;
+
+ dreq = nfs_direct_req_alloc();
+ if (dreq == NULL)
+ goto out;
+
+ dreq->inode = inode;
+ dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
+ dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
+ if (dreq->l_ctx == NULL)
+ goto out_release;
+ if (!is_sync_kiocb(iocb))
+ dreq->iocb = iocb;
+
+ result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
+ if (!result)
+ result = nfs_direct_wait(dreq);
+out_release:
+ nfs_direct_req_release(dreq);
+out:
+ return result;
+}
+
+static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
+{
+ while (!list_empty(&dreq->rewrite_list)) {
+ struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
+ list_del(&data->pages);
+ nfs_direct_release_pages(data->pagevec, data->npages);
+ nfs_writedata_free(data);
+ }
+}
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
+{
+ struct inode *inode = dreq->inode;
+ struct list_head *p;
+ struct nfs_write_data *data;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_cred = dreq->ctx->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = NFS_CLIENT(inode),
+ .rpc_message = &msg,
+ .callback_ops = &nfs_write_direct_ops,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+
+ dreq->count = 0;
+ get_dreq(dreq);
+
+ list_for_each(p, &dreq->rewrite_list) {
+ data = list_entry(p, struct nfs_write_data, pages);
+
+ get_dreq(dreq);
+
+ /* Use stable writes */
+ data->args.stable = NFS_FILE_SYNC;
+
+ /*
+ * Reset data->res.
+ */
+ nfs_fattr_init(&data->fattr);
+ data->res.count = data->args.count;
+ memset(&data->verf, 0, sizeof(data->verf));
+
+ /*
+ * Reuse data->task; data->args should not have changed
+ * since the original request was sent.
+ */
+ task_setup_data.task = &data->task;
+ task_setup_data.callback_data = data;
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ NFS_PROTO(inode)->write_setup(data, &msg);
+
+ /*
+ * We're called via an RPC callback, so BKL is already held.
+ */
+ task = rpc_run_task(&task_setup_data);
+ if (!IS_ERR(task))
+ rpc_put_task(task);
+
+ dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+ data->task.tk_pid,
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+ }
+
+ if (put_dreq(dreq))
+ nfs_direct_write_complete(dreq, inode);
+}
+
+static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+
+ /* Call the NFS version-specific code */
+ NFS_PROTO(data->inode)->commit_done(task, data);
+}
+
+static void nfs_direct_commit_release(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+ int status = data->task.tk_status;
+
+ if (status < 0) {
+ dprintk("NFS: %5u commit failed with error %d.\n",
+ data->task.tk_pid, status);
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ } else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
+ dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ }
+
+ dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
+ nfs_direct_write_complete(dreq, data->inode);
+ nfs_commit_free(data);
+}
+
+static const struct rpc_call_ops nfs_commit_direct_ops = {
+ .rpc_call_prepare = nfs_write_prepare,
+ .rpc_call_done = nfs_direct_commit_result,
+ .rpc_release = nfs_direct_commit_release,
+};
+
+static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
+{
+ struct nfs_write_data *data = dreq->commit_data;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = dreq->ctx->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .task = &data->task,
+ .rpc_client = NFS_CLIENT(dreq->inode),
+ .rpc_message = &msg,
+ .callback_ops = &nfs_commit_direct_ops,
+ .callback_data = data,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+
+ data->inode = dreq->inode;
+ data->cred = msg.rpc_cred;
+
+ data->args.fh = NFS_FH(data->inode);
+ data->args.offset = 0;
+ data->args.count = 0;
+ data->args.context = dreq->ctx;
+ data->args.lock_context = dreq->l_ctx;
+ data->res.count = 0;
+ data->res.fattr = &data->fattr;
+ data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
+
+ NFS_PROTO(data->inode)->commit_setup(data, &msg);
+
+ /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
+ dreq->commit_data = NULL;
+
+ dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
+
+ task = rpc_run_task(&task_setup_data);
+ if (!IS_ERR(task))
+ rpc_put_task(task);
+}
+
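+/*
+ * Completion state machine for direct writes: flags == 0 means every
+ * reply was FILE_SYNC, so just finish; NFS_ODIRECT_DO_COMMIT means an
+ * UNSTABLE reply was seen, so send a COMMIT; NFS_ODIRECT_RESCHED_WRITES
+ * means a verifier mismatch was detected (the server likely rebooted),
+ * so resend all the writes as stable.
+ */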
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+{
+ int flags = dreq->flags;
+
+ dreq->flags = 0;
+ switch (flags) {
+ case NFS_ODIRECT_DO_COMMIT:
+ nfs_direct_commit_schedule(dreq);
+ break;
+ case NFS_ODIRECT_RESCHED_WRITES:
+ nfs_direct_write_reschedule(dreq);
+ break;
+ default:
+ if (dreq->commit_data != NULL)
+ nfs_commit_free(dreq->commit_data);
+ nfs_direct_free_writedata(dreq);
+ nfs_zap_mapping(inode, inode->i_mapping);
+ nfs_direct_complete(dreq);
+ }
+}
+
+static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+{
+ dreq->commit_data = nfs_commitdata_alloc();
+ if (dreq->commit_data != NULL)
+ dreq->commit_data->req = (struct nfs_page *) dreq;
+}
+#else
+static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
+{
+ dreq->commit_data = NULL;
+}
+
+static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
+{
+ nfs_direct_free_writedata(dreq);
+ nfs_zap_mapping(inode, inode->i_mapping);
+ nfs_direct_complete(dreq);
+}
+#endif
+
+static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+
+ nfs_writeback_done(task, data);
+}
+
+/*
+ * NB: Return the value of the first error return code; subsequent
+ * errors are ignored.
+ */
+static void nfs_direct_write_release(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
+ int status = data->task.tk_status;
+
+ spin_lock(&dreq->lock);
+
+ if (unlikely(status < 0)) {
+ /* An error has occurred, so we should not commit */
+ dreq->flags = 0;
+ dreq->error = status;
+ }
+ if (unlikely(dreq->error != 0))
+ goto out_unlock;
+
+ dreq->count += data->res.count;
+
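+ /*
+ * On the first unstable reply, save the write verifier and plan a
+ * COMMIT.  Every later reply must carry the same verifier; a
+ * mismatch means the server rebooted and the writes must be resent.
+ */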
+ if (data->res.verf->committed != NFS_FILE_SYNC) {
+ switch (dreq->flags) {
+ case 0:
+ memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
+ dreq->flags = NFS_ODIRECT_DO_COMMIT;
+ break;
+ case NFS_ODIRECT_DO_COMMIT:
+ if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
+ dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
+ dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+ }
+ }
+ }
+out_unlock:
+ spin_unlock(&dreq->lock);
+
+ if (put_dreq(dreq))
+ nfs_direct_write_complete(dreq, data->inode);
+}
+
+static const struct rpc_call_ops nfs_write_direct_ops = {
+ .rpc_call_prepare = nfs_write_prepare,
+ .rpc_call_done = nfs_direct_write_result,
+ .rpc_release = nfs_direct_write_release,
+};
+
+/*
+ * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
+ * operation. If nfs_writedata_alloc() or get_user_pages() fails,
+ * bail and stop sending more writes. Write length accounting is
+ * handled automatically by nfs_direct_write_result(). Otherwise, if
+ * no requests have been sent, just return an error.
+ */
+static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
+ const struct iovec *iov,
+ loff_t pos, int sync)
+{
+ struct nfs_open_context *ctx = dreq->ctx;
+ struct inode *inode = ctx->dentry->d_inode;
+ unsigned long user_addr = (unsigned long)iov->iov_base;
+ size_t count = iov->iov_len;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_cred = ctx->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = NFS_CLIENT(inode),
+ .rpc_message = &msg,
+ .callback_ops = &nfs_write_direct_ops,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+ size_t wsize = NFS_SERVER(inode)->wsize;
+ unsigned int pgbase;
+ int result;
+ ssize_t started = 0;
+
+ do {
+ struct nfs_write_data *data;
+ size_t bytes;
+
+ pgbase = user_addr & ~PAGE_MASK;
+ bytes = min(wsize, count);
+
+ result = -ENOMEM;
+ data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes));
+ if (unlikely(!data))
+ break;
+
+ down_read(&current->mm->mmap_sem);
+ result = get_user_pages(current, current->mm, user_addr,
+ data->npages, 0, 0, data->pagevec, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (result < 0) {
+ nfs_writedata_free(data);
+ break;
+ }
+ if ((unsigned)result < data->npages) {
+ bytes = result * PAGE_SIZE;
+ if (bytes <= pgbase) {
+ nfs_direct_release_pages(data->pagevec, result);
+ nfs_writedata_free(data);
+ break;
+ }
+ bytes -= pgbase;
+ data->npages = result;
+ }
+
+ get_dreq(dreq);
+
+ list_move_tail(&data->pages, &dreq->rewrite_list);
+
+ data->req = (struct nfs_page *) dreq;
+ data->inode = inode;
+ data->cred = msg.rpc_cred;
+ data->args.fh = NFS_FH(inode);
+ data->args.context = ctx;
+ data->args.lock_context = dreq->l_ctx;
+ data->args.offset = pos;
+ data->args.pgbase = pgbase;
+ data->args.pages = data->pagevec;
+ data->args.count = bytes;
+ data->args.stable = sync;
+ data->res.fattr = &data->fattr;
+ data->res.count = bytes;
+ data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
+
+ task_setup_data.task = &data->task;
+ task_setup_data.callback_data = data;
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ NFS_PROTO(inode)->write_setup(data, &msg);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ break;
+ rpc_put_task(task);
+
+ dprintk("NFS: %5u initiated direct write call "
+ "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
+ data->task.tk_pid,
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ bytes,
+ (unsigned long long)data->args.offset);
+
+ started += bytes;
+ user_addr += bytes;
+ pos += bytes;
+
+ /* FIXME: Remove this useless math from the final patch */
+ pgbase += bytes;
+ pgbase &= ~PAGE_MASK;
+ BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
+
+ count -= bytes;
+ } while (count != 0);
+
+ if (started)
+ return started;
+ return result < 0 ? (ssize_t) result : -EFAULT;
+}
+
+static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
+ const struct iovec *iov,
+ unsigned long nr_segs,
+ loff_t pos, int sync)
+{
+ ssize_t result = 0;
+ size_t requested_bytes = 0;
+ unsigned long seg;
+
+ get_dreq(dreq);
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ const struct iovec *vec = &iov[seg];
+ result = nfs_direct_write_schedule_segment(dreq, vec,
+ pos, sync);
+ if (result < 0)
+ break;
+ requested_bytes += result;
+ if ((size_t)result < vec->iov_len)
+ break;
+ pos += vec->iov_len;
+ }
+
+ /*
+ * If no bytes were started, return the error, and let the
+ * generic layer handle the completion.
+ */
+ if (requested_bytes == 0) {
+ nfs_direct_req_release(dreq);
+ return result < 0 ? result : -EIO;
+ }
+
+ if (put_dreq(dreq))
+ nfs_direct_write_complete(dreq, dreq->inode);
+ return 0;
+}
+
+static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos,
+ size_t count)
+{
+ ssize_t result = -ENOMEM;
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ struct nfs_direct_req *dreq;
+ size_t wsize = NFS_SERVER(inode)->wsize;
+ int sync = NFS_UNSTABLE;
+
+ dreq = nfs_direct_req_alloc();
+ if (!dreq)
+ goto out;
+ nfs_alloc_commit_data(dreq);
+
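+ /*
+ * If commit data could not be allocated, or the whole write fits in
+ * a single wsize'd request, ask for FILE_SYNC up front: a separate
+ * COMMIT round trip would cost more than it saves.
+ */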
+ if (dreq->commit_data == NULL || count <= wsize)
+ sync = NFS_FILE_SYNC;
+
+ dreq->inode = inode;
+ dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
+ dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
+ if (dreq->l_ctx == NULL)
+ goto out_release;
+ if (!is_sync_kiocb(iocb))
+ dreq->iocb = iocb;
+
+ result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
+ if (!result)
+ result = nfs_direct_wait(dreq);
+out_release:
+ nfs_direct_req_release(dreq);
+out:
+ return result;
+}
+
+/**
+ * nfs_file_direct_read - file direct read operation for NFS files
+ * @iocb: target I/O control block
+ * @iov: vector of user buffers into which to read data
+ * @nr_segs: size of iov vector
+ * @pos: byte offset in file where reading starts
+ *
+ * We use this function for direct reads instead of calling
+ * generic_file_aio_read() in order to avoid its check to see if
+ * the request starts before the end of the file. For that check
+ * to work, we must generate a GETATTR before each direct read, and
+ * even then there is a window between the GETATTR and the subsequent
+ * READ where the file size could change. Our preference is simply
+ * to do all reads the application wants, and the server will take
+ * care of managing the end of file boundary.
+ *
+ * This function also eliminates unnecessarily updating the file's
+ * atime locally, as the NFS server sets the file's atime, and this
+ * client must read the updated atime from the server back into its
+ * cache.
+ */
+ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ ssize_t retval = -EINVAL;
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ size_t count;
+
+ count = iov_length(iov, nr_segs);
+ nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
+
+ dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
+ file->f_path.dentry->d_parent->d_name.name,
+ file->f_path.dentry->d_name.name,
+ count, (long long) pos);
+
+ retval = 0;
+ if (!count)
+ goto out;
+
+ retval = nfs_sync_mapping(mapping);
+ if (retval)
+ goto out;
+
+ task_io_account_read(count);
+
+ retval = nfs_direct_read(iocb, iov, nr_segs, pos);
+ if (retval > 0)
+ iocb->ki_pos = pos + retval;
+
+out:
+ return retval;
+}
+
+/**
+ * nfs_file_direct_write - file direct write operation for NFS files
+ * @iocb: target I/O control block
+ * @iov: vector of user buffers from which to write data
+ * @nr_segs: size of iov vector
+ * @pos: byte offset in file where writing starts
+ *
+ * We use this function for direct writes instead of calling
+ * generic_file_aio_write() in order to avoid taking the inode
+ * semaphore and updating the i_size. The NFS server will set
+ * the new i_size and this client must read the updated size
+ * back into its cache. We let the server do generic write
+ * parameter checking and report problems.
+ *
+ * We eliminate local atime updates, see direct read above.
+ *
+ * We avoid unnecessary page cache invalidations for normal cached
+ * readers of this file.
+ *
+ * Note that O_APPEND is not supported for NFS direct writes, as there
+ * is no atomic O_APPEND write facility in the NFS protocol.
+ */
+ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ ssize_t retval = -EINVAL;
+ struct file *file = iocb->ki_filp;
+ struct address_space *mapping = file->f_mapping;
+ size_t count;
+
+ count = iov_length(iov, nr_segs);
+ nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
+
+ dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
+ file->f_path.dentry->d_parent->d_name.name,
+ file->f_path.dentry->d_name.name,
+ count, (long long) pos);
+
+ retval = generic_write_checks(file, &pos, &count, 0);
+ if (retval)
+ goto out;
+
+ retval = -EINVAL;
+ if ((ssize_t) count < 0)
+ goto out;
+ retval = 0;
+ if (!count)
+ goto out;
+
+ retval = nfs_sync_mapping(mapping);
+ if (retval)
+ goto out;
+
+ task_io_account_write(count);
+
+ retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
+
+ if (retval > 0)
+ iocb->ki_pos = pos + retval;
+
+out:
+ return retval;
+}
+
+/**
+ * nfs_init_directcache - create a slab cache for nfs_direct_req structures
+ *
+ */
+int __init nfs_init_directcache(void)
+{
+ nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
+ sizeof(struct nfs_direct_req),
+ 0, (SLAB_RECLAIM_ACCOUNT|
+ SLAB_MEM_SPREAD),
+ NULL);
+ if (nfs_direct_cachep == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
+ *
+ */
+void nfs_destroy_directcache(void)
+{
+ kmem_cache_destroy(nfs_direct_cachep);
+}
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
new file mode 100644
index 00000000..b3924b8a
--- /dev/null
+++ b/fs/nfs/dns_resolve.c
@@ -0,0 +1,448 @@
+/*
+ * linux/fs/nfs/dns_resolve.c
+ *
+ * Copyright (c) 2009 Trond Myklebust <Trond.Myklebust@netapp.com>
+ *
+ * Resolves DNS hostnames into valid ip addresses
+ */
+
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/dns_resolver.h>
+#include "dns_resolve.h"
+
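+/*
+ * With CONFIG_NFS_USE_KERNEL_DNS, name resolution is delegated to the
+ * kernel DNS resolver: dns_query() hands back a textual address, which
+ * rpc_pton() then converts into a sockaddr.
+ */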
+ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
+ struct sockaddr *sa, size_t salen)
+{
+ ssize_t ret;
+ char *ip_addr = NULL;
+ int ip_len;
+
+ ip_len = dns_query(NULL, name, namelen, NULL, &ip_addr, NULL);
+ if (ip_len > 0)
+ ret = rpc_pton(net, ip_addr, ip_len, sa, salen);
+ else
+ ret = -ESRCH;
+ kfree(ip_addr);
+ return ret;
+}
+
+#else
+
+#include <linux/hash.h>
+#include <linux/string.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/socket.h>
+#include <linux/seq_file.h>
+#include <linux/inet.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/cache.h>
+#include <linux/sunrpc/svcauth.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+
+#include "dns_resolve.h"
+#include "cache_lib.h"
+#include "netns.h"
+
+#define NFS_DNS_HASHBITS 4
+#define NFS_DNS_HASHTBL_SIZE (1 << NFS_DNS_HASHBITS)
+
+struct nfs_dns_ent {
+ struct cache_head h;
+
+ char *hostname;
+ size_t namelen;
+
+ struct sockaddr_storage addr;
+ size_t addrlen;
+};
+
+
+static void nfs_dns_ent_update(struct cache_head *cnew,
+ struct cache_head *ckey)
+{
+ struct nfs_dns_ent *new;
+ struct nfs_dns_ent *key;
+
+ new = container_of(cnew, struct nfs_dns_ent, h);
+ key = container_of(ckey, struct nfs_dns_ent, h);
+
+ memcpy(&new->addr, &key->addr, key->addrlen);
+ new->addrlen = key->addrlen;
+}
+
+static void nfs_dns_ent_init(struct cache_head *cnew,
+ struct cache_head *ckey)
+{
+ struct nfs_dns_ent *new;
+ struct nfs_dns_ent *key;
+
+ new = container_of(cnew, struct nfs_dns_ent, h);
+ key = container_of(ckey, struct nfs_dns_ent, h);
+
+ kfree(new->hostname);
+ new->hostname = kstrndup(key->hostname, key->namelen, GFP_KERNEL);
+ if (new->hostname) {
+ new->namelen = key->namelen;
+ nfs_dns_ent_update(cnew, ckey);
+ } else {
+ new->namelen = 0;
+ new->addrlen = 0;
+ }
+}
+
+static void nfs_dns_ent_put(struct kref *ref)
+{
+ struct nfs_dns_ent *item;
+
+ item = container_of(ref, struct nfs_dns_ent, h.ref);
+ kfree(item->hostname);
+ kfree(item);
+}
+
+static struct cache_head *nfs_dns_ent_alloc(void)
+{
+ struct nfs_dns_ent *item = kmalloc(sizeof(*item), GFP_KERNEL);
+
+ if (item != NULL) {
+ item->hostname = NULL;
+ item->namelen = 0;
+ item->addrlen = 0;
+ return &item->h;
+ }
+ return NULL;
+}
+
+static unsigned int nfs_dns_hash(const struct nfs_dns_ent *key)
+{
+ return hash_str(key->hostname, NFS_DNS_HASHBITS);
+}
+
+static void nfs_dns_request(struct cache_detail *cd,
+ struct cache_head *ch,
+ char **bpp, int *blen)
+{
+ struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
+
+ qword_add(bpp, blen, key->hostname);
+ (*bpp)[-1] = '\n';
+}
+
+static int nfs_dns_upcall(struct cache_detail *cd,
+ struct cache_head *ch)
+{
+ struct nfs_dns_ent *key = container_of(ch, struct nfs_dns_ent, h);
+ int ret;
+
+ ret = nfs_cache_upcall(cd, key->hostname);
+ if (ret)
+ ret = sunrpc_cache_pipe_upcall(cd, ch, nfs_dns_request);
+ return ret;
+}
+
+static int nfs_dns_match(struct cache_head *ca,
+ struct cache_head *cb)
+{
+ struct nfs_dns_ent *a;
+ struct nfs_dns_ent *b;
+
+ a = container_of(ca, struct nfs_dns_ent, h);
+ b = container_of(cb, struct nfs_dns_ent, h);
+
+ if (a->namelen == 0 || a->namelen != b->namelen)
+ return 0;
+ return memcmp(a->hostname, b->hostname, a->namelen) == 0;
+}
+
+static int nfs_dns_show(struct seq_file *m, struct cache_detail *cd,
+ struct cache_head *h)
+{
+ struct nfs_dns_ent *item;
+ long ttl;
+
+ if (h == NULL) {
+ seq_puts(m, "# ip address hostname ttl\n");
+ return 0;
+ }
+ item = container_of(h, struct nfs_dns_ent, h);
+ ttl = item->h.expiry_time - seconds_since_boot();
+ if (ttl < 0)
+ ttl = 0;
+
+ if (!test_bit(CACHE_NEGATIVE, &h->flags)) {
+ char buf[INET6_ADDRSTRLEN+IPV6_SCOPE_ID_LEN+1];
+
+ rpc_ntop((struct sockaddr *)&item->addr, buf, sizeof(buf));
+ seq_printf(m, "%15s ", buf);
+ } else
+ seq_puts(m, "<none> ");
+ seq_printf(m, "%15s %ld\n", item->hostname, ttl);
+ return 0;
+}
+
+static struct nfs_dns_ent *nfs_dns_lookup(struct cache_detail *cd,
+ struct nfs_dns_ent *key)
+{
+ struct cache_head *ch;
+
+ ch = sunrpc_cache_lookup(cd,
+ &key->h,
+ nfs_dns_hash(key));
+ if (!ch)
+ return NULL;
+ return container_of(ch, struct nfs_dns_ent, h);
+}
+
+static struct nfs_dns_ent *nfs_dns_update(struct cache_detail *cd,
+ struct nfs_dns_ent *new,
+ struct nfs_dns_ent *key)
+{
+ struct cache_head *ch;
+
+ ch = sunrpc_cache_update(cd,
+ &new->h, &key->h,
+ nfs_dns_hash(key));
+ if (!ch)
+ return NULL;
+ return container_of(ch, struct nfs_dns_ent, h);
+}
+
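+/*
+ * Parse a downcall line written by the user space helper.  The format
+ * is "<ip address> <hostname> <expiry>"; a line such as (illustrative
+ * values only)
+ *
+ *	192.0.2.1 server.example.com 3600
+ *
+ * adds a positive entry, while an unparseable address field marks the
+ * entry negative.
+ */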
+static int nfs_dns_parse(struct cache_detail *cd, char *buf, int buflen)
+{
+ char buf1[NFS_DNS_HOSTNAME_MAXLEN+1];
+ struct nfs_dns_ent key, *item;
+ unsigned long ttl;
+ ssize_t len;
+ int ret = -EINVAL;
+
+ if (buf[buflen-1] != '\n')
+ goto out;
+ buf[buflen-1] = '\0';
+
+ len = qword_get(&buf, buf1, sizeof(buf1));
+ if (len <= 0)
+ goto out;
+ key.addrlen = rpc_pton(cd->net, buf1, len,
+ (struct sockaddr *)&key.addr,
+ sizeof(key.addr));
+
+ len = qword_get(&buf, buf1, sizeof(buf1));
+ if (len <= 0)
+ goto out;
+
+ key.hostname = buf1;
+ key.namelen = len;
+ memset(&key.h, 0, sizeof(key.h));
+
+ ttl = get_expiry(&buf);
+ if (ttl == 0)
+ goto out;
+ key.h.expiry_time = ttl + seconds_since_boot();
+
+ ret = -ENOMEM;
+ item = nfs_dns_lookup(cd, &key);
+ if (item == NULL)
+ goto out;
+
+ if (key.addrlen == 0)
+ set_bit(CACHE_NEGATIVE, &key.h.flags);
+
+ item = nfs_dns_update(cd, &key, item);
+ if (item == NULL)
+ goto out;
+
+ ret = 0;
+ cache_put(&item->h, cd);
+out:
+ return ret;
+}
+
+static int do_cache_lookup(struct cache_detail *cd,
+ struct nfs_dns_ent *key,
+ struct nfs_dns_ent **item,
+ struct nfs_cache_defer_req *dreq)
+{
+ int ret = -ENOMEM;
+
+ *item = nfs_dns_lookup(cd, key);
+ if (*item) {
+ ret = cache_check(cd, &(*item)->h, &dreq->req);
+ if (ret)
+ *item = NULL;
+ }
+ return ret;
+}
+
+static int do_cache_lookup_nowait(struct cache_detail *cd,
+ struct nfs_dns_ent *key,
+ struct nfs_dns_ent **item)
+{
+ int ret = -ENOMEM;
+
+ *item = nfs_dns_lookup(cd, key);
+ if (!*item)
+ goto out_err;
+ ret = -ETIMEDOUT;
+ if (!test_bit(CACHE_VALID, &(*item)->h.flags)
+ || (*item)->h.expiry_time < seconds_since_boot()
+ || cd->flush_time > (*item)->h.last_refresh)
+ goto out_put;
+ ret = -ENOENT;
+ if (test_bit(CACHE_NEGATIVE, &(*item)->h.flags))
+ goto out_put;
+ return 0;
+out_put:
+ cache_put(&(*item)->h, cd);
+out_err:
+ *item = NULL;
+ return ret;
+}
+
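+/*
+ * cache_check() returns -EAGAIN while an upcall to the helper is
+ * pending; wait for the downcall and then retry the lookup without
+ * deferring again.
+ */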
+static int do_cache_lookup_wait(struct cache_detail *cd,
+ struct nfs_dns_ent *key,
+ struct nfs_dns_ent **item)
+{
+ struct nfs_cache_defer_req *dreq;
+ int ret = -ENOMEM;
+
+ dreq = nfs_cache_defer_req_alloc();
+ if (!dreq)
+ goto out;
+ ret = do_cache_lookup(cd, key, item, dreq);
+ if (ret == -EAGAIN) {
+ ret = nfs_cache_wait_for_upcall(dreq);
+ if (!ret)
+ ret = do_cache_lookup_nowait(cd, key, item);
+ }
+ nfs_cache_defer_req_put(dreq);
+out:
+ return ret;
+}
+
+ssize_t nfs_dns_resolve_name(struct net *net, char *name,
+ size_t namelen, struct sockaddr *sa, size_t salen)
+{
+ struct nfs_dns_ent key = {
+ .hostname = name,
+ .namelen = namelen,
+ };
+ struct nfs_dns_ent *item = NULL;
+ ssize_t ret;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+
+ ret = do_cache_lookup_wait(nn->nfs_dns_resolve, &key, &item);
+ if (ret == 0) {
+ if (salen >= item->addrlen) {
+ memcpy(sa, &item->addr, item->addrlen);
+ ret = item->addrlen;
+ } else
+ ret = -EOVERFLOW;
+ cache_put(&item->h, nn->nfs_dns_resolve);
+ } else if (ret == -ENOENT)
+ ret = -ESRCH;
+ return ret;
+}
+
+int nfs_dns_resolver_cache_init(struct net *net)
+{
+ int err = -ENOMEM;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+ struct cache_detail *cd;
+ struct cache_head **tbl;
+
+ cd = kzalloc(sizeof(struct cache_detail), GFP_KERNEL);
+ if (cd == NULL)
+ goto err_cd;
+
+ tbl = kzalloc(NFS_DNS_HASHTBL_SIZE * sizeof(struct cache_head *),
+ GFP_KERNEL);
+ if (tbl == NULL)
+ goto err_tbl;
+
+ cd->owner = THIS_MODULE;
+ cd->hash_size = NFS_DNS_HASHTBL_SIZE;
+ cd->hash_table = tbl;
+ cd->name = "dns_resolve";
+ cd->cache_put = nfs_dns_ent_put;
+ cd->cache_upcall = nfs_dns_upcall;
+ cd->cache_parse = nfs_dns_parse;
+ cd->cache_show = nfs_dns_show;
+ cd->match = nfs_dns_match;
+ cd->init = nfs_dns_ent_init;
+ cd->update = nfs_dns_ent_update;
+ cd->alloc = nfs_dns_ent_alloc;
+
+ nfs_cache_init(cd);
+ err = nfs_cache_register_net(net, cd);
+ if (err)
+ goto err_reg;
+ nn->nfs_dns_resolve = cd;
+ return 0;
+
+err_reg:
+ nfs_cache_destroy(cd);
+ kfree(cd->hash_table);
+err_tbl:
+ kfree(cd);
+err_cd:
+ return err;
+}
+
+void nfs_dns_resolver_cache_destroy(struct net *net)
+{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+ struct cache_detail *cd = nn->nfs_dns_resolve;
+
+ nfs_cache_unregister_net(net, cd);
+ nfs_cache_destroy(cd);
+ kfree(cd->hash_table);
+ kfree(cd);
+}
+
+static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct super_block *sb = ptr;
+ struct net *net = sb->s_fs_info;
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+ struct cache_detail *cd = nn->nfs_dns_resolve;
+ int ret = 0;
+
+ if (cd == NULL)
+ return 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return 0;
+
+ switch (event) {
+ case RPC_PIPEFS_MOUNT:
+ ret = nfs_cache_register_sb(sb, cd);
+ break;
+ case RPC_PIPEFS_UMOUNT:
+ nfs_cache_unregister_sb(sb, cd);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static struct notifier_block nfs_dns_resolver_block = {
+ .notifier_call = rpc_pipefs_event,
+};
+
+int nfs_dns_resolver_init(void)
+{
+ return rpc_pipefs_notifier_register(&nfs_dns_resolver_block);
+}
+
+void nfs_dns_resolver_destroy(void)
+{
+ rpc_pipefs_notifier_unregister(&nfs_dns_resolver_block);
+}
+#endif
diff --git a/fs/nfs/dns_resolve.h b/fs/nfs/dns_resolve.h
new file mode 100644
index 00000000..2e4f596d
--- /dev/null
+++ b/fs/nfs/dns_resolve.h
@@ -0,0 +1,36 @@
+/*
+ * Resolve DNS hostnames into valid ip addresses
+ */
+#ifndef __LINUX_FS_NFS_DNS_RESOLVE_H
+#define __LINUX_FS_NFS_DNS_RESOLVE_H
+
+#define NFS_DNS_HOSTNAME_MAXLEN (128)
+
+
+#ifdef CONFIG_NFS_USE_KERNEL_DNS
+static inline int nfs_dns_resolver_init(void)
+{
+ return 0;
+}
+
+static inline void nfs_dns_resolver_destroy(void)
+{}
+
+static inline int nfs_dns_resolver_cache_init(struct net *net)
+{
+ return 0;
+}
+
+static inline void nfs_dns_resolver_cache_destroy(struct net *net)
+{}
+#else
+extern int nfs_dns_resolver_init(void);
+extern void nfs_dns_resolver_destroy(void);
+extern int nfs_dns_resolver_cache_init(struct net *net);
+extern void nfs_dns_resolver_cache_destroy(struct net *net);
+#endif
+
+extern ssize_t nfs_dns_resolve_name(struct net *net, char *name,
+ size_t namelen, struct sockaddr *sa, size_t salen);
+
+#endif
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
new file mode 100644
index 00000000..aa9b709f
--- /dev/null
+++ b/fs/nfs/file.c
@@ -0,0 +1,900 @@
+/*
+ * linux/fs/nfs/file.c
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ *
+ * Changes Copyright (C) 1994 by Florian La Roche
+ * - Do not copy data too often around in the kernel.
+ * - In nfs_file_read the return value of kmalloc wasn't checked.
+ * - Put in a better version of read look-ahead buffering. Original idea
+ * and implementation by Wai S Kok elekokws@ee.nus.sg.
+ *
+ * Expire cache on write to a file by Wai S Kok (Oct 1994).
+ *
+ * Total rewrite of read side for new NFS buffer cache.. Linus.
+ *
+ * nfs regular file handling functions
+ */
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/aio.h>
+#include <linux/gfp.h>
+#include <linux/swap.h>
+
+#include <asm/uaccess.h>
+
+#include "delegation.h"
+#include "internal.h"
+#include "iostat.h"
+#include "fscache.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_FILE
+
+static const struct vm_operations_struct nfs_file_vm_ops;
+
+const struct inode_operations nfs_file_inode_operations = {
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+};
+
+#ifdef CONFIG_NFS_V3
+const struct inode_operations nfs3_file_inode_operations = {
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+ .listxattr = nfs3_listxattr,
+ .getxattr = nfs3_getxattr,
+ .setxattr = nfs3_setxattr,
+ .removexattr = nfs3_removexattr,
+};
+#endif /* CONFIG_NFS_V3 */
+
+/* Hack for future NFS swap support */
+#ifndef IS_SWAPFILE
+# define IS_SWAPFILE(inode) (0)
+#endif
+
+static int nfs_check_flags(int flags)
+{
+ if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Open file
+ */
+static int
+nfs_file_open(struct inode *inode, struct file *filp)
+{
+ int res;
+
+ dprintk("NFS: open file(%s/%s)\n",
+ filp->f_path.dentry->d_parent->d_name.name,
+ filp->f_path.dentry->d_name.name);
+
+ nfs_inc_stats(inode, NFSIOS_VFSOPEN);
+ res = nfs_check_flags(filp->f_flags);
+ if (res)
+ return res;
+
+ res = nfs_open(inode, filp);
+ return res;
+}
+
+static int
+nfs_file_release(struct inode *inode, struct file *filp)
+{
+ dprintk("NFS: release(%s/%s)\n",
+ filp->f_path.dentry->d_parent->d_name.name,
+ filp->f_path.dentry->d_name.name);
+
+ nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
+ return nfs_release(inode, filp);
+}
+
+/**
+ * nfs_revalidate_file_size - Revalidate the file size
+ * @inode: pointer to inode struct
+ * @filp: pointer to struct file
+ *
+ * Revalidates the file length. This is basically a wrapper around
+ * nfs_revalidate_inode() that takes into account the fact that we may
+ * have cached writes (in which case we don't care about the server's
+ * idea of what the file length is), or O_DIRECT (in which case we
+ * shouldn't trust the cache).
+ */
+static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ if (nfs_have_delegated_attributes(inode))
+ goto out_noreval;
+
+ if (filp->f_flags & O_DIRECT)
+ goto force_reval;
+ if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ goto force_reval;
+ if (nfs_attribute_timeout(inode))
+ goto force_reval;
+out_noreval:
+ return 0;
+force_reval:
+ return __nfs_revalidate_inode(server, inode);
+}
+
+static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
+{
+ dprintk("NFS: llseek file(%s/%s, %lld, %d)\n",
+ filp->f_path.dentry->d_parent->d_name.name,
+ filp->f_path.dentry->d_name.name,
+ offset, origin);
+
+ /*
+ * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
+ * the cached file length
+ */
+ if (origin != SEEK_SET && origin != SEEK_CUR) {
+ struct inode *inode = filp->f_mapping->host;
+
+ int retval = nfs_revalidate_file_size(inode, filp);
+ if (retval < 0)
+ return (loff_t)retval;
+ }
+
+ return generic_file_llseek(filp, offset, origin);
+}
+
+/*
+ * Flush all dirty pages, and check for write errors.
+ */
+static int
+nfs_file_flush(struct file *file, fl_owner_t id)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+
+ dprintk("NFS: flush(%s/%s)\n",
+ dentry->d_parent->d_name.name,
+ dentry->d_name.name);
+
+ nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
+ if ((file->f_mode & FMODE_WRITE) == 0)
+ return 0;
+
+ /* Flush writes to the server and return any errors */
+ return vfs_fsync(file, 0);
+}
+
+static ssize_t
+nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct dentry * dentry = iocb->ki_filp->f_path.dentry;
+ struct inode * inode = dentry->d_inode;
+ ssize_t result;
+
+ if (iocb->ki_filp->f_flags & O_DIRECT)
+ return nfs_file_direct_read(iocb, iov, nr_segs, pos);
+
+ dprintk("NFS: read(%s/%s, %lu@%lu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (unsigned long) iov_length(iov, nr_segs), (unsigned long) pos);
+
+ result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
+ if (!result) {
+ result = generic_file_aio_read(iocb, iov, nr_segs, pos);
+ if (result > 0)
+ nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
+ }
+ return result;
+}
+
+static ssize_t
+nfs_file_splice_read(struct file *filp, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t count,
+ unsigned int flags)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ ssize_t res;
+
+ dprintk("NFS: splice_read(%s/%s, %lu@%Lu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (unsigned long) count, (unsigned long long) *ppos);
+
+ res = nfs_revalidate_mapping(inode, filp->f_mapping);
+ if (!res) {
+ res = generic_file_splice_read(filp, ppos, pipe, count, flags);
+ if (res > 0)
+ nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, res);
+ }
+ return res;
+}
+
+static int
+nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ int status;
+
+ dprintk("NFS: mmap(%s/%s)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
+ /* Note: generic_file_mmap() returns ENOSYS on nommu systems
+ * so we call that before revalidating the mapping
+ */
+ status = generic_file_mmap(file, vma);
+ if (!status) {
+ vma->vm_ops = &nfs_file_vm_ops;
+ status = nfs_revalidate_mapping(inode, file->f_mapping);
+ }
+ return status;
+}
+
+/*
+ * Flush any dirty pages for this process, and check for write errors.
+ * The return status from this call provides a reliable indication of
+ * whether any write errors occurred for this process.
+ *
+ * Notice that it clears the NFS_CONTEXT_ERROR_WRITE before synching to
+ * disk, but it retrieves and clears ctx->error after synching, despite
+ * the two being set at the same time in nfs_context_set_write_error().
+ * This is because the former is used to notify the _next_ call to
+ * nfs_file_write() that a write error occurred, and hence cause it to
+ * fall back to doing a synchronous write.
+ */
+static int
+nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+ struct dentry *dentry = file->f_path.dentry;
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct inode *inode = dentry->d_inode;
+ int have_error, status;
+ int ret = 0;
+
+ dprintk("NFS: fsync file(%s/%s) datasync %d\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ datasync);
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ mutex_lock(&inode->i_mutex);
+
+ nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
+ have_error = test_and_clear_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ status = nfs_commit_inode(inode, FLUSH_SYNC);
+ if (status >= 0 && ret < 0)
+ status = ret;
+ have_error |= test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+ if (have_error)
+ ret = xchg(&ctx->error, 0);
+ if (!ret && status < 0)
+ ret = status;
+ if (!ret && !datasync)
+ /* application has asked for meta-data sync */
+ ret = pnfs_layoutcommit_inode(inode, true);
+ mutex_unlock(&inode->i_mutex);
+ return ret;
+}
+
+/*
+ * Decide whether a read/modify/write cycle may be more efficient
+ * than a modify/write/read cycle when writing to a page in the
+ * page cache.
+ *
+ * The modify/write/read cycle may occur if a page is read before
+ * being completely filled by the writer. In this situation, the
+ * page must be completely written to stable storage on the server
+ * before it can be refilled by reading in the page from the server.
+ * This can lead to expensive, small, FILE_SYNC mode writes being
+ * done.
+ *
+ * It may be more efficient to read the page first if the file is
+ * open for reading in addition to writing, the page is not marked
+ * as Uptodate, it is not dirty or waiting to be committed,
+ * indicating that it was previously allocated and then modified,
+ * that there were valid bytes of data in that range of the file,
+ * and that the new data won't completely replace the old data in
+ * that range of the file.
+ */
+static int nfs_want_read_modify_write(struct file *file, struct page *page,
+ loff_t pos, unsigned len)
+{
+ unsigned int pglen = nfs_page_length(page);
+ unsigned int offset = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned int end = offset + len;
+
+ if ((file->f_mode & FMODE_READ) && /* open for read? */
+ !PageUptodate(page) && /* Uptodate? */
+ !PagePrivate(page) && /* i/o request already? */
+ pglen && /* valid bytes of file? */
+ (end < pglen || offset)) /* replace all valid bytes? */
+ return 1;
+ return 0;
+}
+
+/*
+ * This does the "real" work of the write. We must allocate and lock the
+ * page to be sent back to the generic routine, which then copies the
+ * data from user space.
+ *
+ * If the writer ends up delaying the write, the writer needs to
+ * increment the page use counts until he is done with the page.
+ */
+static int nfs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ int ret;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ struct page *page;
+ int once_thru = 0;
+
+ dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n",
+ file->f_path.dentry->d_parent->d_name.name,
+ file->f_path.dentry->d_name.name,
+ mapping->host->i_ino, len, (long long) pos);
+
+start:
+ /*
+ * Prevent starvation issues if someone is doing a consistency
+ * sync-to-disk
+ */
+ ret = wait_on_bit(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING,
+ nfs_wait_bit_killable, TASK_KILLABLE);
+ if (ret)
+ return ret;
+
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+ *pagep = page;
+
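+	/*
+	 * Flush any pending write from another open context before reusing
+	 * the page; if a read/modify/write cycle looks cheaper (see the
+	 * comment above nfs_want_read_modify_write), read the page in once
+	 * and retry from the top.
+	 */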
+ ret = nfs_flush_incompatible(file, page);
+ if (ret) {
+ unlock_page(page);
+ page_cache_release(page);
+ } else if (!once_thru &&
+ nfs_want_read_modify_write(file, page, pos, len)) {
+ once_thru = 1;
+ ret = nfs_readpage(file, page);
+ page_cache_release(page);
+ if (!ret)
+ goto start;
+ }
+ return ret;
+}
+
+static int nfs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+ int status;
+
+ dfprintk(PAGECACHE, "NFS: write_end(%s/%s(%ld), %u@%lld)\n",
+ file->f_path.dentry->d_parent->d_name.name,
+ file->f_path.dentry->d_name.name,
+ mapping->host->i_ino, len, (long long) pos);
+
+ /*
+ * Zero any uninitialised parts of the page, and then mark the page
+ * as up to date if it turns out that we're extending the file.
+ */
+ if (!PageUptodate(page)) {
+ unsigned pglen = nfs_page_length(page);
+ unsigned end = offset + len;
+
+ if (pglen == 0) {
+ zero_user_segments(page, 0, offset,
+ end, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ } else if (end >= pglen) {
+ zero_user_segment(page, end, PAGE_CACHE_SIZE);
+ if (offset == 0)
+ SetPageUptodate(page);
+ } else
+ zero_user_segment(page, pglen, PAGE_CACHE_SIZE);
+ }
+
+ status = nfs_updatepage(file, page, offset, copied);
+
+ unlock_page(page);
+ page_cache_release(page);
+
+ if (status < 0)
+ return status;
+ return copied;
+}
+
+/*
+ * Partially or wholly invalidate a page
+ * - Release the private state associated with a page if undergoing complete
+ * page invalidation
+ * - Called if either PG_private or PG_fscache is set on the page
+ * - Caller holds page lock
+ */
+static void nfs_invalidate_page(struct page *page, unsigned long offset)
+{
+ dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset);
+
+ if (offset != 0)
+ return;
+ /* Cancel any unstarted writes on this page */
+ nfs_wb_page_cancel(page->mapping->host, page);
+
+ nfs_fscache_invalidate_page(page, page->mapping->host);
+}
+
+/*
+ * Attempt to release the private state associated with a page
+ * - Called if either PG_private or PG_fscache is set on the page
+ * - Caller holds page lock
+ * - Return true (may release page) or false (may not)
+ */
+static int nfs_release_page(struct page *page, gfp_t gfp)
+{
+ struct address_space *mapping = page->mapping;
+
+ dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+
+ /* Only do I/O if gfp is a superset of GFP_KERNEL */
+ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+ int how = FLUSH_SYNC;
+
+ /* Don't let kswapd deadlock waiting for OOM RPC calls */
+ if (current_is_kswapd())
+ how = 0;
+ nfs_commit_inode(mapping->host, how);
+ }
+ /* If PagePrivate() is set, then the page is not freeable */
+ if (PagePrivate(page))
+ return 0;
+ return nfs_fscache_release_page(page, gfp);
+}
+
+/*
+ * Attempt to clear the private state associated with a page when an error
+ * occurs that requires the cached contents of an inode to be written back or
+ * destroyed
+ * - Called if either PG_private or fscache is set on the page
+ * - Caller holds page lock
+ * - Return 0 if successful, -error otherwise
+ */
+static int nfs_launder_page(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
+ inode->i_ino, (long long)page_offset(page));
+
+ nfs_fscache_wait_on_page_write(nfsi, page);
+ return nfs_wb_page(inode, page);
+}
+
+const struct address_space_operations nfs_file_aops = {
+ .readpage = nfs_readpage,
+ .readpages = nfs_readpages,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ .writepage = nfs_writepage,
+ .writepages = nfs_writepages,
+ .write_begin = nfs_write_begin,
+ .write_end = nfs_write_end,
+ .invalidatepage = nfs_invalidate_page,
+ .releasepage = nfs_release_page,
+ .direct_IO = nfs_direct_IO,
+ .migratepage = nfs_migrate_page,
+ .launder_page = nfs_launder_page,
+ .error_remove_page = generic_error_remove_page,
+};
+
+/*
+ * Notification that a PTE pointing to an NFS page is about to be made
+ * writable, implying that someone is about to modify the page through a
+ * shared-writable mapping
+ */
+static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *page = vmf->page;
+ struct file *filp = vma->vm_file;
+ struct dentry *dentry = filp->f_path.dentry;
+ unsigned pagelen;
+ int ret = VM_FAULT_NOPAGE;
+ struct address_space *mapping;
+
+ dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%s/%s(%ld), offset %lld)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ filp->f_mapping->host->i_ino,
+ (long long)page_offset(page));
+
+ /* make sure the cache has finished storing the page */
+ nfs_fscache_wait_on_page_write(NFS_I(dentry->d_inode), page);
+
+ lock_page(page);
+ mapping = page->mapping;
+ if (mapping != dentry->d_inode->i_mapping)
+ goto out_unlock;
+
+ wait_on_page_writeback(page);
+
+ pagelen = nfs_page_length(page);
+ if (pagelen == 0)
+ goto out_unlock;
+
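+	/* returning VM_FAULT_LOCKED hands the page back to the VM still
+	 * locked and now safe to write */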
+ ret = VM_FAULT_LOCKED;
+ if (nfs_flush_incompatible(filp, page) == 0 &&
+ nfs_updatepage(filp, page, 0, pagelen) == 0)
+ goto out;
+
+ ret = VM_FAULT_SIGBUS;
+out_unlock:
+ unlock_page(page);
+out:
+ return ret;
+}
+
+static const struct vm_operations_struct nfs_file_vm_ops = {
+ .fault = filemap_fault,
+ .page_mkwrite = nfs_vm_page_mkwrite,
+};
+
+static int nfs_need_sync_write(struct file *filp, struct inode *inode)
+{
+ struct nfs_open_context *ctx;
+
+ if (IS_SYNC(inode) || (filp->f_flags & O_DSYNC))
+ return 1;
+ ctx = nfs_file_open_context(filp);
+ if (test_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags))
+ return 1;
+ return 0;
+}
+
+static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+	struct dentry *dentry = iocb->ki_filp->f_path.dentry;
+	struct inode *inode = dentry->d_inode;
+ unsigned long written = 0;
+ ssize_t result;
+ size_t count = iov_length(iov, nr_segs);
+
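+	/* O_DIRECT writes bypass the page cache and are handled separately */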
+ if (iocb->ki_filp->f_flags & O_DIRECT)
+ return nfs_file_direct_write(iocb, iov, nr_segs, pos);
+
+ dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (unsigned long) count, (long long) pos);
+
+ result = -EBUSY;
+ if (IS_SWAPFILE(inode))
+ goto out_swapfile;
+ /*
+ * O_APPEND implies that we must revalidate the file length.
+ */
+ if (iocb->ki_filp->f_flags & O_APPEND) {
+ result = nfs_revalidate_file_size(inode, iocb->ki_filp);
+ if (result)
+ goto out;
+ }
+
+ result = count;
+ if (!count)
+ goto out;
+
+ result = generic_file_aio_write(iocb, iov, nr_segs, pos);
+ if (result > 0)
+ written = result;
+
+ /* Return error values for O_DSYNC and IS_SYNC() */
+ if (result >= 0 && nfs_need_sync_write(iocb->ki_filp, inode)) {
+ int err = vfs_fsync(iocb->ki_filp, 0);
+ if (err < 0)
+ result = err;
+ }
+ if (result > 0)
+ nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
+out:
+ return result;
+
+out_swapfile:
+ printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
+ goto out;
+}
+
+static ssize_t nfs_file_splice_write(struct pipe_inode_info *pipe,
+ struct file *filp, loff_t *ppos,
+ size_t count, unsigned int flags)
+{
+ struct dentry *dentry = filp->f_path.dentry;
+ struct inode *inode = dentry->d_inode;
+ unsigned long written = 0;
+ ssize_t ret;
+
+ dprintk("NFS splice_write(%s/%s, %lu@%llu)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ (unsigned long) count, (unsigned long long) *ppos);
+
+ /*
+ * The combination of splice and an O_APPEND destination is disallowed.
+ */
+
+ ret = generic_file_splice_write(pipe, filp, ppos, count, flags);
+ if (ret > 0)
+ written = ret;
+
+ if (ret >= 0 && nfs_need_sync_write(filp, inode)) {
+ int err = vfs_fsync(filp, 0);
+ if (err < 0)
+ ret = err;
+ }
+ if (ret > 0)
+ nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
+ return ret;
+}
+
+static int
+do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
+{
+ struct inode *inode = filp->f_mapping->host;
+ int status = 0;
+ unsigned int saved_type = fl->fl_type;
+
+ /* Try local locking first */
+ posix_test_lock(filp, fl);
+ if (fl->fl_type != F_UNLCK) {
+ /* found a conflict */
+ goto out;
+ }
+ fl->fl_type = saved_type;
+
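+	/*
+	 * A read delegation guarantees that no other client holds a
+	 * conflicting lock, so the local test above is authoritative;
+	 * the same is true when locking is purely local.
+	 */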
+ if (nfs_have_delegation(inode, FMODE_READ))
+ goto out_noconflict;
+
+ if (is_local)
+ goto out_noconflict;
+
+ status = NFS_PROTO(inode)->lock(filp, cmd, fl);
+out:
+ return status;
+out_noconflict:
+ fl->fl_type = F_UNLCK;
+ goto out;
+}
+
+static int do_vfs_lock(struct file *file, struct file_lock *fl)
+{
+ int res = 0;
+ switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+ case FL_POSIX:
+ res = posix_lock_file_wait(file, fl);
+ break;
+ case FL_FLOCK:
+ res = flock_lock_file_wait(file, fl);
+ break;
+ default:
+ BUG();
+ }
+ return res;
+}
+
+static int
+do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
+{
+ struct inode *inode = filp->f_mapping->host;
+ int status;
+
+ /*
+ * Flush all pending writes before doing anything
+ * with locks..
+ */
+ nfs_sync_mapping(filp->f_mapping);
+
+ /* NOTE: special case
+ * If we're signalled while cleaning up locks on process exit, we
+ * still need to complete the unlock.
+ */
+ /*
+ * Use local locking if mounted with "-onolock" or with appropriate
+ * "-olocal_lock="
+ */
+ if (!is_local)
+ status = NFS_PROTO(inode)->lock(filp, cmd, fl);
+ else
+ status = do_vfs_lock(filp, fl);
+ return status;
+}
+
+static int
+is_time_granular(struct timespec *ts)
+{
+ return ((ts->tv_sec == 0) && (ts->tv_nsec <= 1000));
+}
+
+static int
+do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
+{
+ struct inode *inode = filp->f_mapping->host;
+ int status;
+
+ /*
+ * Flush all pending writes before doing anything
+ * with locks..
+ */
+ status = nfs_sync_mapping(filp->f_mapping);
+ if (status != 0)
+ goto out;
+
+ /*
+ * Use local locking if mounted with "-onolock" or with appropriate
+ * "-olocal_lock="
+ */
+ if (!is_local)
+ status = NFS_PROTO(inode)->lock(filp, cmd, fl);
+ else
+ status = do_vfs_lock(filp, fl);
+ if (status < 0)
+ goto out;
+
+ /*
+ * Revalidate the cache if the server has time stamps granular
+ * enough to detect subsecond changes. Otherwise, clear the
+ * cache to prevent missing any changes.
+ *
+ * This makes locking act as a cache coherency point.
+ */
+ nfs_sync_mapping(filp->f_mapping);
+ if (!nfs_have_delegation(inode, FMODE_READ)) {
+ if (is_time_granular(&NFS_SERVER(inode)->time_delta))
+ __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ else
+ nfs_zap_caches(inode);
+ }
+out:
+ return status;
+}
+
+/*
+ * Lock a (portion of) a file
+ */
+static int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+ struct inode *inode = filp->f_mapping->host;
+ int ret = -ENOLCK;
+ int is_local = 0;
+
+ dprintk("NFS: lock(%s/%s, t=%x, fl=%x, r=%lld:%lld)\n",
+ filp->f_path.dentry->d_parent->d_name.name,
+ filp->f_path.dentry->d_name.name,
+ fl->fl_type, fl->fl_flags,
+ (long long)fl->fl_start, (long long)fl->fl_end);
+
+ nfs_inc_stats(inode, NFSIOS_VFSLOCK);
+
+ /* No mandatory locks over NFS */
+ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
+ goto out_err;
+
+ if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
+ is_local = 1;
+
+ if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
+ ret = NFS_PROTO(inode)->lock_check_bounds(fl);
+ if (ret < 0)
+ goto out_err;
+ }
+
+ if (IS_GETLK(cmd))
+ ret = do_getlk(filp, cmd, fl, is_local);
+ else if (fl->fl_type == F_UNLCK)
+ ret = do_unlk(filp, cmd, fl, is_local);
+ else
+ ret = do_setlk(filp, cmd, fl, is_local);
+out_err:
+ return ret;
+}
+
+/*
+ * Lock a (portion of) a file
+ */
+static int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
+{
+ struct inode *inode = filp->f_mapping->host;
+ int is_local = 0;
+
+ dprintk("NFS: flock(%s/%s, t=%x, fl=%x)\n",
+ filp->f_path.dentry->d_parent->d_name.name,
+ filp->f_path.dentry->d_name.name,
+ fl->fl_type, fl->fl_flags);
+
+ if (!(fl->fl_flags & FL_FLOCK))
+ return -ENOLCK;
+
+ if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
+ is_local = 1;
+
+ /* We're simulating flock() locks using posix locks on the server */
+ fl->fl_owner = (fl_owner_t)filp;
+ fl->fl_start = 0;
+ fl->fl_end = OFFSET_MAX;
+
+ if (fl->fl_type == F_UNLCK)
+ return do_unlk(filp, cmd, fl, is_local);
+ return do_setlk(filp, cmd, fl, is_local);
+}
+
+/*
+ * There is no protocol support for leases, so we have no way to implement
+ * them correctly in the face of opens by other clients.
+ */
+static int nfs_setlease(struct file *file, long arg, struct file_lock **fl)
+{
+ dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
+ file->f_path.dentry->d_parent->d_name.name,
+ file->f_path.dentry->d_name.name, arg);
+ return -EINVAL;
+}
+
+const struct file_operations nfs_file_operations = {
+ .llseek = nfs_file_llseek,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = nfs_file_read,
+ .aio_write = nfs_file_write,
+ .mmap = nfs_file_mmap,
+ .open = nfs_file_open,
+ .flush = nfs_file_flush,
+ .release = nfs_file_release,
+ .fsync = nfs_file_fsync,
+ .lock = nfs_lock,
+ .flock = nfs_flock,
+ .splice_read = nfs_file_splice_read,
+ .splice_write = nfs_file_splice_write,
+ .check_flags = nfs_check_flags,
+ .setlease = nfs_setlease,
+};
+
+#ifdef CONFIG_NFS_V4
+static int
+nfs4_file_open(struct inode *inode, struct file *filp)
+{
+ /*
+ * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to
+ * this point, then something is very wrong
+ */
+ dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp);
+ return -ENOTDIR;
+}
+
+const struct file_operations nfs4_file_operations = {
+ .llseek = nfs_file_llseek,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = nfs_file_read,
+ .aio_write = nfs_file_write,
+ .mmap = nfs_file_mmap,
+ .open = nfs4_file_open,
+ .flush = nfs_file_flush,
+ .release = nfs_file_release,
+ .fsync = nfs_file_fsync,
+ .lock = nfs_lock,
+ .flock = nfs_flock,
+ .splice_read = nfs_file_splice_read,
+ .splice_write = nfs_file_splice_write,
+ .check_flags = nfs_check_flags,
+ .setlease = nfs_setlease,
+};
+#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
new file mode 100644
index 00000000..7cf2c469
--- /dev/null
+++ b/fs/nfs/fscache-index.c
@@ -0,0 +1,337 @@
+/* NFS FS-Cache index structure definition
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/in6.h>
+
+#include "internal.h"
+#include "fscache.h"
+
+#define NFSDBG_FACILITY NFSDBG_FSCACHE
+
+/*
+ * Define the NFS filesystem for FS-Cache. Upon registration FS-Cache sticks
+ * the cookie for the top-level index object for NFS into here. The top-level
+ * index can then have other cache objects inserted into it.
+ */
+struct fscache_netfs nfs_fscache_netfs = {
+ .name = "nfs",
+ .version = 0,
+};
+
+/*
+ * Register NFS for caching
+ */
+int nfs_fscache_register(void)
+{
+ return fscache_register_netfs(&nfs_fscache_netfs);
+}
+
+/*
+ * Unregister NFS for caching
+ */
+void nfs_fscache_unregister(void)
+{
+ fscache_unregister_netfs(&nfs_fscache_netfs);
+}
+
+/*
+ * Layout of the key for an NFS server cache object.
+ */
+struct nfs_server_key {
+ uint16_t nfsversion; /* NFS protocol version */
+ uint16_t family; /* address family */
+ uint16_t port; /* IP port */
+ union {
+ struct in_addr ipv4_addr; /* IPv4 address */
+ struct in6_addr ipv6_addr; /* IPv6 address */
+ } addr[0];
+};
+
+/*
+ * Generate a key to describe a server in the main NFS index
+ * - We return the length of the key, or 0 if we can't generate one
+ */
+static uint16_t nfs_server_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct nfs_client *clp = cookie_netfs_data;
+ const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
+ const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
+ struct nfs_server_key *key = buffer;
+ uint16_t len = sizeof(struct nfs_server_key);
+
+	/* clear the buffer, including any padding, before filling it in */
+	memset(key, 0, len);
+	key->nfsversion = clp->rpc_ops->version;
+	key->family = clp->cl_addr.ss_family;
+
+ switch (clp->cl_addr.ss_family) {
+ case AF_INET:
+ key->port = sin->sin_port;
+ key->addr[0].ipv4_addr = sin->sin_addr;
+ len += sizeof(key->addr[0].ipv4_addr);
+ break;
+
+ case AF_INET6:
+ key->port = sin6->sin6_port;
+ key->addr[0].ipv6_addr = sin6->sin6_addr;
+ len += sizeof(key->addr[0].ipv6_addr);
+ break;
+
+ default:
+ printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
+ clp->cl_addr.ss_family);
+ len = 0;
+ break;
+ }
+
+ return len;
+}
+
+/*
+ * Define the server object for FS-Cache. This is used to describe a server
+ * object to fscache_acquire_cookie(). It is keyed by the NFS protocol and
+ * server address parameters.
+ */
+const struct fscache_cookie_def nfs_fscache_server_index_def = {
+ .name = "NFS.server",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = nfs_server_get_key,
+};
+
+/*
+ * Generate a key to describe a superblock key in the main NFS index
+ */
+static uint16_t nfs_super_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct nfs_fscache_key *key;
+ const struct nfs_server *nfss = cookie_netfs_data;
+ uint16_t len;
+
+ key = nfss->fscache_key;
+ len = sizeof(key->key) + key->key.uniq_len;
+ if (len > bufmax) {
+ len = 0;
+ } else {
+ memcpy(buffer, &key->key, sizeof(key->key));
+ memcpy(buffer + sizeof(key->key),
+ key->key.uniquifier, key->key.uniq_len);
+ }
+
+ return len;
+}
+
+/*
+ * Define the superblock object for FS-Cache. This is used to describe a
+ * superblock object to fscache_acquire_cookie(). It is keyed by all the NFS
+ * parameters that might cause a separate superblock.
+ */
+const struct fscache_cookie_def nfs_fscache_super_index_def = {
+ .name = "NFS.super",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = nfs_super_get_key,
+};
+
+/*
+ * Definition of the auxiliary data attached to NFS inode storage objects
+ * within the cache.
+ *
+ * The contents of this struct are recorded in the on-disk local cache in the
+ * auxiliary data attached to the data storage object backing an inode. This
+ * permits coherency to be managed when a new inode binds to an already extant
+ * cache object.
+ */
+struct nfs_fscache_inode_auxdata {
+ struct timespec mtime;
+ struct timespec ctime;
+ loff_t size;
+ u64 change_attr;
+};
+
+/*
+ * Generate a key to describe an NFS inode in an NFS server's index
+ */
+static uint16_t nfs_fscache_inode_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct nfs_inode *nfsi = cookie_netfs_data;
+ uint16_t nsize;
+
+ /* use the inode's NFS filehandle as the key */
+ nsize = nfsi->fh.size;
+ memcpy(buffer, nfsi->fh.data, nsize);
+ return nsize;
+}
+
+/*
+ * Get certain file attributes from the netfs data
+ * - This function can be absent for an index
+ * - Not permitted to return an error
+ * - The netfs data from the cookie being used as the source is presented
+ */
+static void nfs_fscache_inode_get_attr(const void *cookie_netfs_data,
+ uint64_t *size)
+{
+ const struct nfs_inode *nfsi = cookie_netfs_data;
+
+ *size = nfsi->vfs_inode.i_size;
+}
+
+/*
+ * Get the auxiliary data from netfs data
+ * - This function can be absent if the index carries no state data
+ * - Should store the auxiliary data in the buffer
+ * - Should return the amount of data stored
+ * - Not permitted to return an error
+ * - The netfs data from the cookie being used as the source is presented
+ */
+static uint16_t nfs_fscache_inode_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ struct nfs_fscache_inode_auxdata auxdata;
+ const struct nfs_inode *nfsi = cookie_netfs_data;
+
+ memset(&auxdata, 0, sizeof(auxdata));
+ auxdata.size = nfsi->vfs_inode.i_size;
+ auxdata.mtime = nfsi->vfs_inode.i_mtime;
+ auxdata.ctime = nfsi->vfs_inode.i_ctime;
+
+ if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
+ auxdata.change_attr = nfsi->vfs_inode.i_version;
+
+ if (bufmax > sizeof(auxdata))
+ bufmax = sizeof(auxdata);
+
+ memcpy(buffer, &auxdata, bufmax);
+ return bufmax;
+}
+
+/*
+ * Consult the netfs about the state of an object
+ * - This function can be absent if the index carries no state data
+ * - The netfs data from the cookie being used as the target is
+ * presented, as is the auxiliary data
+ */
+static
+enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
+ const void *data,
+ uint16_t datalen)
+{
+ struct nfs_fscache_inode_auxdata auxdata;
+ struct nfs_inode *nfsi = cookie_netfs_data;
+
+ if (datalen != sizeof(auxdata))
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ memset(&auxdata, 0, sizeof(auxdata));
+ auxdata.size = nfsi->vfs_inode.i_size;
+ auxdata.mtime = nfsi->vfs_inode.i_mtime;
+ auxdata.ctime = nfsi->vfs_inode.i_ctime;
+
+ if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
+ auxdata.change_attr = nfsi->vfs_inode.i_version;
+
+ if (memcmp(data, &auxdata, datalen) != 0)
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ return FSCACHE_CHECKAUX_OKAY;
+}
+
+/*
+ * Indication from FS-Cache that the cookie is no longer cached
+ * - This function is called when the backing store currently caching a cookie
+ * is removed
+ * - The netfs should use this to clean up any markers indicating cached pages
+ * - This is mandatory for any object that may have data
+ */
+static void nfs_fscache_inode_now_uncached(void *cookie_netfs_data)
+{
+ struct nfs_inode *nfsi = cookie_netfs_data;
+ struct pagevec pvec;
+ pgoff_t first;
+ int loop, nr_pages;
+
+ pagevec_init(&pvec, 0);
+ first = 0;
+
+	dprintk("NFS: nfs_fscache_inode_now_uncached: nfs_inode 0x%p\n", nfsi);
+
+ for (;;) {
+ /* grab a bunch of pages to unmark */
+ nr_pages = pagevec_lookup(&pvec,
+ nfsi->vfs_inode.i_mapping,
+ first,
+ PAGEVEC_SIZE - pagevec_count(&pvec));
+ if (!nr_pages)
+ break;
+
+ for (loop = 0; loop < nr_pages; loop++)
+ ClearPageFsCache(pvec.pages[loop]);
+
+ first = pvec.pages[nr_pages - 1]->index + 1;
+
+ pvec.nr = nr_pages;
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+}
+
+/*
+ * Get an extra reference on a read context.
+ * - This function can be absent if the completion function doesn't require a
+ * context.
+ * - The read context is passed back to NFS in the event that a data read on the
+ * cache fails with EIO - in which case the server must be contacted to
+ * retrieve the data, which requires the read context for security.
+ */
+static void nfs_fh_get_context(void *cookie_netfs_data, void *context)
+{
+ get_nfs_open_context(context);
+}
+
+/*
+ * Release an extra reference on a read context.
+ * - This function can be absent if the completion function doesn't require a
+ * context.
+ */
+static void nfs_fh_put_context(void *cookie_netfs_data, void *context)
+{
+ if (context)
+ put_nfs_open_context(context);
+}
+
+/*
+ * Define the inode object for FS-Cache. This is used to describe an inode
+ * object to fscache_acquire_cookie(). It is keyed by the NFS file handle for
+ * an inode.
+ *
+ * Coherency is managed by comparing the copies of i_size, i_mtime and i_ctime
+ * held in the cache auxiliary data for the data storage object with those in
+ * the inode struct in memory.
+ */
+const struct fscache_cookie_def nfs_fscache_inode_object_def = {
+ .name = "NFS.fh",
+ .type = FSCACHE_COOKIE_TYPE_DATAFILE,
+ .get_key = nfs_fscache_inode_get_key,
+ .get_attr = nfs_fscache_inode_get_attr,
+ .get_aux = nfs_fscache_inode_get_aux,
+ .check_aux = nfs_fscache_inode_check_aux,
+ .now_uncached = nfs_fscache_inode_now_uncached,
+ .get_context = nfs_fh_get_context,
+ .put_context = nfs_fh_put_context,
+};
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
new file mode 100644
index 00000000..ae65c16b
--- /dev/null
+++ b/fs/nfs/fscache.c
@@ -0,0 +1,535 @@
+/* NFS filesystem cache interface
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/in6.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+#include "iostat.h"
+#include "fscache.h"
+
+#define NFSDBG_FACILITY NFSDBG_FSCACHE
+
+static struct rb_root nfs_fscache_keys = RB_ROOT;
+static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
+
+/*
+ * Get the per-client index cookie for an NFS client if the appropriate mount
+ * flag was set
+ * - We always try and get an index cookie for the client, but get filehandle
+ * cookies on a per-superblock basis, depending on the mount flags
+ */
+void nfs_fscache_get_client_cookie(struct nfs_client *clp)
+{
+ /* create a cache index for looking up filehandles */
+ clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
+ &nfs_fscache_server_index_def,
+ clp);
+ dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
+ clp, clp->fscache);
+}
+
+/*
+ * Dispose of a per-client cookie
+ */
+void nfs_fscache_release_client_cookie(struct nfs_client *clp)
+{
+ dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
+ clp, clp->fscache);
+
+ fscache_relinquish_cookie(clp->fscache, 0);
+ clp->fscache = NULL;
+}
+
+/*
+ * Get the cache cookie for an NFS superblock. We have to handle
+ * uniquification here because the cache doesn't do it for us.
+ *
+ * The default uniquifier is just an empty string, but it may be overridden
+ * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
+ * superblock across an automount point of some nature.
+ */
+void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq,
+ struct nfs_clone_mount *mntdata)
+{
+ struct nfs_fscache_key *key, *xkey;
+ struct nfs_server *nfss = NFS_SB(sb);
+ struct rb_node **p, *parent;
+ int diff, ulen;
+
+ if (uniq) {
+ ulen = strlen(uniq);
+ } else if (mntdata) {
+ struct nfs_server *mnt_s = NFS_SB(mntdata->sb);
+ if (mnt_s->fscache_key) {
+ uniq = mnt_s->fscache_key->key.uniquifier;
+ ulen = mnt_s->fscache_key->key.uniq_len;
+ }
+ }
+
+ if (!uniq) {
+ uniq = "";
+ ulen = 1;
+ }
+
+ key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
+ if (!key)
+ return;
+
+ key->nfs_client = nfss->nfs_client;
+ key->key.super.s_flags = sb->s_flags & NFS_MS_MASK;
+ key->key.nfs_server.flags = nfss->flags;
+ key->key.nfs_server.rsize = nfss->rsize;
+ key->key.nfs_server.wsize = nfss->wsize;
+ key->key.nfs_server.acregmin = nfss->acregmin;
+ key->key.nfs_server.acregmax = nfss->acregmax;
+ key->key.nfs_server.acdirmin = nfss->acdirmin;
+ key->key.nfs_server.acdirmax = nfss->acdirmax;
+ key->key.nfs_server.fsid = nfss->fsid;
+ key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;
+
+ key->key.uniq_len = ulen;
+ memcpy(key->key.uniquifier, uniq, ulen);
+
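+	/*
+	 * Insert the key into the set of superblock keys, ordered by
+	 * nfs_client, then by the fixed-size parameter block, then by the
+	 * uniquifier; a complete match means another superblock already
+	 * owns an identical cache key.
+	 */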
+ spin_lock(&nfs_fscache_keys_lock);
+ p = &nfs_fscache_keys.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ xkey = rb_entry(parent, struct nfs_fscache_key, node);
+
+ if (key->nfs_client < xkey->nfs_client)
+ goto go_left;
+ if (key->nfs_client > xkey->nfs_client)
+ goto go_right;
+
+ diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
+ if (diff < 0)
+ goto go_left;
+ if (diff > 0)
+ goto go_right;
+
+ if (key->key.uniq_len == 0)
+ goto non_unique;
+ diff = memcmp(key->key.uniquifier,
+ xkey->key.uniquifier,
+ key->key.uniq_len);
+ if (diff < 0)
+ goto go_left;
+ if (diff > 0)
+ goto go_right;
+ goto non_unique;
+
+ go_left:
+ p = &(*p)->rb_left;
+ continue;
+ go_right:
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&key->node, parent, p);
+ rb_insert_color(&key->node, &nfs_fscache_keys);
+ spin_unlock(&nfs_fscache_keys_lock);
+ nfss->fscache_key = key;
+
+ /* create a cache index for looking up filehandles */
+ nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
+ &nfs_fscache_super_index_def,
+ nfss);
+ dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
+ nfss, nfss->fscache);
+ return;
+
+non_unique:
+ spin_unlock(&nfs_fscache_keys_lock);
+ kfree(key);
+ nfss->fscache_key = NULL;
+ nfss->fscache = NULL;
+ printk(KERN_WARNING "NFS:"
+ " Cache request denied due to non-unique superblock keys\n");
+}
+
+/*
+ * release a per-superblock cookie
+ */
+void nfs_fscache_release_super_cookie(struct super_block *sb)
+{
+ struct nfs_server *nfss = NFS_SB(sb);
+
+ dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
+ nfss, nfss->fscache);
+
+ fscache_relinquish_cookie(nfss->fscache, 0);
+ nfss->fscache = NULL;
+
+ if (nfss->fscache_key) {
+ spin_lock(&nfs_fscache_keys_lock);
+ rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
+ spin_unlock(&nfs_fscache_keys_lock);
+ kfree(nfss->fscache_key);
+ nfss->fscache_key = NULL;
+ }
+}
+
+/*
+ * Initialise the per-inode cache cookie pointer for an NFS inode.
+ */
+void nfs_fscache_init_inode_cookie(struct inode *inode)
+{
+ NFS_I(inode)->fscache = NULL;
+ if (S_ISREG(inode->i_mode))
+ set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+}
+
+/*
+ * Get the per-inode cache cookie for an NFS inode.
+ */
+static void nfs_fscache_enable_inode_cookie(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ if (nfsi->fscache || !NFS_FSCACHE(inode))
+ return;
+
+ if ((NFS_SB(sb)->options & NFS_OPTION_FSCACHE)) {
+ nfsi->fscache = fscache_acquire_cookie(
+ NFS_SB(sb)->fscache,
+ &nfs_fscache_inode_object_def,
+ nfsi);
+
+ dfprintk(FSCACHE, "NFS: get FH cookie (0x%p/0x%p/0x%p)\n",
+ sb, nfsi, nfsi->fscache);
+ }
+}
+
+/*
+ * Release a per-inode cookie.
+ */
+void nfs_fscache_release_inode_cookie(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n",
+ nfsi, nfsi->fscache);
+
+ fscache_relinquish_cookie(nfsi->fscache, 0);
+ nfsi->fscache = NULL;
+}
+
+/*
+ * Retire a per-inode cookie, destroying the data attached to it.
+ */
+void nfs_fscache_zap_inode_cookie(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ dfprintk(FSCACHE, "NFS: zapping cookie (0x%p/0x%p)\n",
+ nfsi, nfsi->fscache);
+
+ fscache_relinquish_cookie(nfsi->fscache, 1);
+ nfsi->fscache = NULL;
+}
+
+/*
+ * Turn off the cache with regard to a per-inode cookie if opened for writing,
+ * invalidating all the pages in the page cache relating to the associated
+ * inode to clear the per-page caching.
+ */
+static void nfs_fscache_disable_inode_cookie(struct inode *inode)
+{
+ clear_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+
+ if (NFS_I(inode)->fscache) {
+ dfprintk(FSCACHE,
+ "NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));
+
+ /* Need to uncache any pages attached to this inode that
+ * fscache knows about before turning off the cache.
+ */
+ fscache_uncache_all_inode_pages(NFS_I(inode)->fscache, inode);
+ nfs_fscache_zap_inode_cookie(inode);
+ }
+}
+
+/*
+ * wait_on_bit() sleep function for uninterruptible waiting
+ */
+static int nfs_fscache_wait_bit(void *flags)
+{
+ schedule();
+ return 0;
+}
+
+/*
+ * Lock against someone else trying to also acquire or relinquish a cookie
+ */
+static inline void nfs_fscache_inode_lock(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ while (test_and_set_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags))
+ wait_on_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK,
+ nfs_fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+}
+
+/*
+ * Unlock cookie management lock
+ */
+static inline void nfs_fscache_inode_unlock(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ smp_mb__before_clear_bit();
+ clear_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK);
+}
+
+/*
+ * Decide if we should enable or disable local caching for this inode.
+ * - For now, with NFS, only regular files that are open read-only will be able
+ * to use the cache.
+ * - May be invoked multiple times in parallel by parallel nfs_open() functions.
+ */
+void nfs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
+{
+ if (NFS_FSCACHE(inode)) {
+ nfs_fscache_inode_lock(inode);
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ nfs_fscache_disable_inode_cookie(inode);
+ else
+ nfs_fscache_enable_inode_cookie(inode);
+ nfs_fscache_inode_unlock(inode);
+ }
+}
+
+/*
+ * Replace a per-inode cookie due to revalidation detecting a file having
+ * changed on the server.
+ */
+void nfs_fscache_reset_inode_cookie(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_server *nfss = NFS_SERVER(inode);
+ NFS_IFDEBUG(struct fscache_cookie *old = nfsi->fscache);
+
+ nfs_fscache_inode_lock(inode);
+ if (nfsi->fscache) {
+ /* retire the current fscache cache and get a new one */
+ fscache_relinquish_cookie(nfsi->fscache, 1);
+
+ nfsi->fscache = fscache_acquire_cookie(
+ nfss->nfs_client->fscache,
+ &nfs_fscache_inode_object_def,
+ nfsi);
+
+ dfprintk(FSCACHE,
+ "NFS: revalidation new cookie (0x%p/0x%p/0x%p/0x%p)\n",
+ nfss, nfsi, old, nfsi->fscache);
+ }
+ nfs_fscache_inode_unlock(inode);
+}
+
+/*
+ * Release the caching state associated with a page, if the page isn't busy
+ * interacting with the cache.
+ * - Returns true (can release page) or false (page busy).
+ */
+int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ if (PageFsCache(page)) {
+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+ struct fscache_cookie *cookie = nfsi->fscache;
+
+ BUG_ON(!cookie);
+ dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
+ cookie, page, nfsi);
+
+ if (!fscache_maybe_release_page(cookie, page, gfp))
+ return 0;
+
+ nfs_add_fscache_stats(page->mapping->host,
+ NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
+ }
+
+ return 1;
+}
+
+/*
+ * Release the caching state associated with a page if undergoing complete page
+ * invalidation.
+ */
+void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct fscache_cookie *cookie = nfsi->fscache;
+
+ BUG_ON(!cookie);
+
+ dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
+ cookie, page, nfsi);
+
+ fscache_wait_on_page_write(cookie, page);
+
+ BUG_ON(!PageLocked(page));
+ fscache_uncache_page(cookie, page);
+ nfs_add_fscache_stats(page->mapping->host,
+ NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
+}
+
+/*
+ * Handle completion of a page being read from the cache.
+ * - Called in process (keventd) context.
+ */
+static void nfs_readpage_from_fscache_complete(struct page *page,
+ void *context,
+ int error)
+{
+ dfprintk(FSCACHE,
+ "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
+ page, context, error);
+
+ /* if the read completes with an error, we just unlock the page and let
+ * the VM reissue the readpage */
+ if (!error) {
+ SetPageUptodate(page);
+ unlock_page(page);
+ } else {
+ error = nfs_readpage_async(context, page->mapping->host, page);
+ if (error)
+ unlock_page(page);
+ }
+}
+
+/*
+ * Retrieve a page from fscache
+ */
+int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode, struct page *page)
+{
+ int ret;
+
+ dfprintk(FSCACHE,
+ "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
+ NFS_I(inode)->fscache, page, page->index, page->flags, inode);
+
+ ret = fscache_read_or_alloc_page(NFS_I(inode)->fscache,
+ page,
+ nfs_readpage_from_fscache_complete,
+ ctx,
+ GFP_KERNEL);
+
+ switch (ret) {
+ case 0: /* read BIO submitted (page in fscache) */
+ dfprintk(FSCACHE,
+ "NFS: readpage_from_fscache: BIO submitted\n");
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK, 1);
+ return ret;
+
+ case -ENOBUFS: /* inode not in cache */
+ case -ENODATA: /* page not in cache */
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
+ dfprintk(FSCACHE,
+ "NFS: readpage_from_fscache %d\n", ret);
+ return 1;
+
+ default:
+ dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
+ }
+ return ret;
+}
+
+/*
+ * Retrieve a set of pages from fscache
+ */
+int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ unsigned npages = *nr_pages;
+ int ret;
+
+ dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
+ NFS_I(inode)->fscache, npages, inode);
+
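+	/*
+	 * fscache removes from the list any page it can satisfy or is
+	 * allocating space for, updating *nr_pages to the number left
+	 * over; those remaining must be read from the server.
+	 */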
+ ret = fscache_read_or_alloc_pages(NFS_I(inode)->fscache,
+ mapping, pages, nr_pages,
+ nfs_readpage_from_fscache_complete,
+ ctx,
+ mapping_gfp_mask(mapping));
+ if (*nr_pages < npages)
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
+ npages);
+ if (*nr_pages > 0)
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
+ *nr_pages);
+
+ switch (ret) {
+ case 0: /* read submitted to the cache for all pages */
+ BUG_ON(!list_empty(pages));
+ BUG_ON(*nr_pages != 0);
+ dfprintk(FSCACHE,
+ "NFS: nfs_getpages_from_fscache: submitted\n");
+
+ return ret;
+
+ case -ENOBUFS: /* some pages aren't cached and can't be */
+ case -ENODATA: /* some pages aren't cached */
+ dfprintk(FSCACHE,
+ "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
+ return 1;
+
+ default:
+ dfprintk(FSCACHE,
+ "NFS: nfs_getpages_from_fscache: ret %d\n", ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Store a newly fetched page in fscache
+ * - PG_fscache must be set on the page
+ */
+void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
+{
+ int ret;
+
+ dfprintk(FSCACHE,
+ "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
+ NFS_I(inode)->fscache, page, page->index, page->flags, sync);
+
+ ret = fscache_write_page(NFS_I(inode)->fscache, page, GFP_KERNEL);
+ dfprintk(FSCACHE,
+ "NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
+ page, page->index, page->flags, ret);
+
+ if (ret != 0) {
+ fscache_uncache_page(NFS_I(inode)->fscache, page);
+ nfs_add_fscache_stats(inode,
+ NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, 1);
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
+ } else {
+ nfs_add_fscache_stats(inode,
+ NFSIOS_FSCACHE_PAGES_WRITTEN_OK, 1);
+ }
+}
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
new file mode 100644
index 00000000..b9c572d0
--- /dev/null
+++ b/fs/nfs/fscache.h
@@ -0,0 +1,222 @@
+/* NFS filesystem cache interface definitions
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _NFS_FSCACHE_H
+#define _NFS_FSCACHE_H
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+#include <linux/fscache.h>
+
+#ifdef CONFIG_NFS_FSCACHE
+
+/*
+ * set of NFS FS-Cache objects that form a superblock key
+ */
+struct nfs_fscache_key {
+ struct rb_node node;
+ struct nfs_client *nfs_client; /* the server */
+
+ /* the elements of the unique key - as used by nfs_compare_super() and
+ * nfs_compare_mount_options() to distinguish superblocks */
+ struct {
+ struct {
+ unsigned long s_flags; /* various flags
+ * (& NFS_MS_MASK) */
+ } super;
+
+ struct {
+ struct nfs_fsid fsid;
+ int flags;
+ unsigned int rsize; /* read size */
+ unsigned int wsize; /* write size */
+ unsigned int acregmin; /* attr cache timeouts */
+ unsigned int acregmax;
+ unsigned int acdirmin;
+ unsigned int acdirmax;
+ } nfs_server;
+
+ struct {
+ rpc_authflavor_t au_flavor;
+ } rpc_auth;
+
+ /* uniquifier - can be used if nfs_server.flags includes
+ * NFS_MOUNT_UNSHARED */
+ u8 uniq_len;
+ char uniquifier[0];
+ } key;
+};
+
+/*
+ * fscache-index.c
+ */
+extern struct fscache_netfs nfs_fscache_netfs;
+extern const struct fscache_cookie_def nfs_fscache_server_index_def;
+extern const struct fscache_cookie_def nfs_fscache_super_index_def;
+extern const struct fscache_cookie_def nfs_fscache_inode_object_def;
+
+extern int nfs_fscache_register(void);
+extern void nfs_fscache_unregister(void);
+
+/*
+ * fscache.c
+ */
+extern void nfs_fscache_get_client_cookie(struct nfs_client *);
+extern void nfs_fscache_release_client_cookie(struct nfs_client *);
+
+extern void nfs_fscache_get_super_cookie(struct super_block *,
+ const char *,
+ struct nfs_clone_mount *);
+extern void nfs_fscache_release_super_cookie(struct super_block *);
+
+extern void nfs_fscache_init_inode_cookie(struct inode *);
+extern void nfs_fscache_release_inode_cookie(struct inode *);
+extern void nfs_fscache_zap_inode_cookie(struct inode *);
+extern void nfs_fscache_set_inode_cookie(struct inode *, struct file *);
+extern void nfs_fscache_reset_inode_cookie(struct inode *);
+
+extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
+extern int nfs_fscache_release_page(struct page *, gfp_t);
+
+extern int __nfs_readpage_from_fscache(struct nfs_open_context *,
+ struct inode *, struct page *);
+extern int __nfs_readpages_from_fscache(struct nfs_open_context *,
+ struct inode *, struct address_space *,
+ struct list_head *, unsigned *);
+extern void __nfs_readpage_to_fscache(struct inode *, struct page *, int);
+
+/*
+ * wait for a page to complete writing to the cache
+ */
+static inline void nfs_fscache_wait_on_page_write(struct nfs_inode *nfsi,
+ struct page *page)
+{
+ if (PageFsCache(page))
+ fscache_wait_on_page_write(nfsi->fscache, page);
+}
+
+/*
+ * release the caching state associated with a page if undergoing complete page
+ * invalidation
+ */
+static inline void nfs_fscache_invalidate_page(struct page *page,
+ struct inode *inode)
+{
+ if (PageFsCache(page))
+ __nfs_fscache_invalidate_page(page, inode);
+}
+
+/*
+ * Retrieve a page from an inode data storage object.
+ */
+static inline int nfs_readpage_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct page *page)
+{
+ if (NFS_I(inode)->fscache)
+ return __nfs_readpage_from_fscache(ctx, inode, page);
+ return -ENOBUFS;
+}
+
+/*
+ * Retrieve a set of pages from an inode data storage object.
+ */
+static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ if (NFS_I(inode)->fscache)
+ return __nfs_readpages_from_fscache(ctx, inode, mapping, pages,
+ nr_pages);
+ return -ENOBUFS;
+}
+
+/*
+ * Store a page newly fetched from the server in an inode data storage object
+ * in the cache.
+ */
+static inline void nfs_readpage_to_fscache(struct inode *inode,
+ struct page *page,
+ int sync)
+{
+ if (PageFsCache(page))
+ __nfs_readpage_to_fscache(inode, page, sync);
+}
+
+/*
+ * indicate the client caching state as readable text
+ */
+static inline const char *nfs_server_fscache_state(struct nfs_server *server)
+{
+ if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
+ return "yes";
+ return "no ";
+}
+
+
+#else /* CONFIG_NFS_FSCACHE */
+static inline int nfs_fscache_register(void) { return 0; }
+static inline void nfs_fscache_unregister(void) {}
+
+static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {}
+static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
+
+static inline void nfs_fscache_get_super_cookie(
+ struct super_block *sb,
+ const char *uniq,
+ struct nfs_clone_mount *mntdata)
+{
+}
+static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
+
+static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
+static inline void nfs_fscache_release_inode_cookie(struct inode *inode) {}
+static inline void nfs_fscache_zap_inode_cookie(struct inode *inode) {}
+static inline void nfs_fscache_set_inode_cookie(struct inode *inode,
+ struct file *filp) {}
+static inline void nfs_fscache_reset_inode_cookie(struct inode *inode) {}
+
+static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ return 1; /* True: may release page */
+}
+static inline void nfs_fscache_invalidate_page(struct page *page,
+ struct inode *inode) {}
+static inline void nfs_fscache_wait_on_page_write(struct nfs_inode *nfsi,
+ struct page *page) {}
+
+static inline int nfs_readpage_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct page *page)
+{
+ return -ENOBUFS;
+}
+static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ return -ENOBUFS;
+}
+static inline void nfs_readpage_to_fscache(struct inode *inode,
+ struct page *page, int sync) {}
+
+static inline const char *nfs_server_fscache_state(struct nfs_server *server)
+{
+ return "no ";
+}
+
+#endif /* CONFIG_NFS_FSCACHE */
+#endif /* _NFS_FSCACHE_H */
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
new file mode 100644
index 00000000..4ca6f5c8
--- /dev/null
+++ b/fs/nfs/getroot.c
@@ -0,0 +1,264 @@
+/* getroot.c: get the root dentry for an NFS mount
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+#include <linux/lockd/bind.h>
+#include <linux/seq_file.h>
+#include <linux/mount.h>
+#include <linux/nfs_idmap.h>
+#include <linux/vfs.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+
+#include <asm/uaccess.h>
+
+#include "nfs4_fs.h"
+#include "delegation.h"
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_CLIENT
+
+/*
+ * Set the superblock root dentry.
+ * Note that this function frees the inode in case of error.
+ */
+static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *inode)
+{
+ /* The mntroot acts as the dummy root dentry for this superblock */
+ if (sb->s_root == NULL) {
+ sb->s_root = d_make_root(inode);
+ if (sb->s_root == NULL)
+ return -ENOMEM;
+ ihold(inode);
+ /*
+ * Ensure that this dentry is invisible to d_find_alias().
+ * Otherwise, it may be spliced into the tree by
+ * d_materialise_unique if a parent directory from the same
+ * filesystem gets mounted at a later time.
+ * This again causes shrink_dcache_for_umount_subtree() to
+ * Oops, since the test for IS_ROOT() will fail.
+ */
+ spin_lock(&sb->s_root->d_inode->i_lock);
+ spin_lock(&sb->s_root->d_lock);
+ list_del_init(&sb->s_root->d_alias);
+ spin_unlock(&sb->s_root->d_lock);
+ spin_unlock(&sb->s_root->d_inode->i_lock);
+ }
+ return 0;
+}
+
+/*
+ * get an NFS2/NFS3 root dentry from the root filehandle
+ */
+struct dentry *nfs_get_root(struct super_block *sb, struct nfs_fh *mntfh,
+ const char *devname)
+{
+ struct nfs_server *server = NFS_SB(sb);
+ struct nfs_fsinfo fsinfo;
+ struct dentry *ret;
+ struct inode *inode;
+ void *name = kstrdup(devname, GFP_KERNEL);
+ int error;
+
+ if (!name)
+ return ERR_PTR(-ENOMEM);
+
+ /* get the actual root for this mount */
+ fsinfo.fattr = nfs_alloc_fattr();
+ if (fsinfo.fattr == NULL) {
+ kfree(name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ error = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
+ if (error < 0) {
+ dprintk("nfs_get_root: getattr error = %d\n", -error);
+ ret = ERR_PTR(error);
+ goto out;
+ }
+
+ inode = nfs_fhget(sb, mntfh, fsinfo.fattr);
+ if (IS_ERR(inode)) {
+ dprintk("nfs_get_root: get root inode failed\n");
+ ret = ERR_CAST(inode);
+ goto out;
+ }
+
+ error = nfs_superblock_set_dummy_root(sb, inode);
+ if (error != 0) {
+ ret = ERR_PTR(error);
+ goto out;
+ }
+
+ /* root dentries normally start off anonymous and get spliced in later
+ * if the dentry tree reaches them; however if the dentry already
+ * exists, we'll pick it up at this point and use it as the root
+ */
+ ret = d_obtain_alias(inode);
+ if (IS_ERR(ret)) {
+ dprintk("nfs_get_root: get root dentry failed\n");
+ goto out;
+ }
+
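+	/* let the LSM see the new root, then stash the device name for
+	 * nfs_path() to rebuild submount paths from */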
+ security_d_instantiate(ret, inode);
+ spin_lock(&ret->d_lock);
+ if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
+ ret->d_fsdata = name;
+ name = NULL;
+ }
+ spin_unlock(&ret->d_lock);
+out:
+ if (name)
+ kfree(name);
+ nfs_free_fattr(fsinfo.fattr);
+ return ret;
+}
+
+#ifdef CONFIG_NFS_V4
+
+int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
+{
+ struct nfs_fsinfo fsinfo;
+ int ret = -ENOMEM;
+
+ dprintk("--> nfs4_get_rootfh()\n");
+
+ fsinfo.fattr = nfs_alloc_fattr();
+ if (fsinfo.fattr == NULL)
+ goto out;
+
+ /* Start by getting the root filehandle from the server */
+ ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo);
+ if (ret < 0) {
+ dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
+ goto out;
+ }
+
+ if (!(fsinfo.fattr->valid & NFS_ATTR_FATTR_TYPE)
+ || !S_ISDIR(fsinfo.fattr->mode)) {
+ printk(KERN_ERR "nfs4_get_rootfh:"
+ " getroot encountered non-directory\n");
+ ret = -ENOTDIR;
+ goto out;
+ }
+
+ if (fsinfo.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
+ printk(KERN_ERR "nfs4_get_rootfh:"
+ " getroot obtained referral\n");
+ ret = -EREMOTE;
+ goto out;
+ }
+
+ memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid));
+out:
+ nfs_free_fattr(fsinfo.fattr);
+ dprintk("<-- nfs4_get_rootfh() = %d\n", ret);
+ return ret;
+}
+
+/*
+ * get an NFS4 root dentry from the root filehandle
+ */
+struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh,
+ const char *devname)
+{
+ struct nfs_server *server = NFS_SB(sb);
+ struct nfs_fattr *fattr = NULL;
+ struct dentry *ret;
+ struct inode *inode;
+ void *name = kstrdup(devname, GFP_KERNEL);
+ int error;
+
+ dprintk("--> nfs4_get_root()\n");
+
+ if (!name)
+ return ERR_PTR(-ENOMEM);
+
+ /* get the info about the server and filesystem */
+ error = nfs4_server_capabilities(server, mntfh);
+ if (error < 0) {
+ dprintk("nfs_get_root: getcaps error = %d\n",
+ -error);
+ kfree(name);
+ return ERR_PTR(error);
+ }
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL) {
+ kfree(name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* get the actual root for this mount */
+ error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
+ if (error < 0) {
+ dprintk("nfs_get_root: getattr error = %d\n", -error);
+ ret = ERR_PTR(error);
+ goto out;
+ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_FSID &&
+ !nfs_fsid_equal(&server->fsid, &fattr->fsid))
+ memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
+
+ inode = nfs_fhget(sb, mntfh, fattr);
+ if (IS_ERR(inode)) {
+ dprintk("nfs_get_root: get root inode failed\n");
+ ret = ERR_CAST(inode);
+ goto out;
+ }
+
+ error = nfs_superblock_set_dummy_root(sb, inode);
+ if (error != 0) {
+ ret = ERR_PTR(error);
+ goto out;
+ }
+
+ /* root dentries normally start off anonymous and get spliced in later
+ * if the dentry tree reaches them; however if the dentry already
+ * exists, we'll pick it up at this point and use it as the root
+ */
+ ret = d_obtain_alias(inode);
+ if (IS_ERR(ret)) {
+ dprintk("nfs_get_root: get root dentry failed\n");
+ goto out;
+ }
+
+ security_d_instantiate(ret, inode);
+ spin_lock(&ret->d_lock);
+ if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
+ ret->d_fsdata = name;
+ name = NULL;
+ }
+ spin_unlock(&ret->d_lock);
+out:
+ if (name)
+ kfree(name);
+ nfs_free_fattr(fattr);
+ dprintk("<-- nfs4_get_root()\n");
+ return ret;
+}
+
+#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
new file mode 100644
index 00000000..93aa3a4c
--- /dev/null
+++ b/fs/nfs/idmap.c
@@ -0,0 +1,795 @@
+/*
+ * fs/nfs/idmap.c
+ *
+ * UID and GID to name mapping for clients.
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Marius Aamodt Eriksen <marius@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/types.h>
+#include <linux/parser.h>
+#include <linux/fs.h>
+#include <linux/nfs_idmap.h>
+#include <net/net_namespace.h>
+#include <linux/sunrpc/rpc_pipe_fs.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/key-type.h>
+#include <keys/user-type.h>
+#include <linux/module.h>
+
+#include "internal.h"
+#include "netns.h"
+
+#define NFS_UINT_MAXLEN 11
+
+/* Default cache timeout is 10 minutes */
+unsigned int nfs_idmap_cache_timeout = 600;
+static const struct cred *id_resolver_cache;
+static struct key_type key_type_id_resolver_legacy;
+
+struct idmap {
+ struct rpc_pipe *idmap_pipe;
+ struct key_construction *idmap_key_cons;
+ struct mutex idmap_mutex;
+};
+
+/**
+ * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
+ * @fattr: fully initialised struct nfs_fattr
+ * @owner_name: owner name string cache
+ * @group_name: group name string cache
+ */
+void nfs_fattr_init_names(struct nfs_fattr *fattr,
+ struct nfs4_string *owner_name,
+ struct nfs4_string *group_name)
+{
+ fattr->owner_name = owner_name;
+ fattr->group_name = group_name;
+}
+
+static void nfs_fattr_free_owner_name(struct nfs_fattr *fattr)
+{
+ fattr->valid &= ~NFS_ATTR_FATTR_OWNER_NAME;
+ kfree(fattr->owner_name->data);
+}
+
+static void nfs_fattr_free_group_name(struct nfs_fattr *fattr)
+{
+ fattr->valid &= ~NFS_ATTR_FATTR_GROUP_NAME;
+ kfree(fattr->group_name->data);
+}
+
+static bool nfs_fattr_map_owner_name(struct nfs_server *server, struct nfs_fattr *fattr)
+{
+ struct nfs4_string *owner = fattr->owner_name;
+ __u32 uid;
+
+ if (!(fattr->valid & NFS_ATTR_FATTR_OWNER_NAME))
+ return false;
+ if (nfs_map_name_to_uid(server, owner->data, owner->len, &uid) == 0) {
+ fattr->uid = uid;
+ fattr->valid |= NFS_ATTR_FATTR_OWNER;
+ }
+ return true;
+}
+
+static bool nfs_fattr_map_group_name(struct nfs_server *server, struct nfs_fattr *fattr)
+{
+ struct nfs4_string *group = fattr->group_name;
+ __u32 gid;
+
+ if (!(fattr->valid & NFS_ATTR_FATTR_GROUP_NAME))
+ return false;
+ if (nfs_map_group_to_gid(server, group->data, group->len, &gid) == 0) {
+ fattr->gid = gid;
+ fattr->valid |= NFS_ATTR_FATTR_GROUP;
+ }
+ return true;
+}
+
+/**
+ * nfs_fattr_free_names - free up the NFSv4 owner and group strings
+ * @fattr: a fully initialised nfs_fattr structure
+ */
+void nfs_fattr_free_names(struct nfs_fattr *fattr)
+{
+ if (fattr->valid & NFS_ATTR_FATTR_OWNER_NAME)
+ nfs_fattr_free_owner_name(fattr);
+ if (fattr->valid & NFS_ATTR_FATTR_GROUP_NAME)
+ nfs_fattr_free_group_name(fattr);
+}
+
+/**
+ * nfs_fattr_map_and_free_names - map owner/group strings into uid/gid and free
+ * @server: pointer to the filesystem nfs_server structure
+ * @fattr: a fully initialised nfs_fattr structure
+ *
+ * This helper maps the cached NFSv4 owner/group strings in fattr into
+ * their numeric uid/gid equivalents, and then frees the cached strings.
+ */
+void nfs_fattr_map_and_free_names(struct nfs_server *server, struct nfs_fattr *fattr)
+{
+ if (nfs_fattr_map_owner_name(server, fattr))
+ nfs_fattr_free_owner_name(fattr);
+ if (nfs_fattr_map_group_name(server, fattr))
+ nfs_fattr_free_group_name(fattr);
+}
+
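+/*
+ * Fast path: treat a purely numeric name, e.g. "1000", as the id itself.
+ * A name containing '@' (e.g. "alice@example.com") or one too long to be
+ * a number is rejected here so the caller falls back to an idmap lookup.
+ */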
+static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
+{
+ unsigned long val;
+ char buf[16];
+
+ if (memchr(name, '@', namelen) != NULL || namelen >= sizeof(buf))
+ return 0;
+ memcpy(buf, name, namelen);
+ buf[namelen] = '\0';
+ if (strict_strtoul(buf, 0, &val) != 0)
+ return 0;
+ *res = val;
+ return 1;
+}
+
+static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen)
+{
+ return snprintf(buf, buflen, "%u", id);
+}
+
+static struct key_type key_type_id_resolver = {
+ .name = "id_resolver",
+ .instantiate = user_instantiate,
+ .match = user_match,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .read = user_read,
+};
+
+static int nfs_idmap_init_keyring(void)
+{
+ struct cred *cred;
+ struct key *keyring;
+ int ret = 0;
+
+ printk(KERN_NOTICE "NFS: Registering the %s key type\n",
+ key_type_id_resolver.name);
+
+ cred = prepare_kernel_cred(NULL);
+ if (!cred)
+ return -ENOMEM;
+
+ keyring = key_alloc(&key_type_keyring, ".id_resolver", 0, 0, cred,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto failed_put_cred;
+ }
+
+ ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
+ if (ret < 0)
+ goto failed_put_key;
+
+ ret = register_key_type(&key_type_id_resolver);
+ if (ret < 0)
+ goto failed_put_key;
+
+ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
+ cred->thread_keyring = keyring;
+ cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+ id_resolver_cache = cred;
+ return 0;
+
+failed_put_key:
+ key_put(keyring);
+failed_put_cred:
+ put_cred(cred);
+ return ret;
+}
+
+static void nfs_idmap_quit_keyring(void)
+{
+ key_revoke(id_resolver_cache->thread_keyring);
+ unregister_key_type(&key_type_id_resolver);
+ put_cred(id_resolver_cache);
+}
+
+/*
+ * Assemble the description to pass to request_key(), in the form
+ * "<type>:<name>", e.g. "uid:alice".
+ * This function allocates a new string and updates *desc to point at it.
+ * The caller is responsible for freeing *desc.
+ *
+ * On error a negative errno is returned. Otherwise, the length of the
+ * allocated buffer (including the terminating NUL) is returned.
+ */
+static ssize_t nfs_idmap_get_desc(const char *name, size_t namelen,
+ const char *type, size_t typelen, char **desc)
+{
+ char *cp;
+ size_t desclen = typelen + namelen + 2;
+
+ *desc = kmalloc(desclen, GFP_KERNEL);
+ if (!*desc)
+ return -ENOMEM;
+
+ cp = *desc;
+ memcpy(cp, type, typelen);
+ cp += typelen;
+ *cp++ = ':';
+
+ memcpy(cp, name, namelen);
+ cp += namelen;
+ *cp = '\0';
+ return desclen;
+}
+
+static ssize_t nfs_idmap_request_key(struct key_type *key_type,
+ const char *name, size_t namelen,
+ const char *type, void *data,
+ size_t data_size, struct idmap *idmap)
+{
+ const struct cred *saved_cred;
+ struct key *rkey;
+ char *desc;
+ struct user_key_payload *payload;
+ ssize_t ret;
+
+ ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc);
+ if (ret <= 0)
+ goto out;
+
+ saved_cred = override_creds(id_resolver_cache);
+ if (idmap)
+ rkey = request_key_with_auxdata(key_type, desc, "", 0, idmap);
+ else
+ rkey = request_key(&key_type_id_resolver, desc, "");
+ revert_creds(saved_cred);
+
+ kfree(desc);
+ if (IS_ERR(rkey)) {
+ ret = PTR_ERR(rkey);
+ goto out;
+ }
+
+ rcu_read_lock();
+ rkey->perm |= KEY_USR_VIEW;
+
+ ret = key_validate(rkey);
+ if (ret < 0)
+ goto out_up;
+
+ payload = rcu_dereference(rkey->payload.data);
+ if (IS_ERR_OR_NULL(payload)) {
+ ret = PTR_ERR(payload);
+ goto out_up;
+ }
+
+ ret = payload->datalen;
+ if (ret > 0 && ret <= data_size)
+ memcpy(data, payload->data, ret);
+ else
+ ret = -EINVAL;
+
+out_up:
+ rcu_read_unlock();
+ key_put(rkey);
+out:
+ return ret;
+}
+
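+/*
+ * Two-tier lookup: first try the keyring-based id_resolver (serviced from
+ * userspace via request-key(8), typically by the nfsidmap program), and
+ * only on failure fall back to the legacy rpc_pipefs upcall (typically
+ * serviced by rpc.idmapd). The fallback is serialized by idmap_mutex
+ * because the pipe services a single request at a time.
+ */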
+static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
+ const char *type, void *data,
+ size_t data_size, struct idmap *idmap)
+{
+ ssize_t ret = nfs_idmap_request_key(&key_type_id_resolver,
+ name, namelen, type, data,
+ data_size, NULL);
+ if (ret < 0) {
+ mutex_lock(&idmap->idmap_mutex);
+ ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
+ name, namelen, type, data,
+ data_size, idmap);
+ mutex_unlock(&idmap->idmap_mutex);
+ }
+ return ret;
+}
+
+/* ID -> Name */
+static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf,
+ size_t buflen, struct idmap *idmap)
+{
+ char id_str[NFS_UINT_MAXLEN];
+ int id_len;
+ ssize_t ret;
+
+ id_len = snprintf(id_str, sizeof(id_str), "%u", id);
+ ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap);
+ if (ret < 0)
+ return -EINVAL;
+ return ret;
+}
+
+/* Name -> ID */
+static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *type,
+ __u32 *id, struct idmap *idmap)
+{
+ char id_str[NFS_UINT_MAXLEN];
+ long id_long;
+ ssize_t data_size;
+ int ret = 0;
+
+ data_size = nfs_idmap_get_key(name, namelen, type, id_str, NFS_UINT_MAXLEN, idmap);
+ if (data_size <= 0) {
+ ret = -EINVAL;
+ } else {
+ ret = strict_strtol(id_str, 10, &id_long);
+ *id = (__u32)id_long;
+ }
+ return ret;
+}
+
+/* The classic, rpc_pipefs-based idmap upcall begins here */
+module_param(nfs_idmap_cache_timeout, int, 0644);
+
+enum {
+ Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err
+};
+
+static const match_table_t nfs_idmap_tokens = {
+ { Opt_find_uid, "uid:%s" },
+ { Opt_find_gid, "gid:%s" },
+ { Opt_find_user, "user:%s" },
+ { Opt_find_group, "group:%s" },
+ { Opt_find_err, NULL }
+};
+
+static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
+static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
+ size_t);
+static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);
+
+static const struct rpc_pipe_ops idmap_upcall_ops = {
+ .upcall = rpc_pipe_generic_upcall,
+ .downcall = idmap_pipe_downcall,
+ .destroy_msg = idmap_pipe_destroy_msg,
+};
+
+static struct key_type key_type_id_resolver_legacy = {
+ .name = "id_resolver",
+ .instantiate = user_instantiate,
+ .match = user_match,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .read = user_read,
+ .request_key = nfs_idmap_legacy_upcall,
+};
+
+static void __nfs_idmap_unregister(struct rpc_pipe *pipe)
+{
+ if (pipe->dentry)
+ rpc_unlink(pipe->dentry);
+}
+
+static int __nfs_idmap_register(struct dentry *dir,
+ struct idmap *idmap,
+ struct rpc_pipe *pipe)
+{
+ struct dentry *dentry;
+
+ dentry = rpc_mkpipe_dentry(dir, "idmap", idmap, pipe);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ pipe->dentry = dentry;
+ return 0;
+}
+
+static void nfs_idmap_unregister(struct nfs_client *clp,
+ struct rpc_pipe *pipe)
+{
+ struct net *net = clp->net;
+ struct super_block *pipefs_sb;
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (pipefs_sb) {
+ __nfs_idmap_unregister(pipe);
+ rpc_put_sb_net(net);
+ }
+}
+
+static int nfs_idmap_register(struct nfs_client *clp,
+ struct idmap *idmap,
+ struct rpc_pipe *pipe)
+{
+ struct net *net = clp->net;
+ struct super_block *pipefs_sb;
+ int err = 0;
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (pipefs_sb) {
+ if (clp->cl_rpcclient->cl_dentry)
+ err = __nfs_idmap_register(clp->cl_rpcclient->cl_dentry,
+ idmap, pipe);
+ rpc_put_sb_net(net);
+ }
+ return err;
+}
+
+int
+nfs_idmap_new(struct nfs_client *clp)
+{
+ struct idmap *idmap;
+ struct rpc_pipe *pipe;
+ int error;
+
+ BUG_ON(clp->cl_idmap != NULL);
+
+ idmap = kzalloc(sizeof(*idmap), GFP_KERNEL);
+ if (idmap == NULL)
+ return -ENOMEM;
+
+ pipe = rpc_mkpipe_data(&idmap_upcall_ops, 0);
+ if (IS_ERR(pipe)) {
+ error = PTR_ERR(pipe);
+ kfree(idmap);
+ return error;
+ }
+ error = nfs_idmap_register(clp, idmap, pipe);
+ if (error) {
+ rpc_destroy_pipe_data(pipe);
+ kfree(idmap);
+ return error;
+ }
+ idmap->idmap_pipe = pipe;
+ mutex_init(&idmap->idmap_mutex);
+
+ clp->cl_idmap = idmap;
+ return 0;
+}
+
+void
+nfs_idmap_delete(struct nfs_client *clp)
+{
+ struct idmap *idmap = clp->cl_idmap;
+
+ if (!idmap)
+ return;
+ nfs_idmap_unregister(clp, idmap->idmap_pipe);
+ rpc_destroy_pipe_data(idmap->idmap_pipe);
+ clp->cl_idmap = NULL;
+ kfree(idmap);
+}
+
+static int __rpc_pipefs_event(struct nfs_client *clp, unsigned long event,
+ struct super_block *sb)
+{
+ int err = 0;
+
+ switch (event) {
+ case RPC_PIPEFS_MOUNT:
+ BUG_ON(clp->cl_rpcclient->cl_dentry == NULL);
+ err = __nfs_idmap_register(clp->cl_rpcclient->cl_dentry,
+ clp->cl_idmap,
+ clp->cl_idmap->idmap_pipe);
+ break;
+ case RPC_PIPEFS_UMOUNT:
+ if (clp->cl_idmap->idmap_pipe) {
+ struct dentry *parent;
+
+ parent = clp->cl_idmap->idmap_pipe->dentry->d_parent;
+ __nfs_idmap_unregister(clp->cl_idmap->idmap_pipe);
+			/*
+			 * Note: this is a dirty hack. The SUNRPC hook has
+			 * already been called, but its simple_rmdir() of the
+			 * client directory failed because the idmap pipe was
+			 * still inside it. So we have to remove the directory
+			 * here ourselves.
+			 */
+ if (rpc_rmdir(parent))
+ printk(KERN_ERR "NFS: %s: failed to remove "
+ "clnt dir!\n", __func__);
+ }
+ break;
+ default:
+ printk(KERN_ERR "NFS: %s: unknown event: %ld\n", __func__,
+ event);
+ return -ENOTSUPP;
+ }
+ return err;
+}
+
+static struct nfs_client *nfs_get_client_for_event(struct net *net, int event)
+{
+ struct nfs_net *nn = net_generic(net, nfs_net_id);
+ struct dentry *cl_dentry;
+ struct nfs_client *clp;
+
+ spin_lock(&nn->nfs_client_lock);
+ list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
+ if (clp->rpc_ops != &nfs_v4_clientops)
+ continue;
+ cl_dentry = clp->cl_idmap->idmap_pipe->dentry;
+ if (((event == RPC_PIPEFS_MOUNT) && cl_dentry) ||
+ ((event == RPC_PIPEFS_UMOUNT) && !cl_dentry))
+ continue;
+ atomic_inc(&clp->cl_count);
+ spin_unlock(&nn->nfs_client_lock);
+ return clp;
+ }
+ spin_unlock(&nn->nfs_client_lock);
+ return NULL;
+}
+
+static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct super_block *sb = ptr;
+ struct nfs_client *clp;
+ int error = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return 0;
+
+ while ((clp = nfs_get_client_for_event(sb->s_fs_info, event))) {
+ error = __rpc_pipefs_event(clp, event, sb);
+ nfs_put_client(clp);
+ if (error)
+ break;
+ }
+ module_put(THIS_MODULE);
+ return error;
+}
+
+#define PIPEFS_NFS_PRIO 1
+
+static struct notifier_block nfs_idmap_block = {
+ .notifier_call = rpc_pipefs_event,
+ .priority = SUNRPC_PIPEFS_NFS_PRIO,
+};
+
+int nfs_idmap_init(void)
+{
+ int ret;
+ ret = nfs_idmap_init_keyring();
+ if (ret != 0)
+ goto out;
+ ret = rpc_pipefs_notifier_register(&nfs_idmap_block);
+ if (ret != 0)
+ nfs_idmap_quit_keyring();
+out:
+ return ret;
+}
+
+void nfs_idmap_quit(void)
+{
+ rpc_pipefs_notifier_unregister(&nfs_idmap_block);
+ nfs_idmap_quit_keyring();
+}
+
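+/*
+ * Parse a key description into an idmap upcall message. For example, a
+ * description of "uid:alice" (hypothetical name) yields im_conv ==
+ * IDMAP_CONV_NAMETOID, im_type == IDMAP_TYPE_USER and im_name == "alice",
+ * while "group:100" yields im_conv == IDMAP_CONV_IDTONAME with the
+ * default im_type of IDMAP_TYPE_GROUP and im_id == 100.
+ */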
+static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im,
+ struct rpc_pipe_msg *msg)
+{
+ substring_t substr;
+ int token, ret;
+
+ memset(im, 0, sizeof(*im));
+ memset(msg, 0, sizeof(*msg));
+
+ im->im_type = IDMAP_TYPE_GROUP;
+ token = match_token(desc, nfs_idmap_tokens, &substr);
+
+ switch (token) {
+ case Opt_find_uid:
+ im->im_type = IDMAP_TYPE_USER;
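+		/* Fall through */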
+ case Opt_find_gid:
+ im->im_conv = IDMAP_CONV_NAMETOID;
+ ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ);
+ break;
+
+ case Opt_find_user:
+ im->im_type = IDMAP_TYPE_USER;
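+		/* Fall through */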
+ case Opt_find_group:
+ im->im_conv = IDMAP_CONV_IDTONAME;
+ ret = match_int(&substr, &im->im_id);
+ break;
+
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ msg->data = im;
+ msg->len = sizeof(struct idmap_msg);
+
+out:
+ return ret;
+}
+
+static int nfs_idmap_legacy_upcall(struct key_construction *cons,
+ const char *op,
+ void *aux)
+{
+ struct rpc_pipe_msg *msg;
+ struct idmap_msg *im;
+ struct idmap *idmap = (struct idmap *)aux;
+ struct key *key = cons->key;
+ int ret = -ENOMEM;
+
+ /* msg and im are freed in idmap_pipe_destroy_msg */
+ msg = kmalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ goto out0;
+
+ im = kmalloc(sizeof(*im), GFP_KERNEL);
+ if (!im)
+ goto out1;
+
+ ret = nfs_idmap_prepare_message(key->description, im, msg);
+ if (ret < 0)
+ goto out2;
+
+ idmap->idmap_key_cons = cons;
+
+ ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
+ if (ret < 0)
+ goto out2;
+
+ return ret;
+
+out2:
+ kfree(im);
+out1:
+ kfree(msg);
+out0:
+ key_revoke(cons->key);
+ key_revoke(cons->authkey);
+ return ret;
+}
+
+static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data)
+{
+ return key_instantiate_and_link(key, data, strlen(data) + 1,
+ id_resolver_cache->thread_keyring,
+ authkey);
+}
+
+static int nfs_idmap_read_message(struct idmap_msg *im, struct key *key, struct key *authkey)
+{
+ char id_str[NFS_UINT_MAXLEN];
+ int ret = -EINVAL;
+
+ switch (im->im_conv) {
+ case IDMAP_CONV_NAMETOID:
+		sprintf(id_str, "%u", im->im_id);
+ ret = nfs_idmap_instantiate(key, authkey, id_str);
+ break;
+ case IDMAP_CONV_IDTONAME:
+ ret = nfs_idmap_instantiate(key, authkey, im->im_name);
+ break;
+ }
+
+ return ret;
+}
+
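+/*
+ * Handle one reply written back by the userspace idmap daemon. The daemon
+ * must write exactly one complete struct idmap_msg per write(2); any other
+ * length is rejected with -ENOSPC and the pending key request is failed
+ * with that error.
+ */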
+static ssize_t
+idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+{
+ struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
+ struct idmap *idmap = (struct idmap *)rpci->private;
+ struct key_construction *cons = idmap->idmap_key_cons;
+ struct idmap_msg im;
+ size_t namelen_in;
+ int ret;
+
+ if (mlen != sizeof(im)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ if (copy_from_user(&im, src, mlen) != 0) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
+ ret = mlen;
+ complete_request_key(idmap->idmap_key_cons, -ENOKEY);
+ goto out_incomplete;
+ }
+
+ namelen_in = strnlen(im.im_name, IDMAP_NAMESZ);
+ if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = nfs_idmap_read_message(&im, cons->key, cons->authkey);
+ if (ret >= 0) {
+ key_set_timeout(cons->key, nfs_idmap_cache_timeout);
+ ret = mlen;
+ }
+
+out:
+ complete_request_key(idmap->idmap_key_cons, ret);
+out_incomplete:
+ return ret;
+}
+
+static void
+idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
+{
+ /* Free memory allocated in nfs_idmap_legacy_upcall() */
+ kfree(msg->data);
+ kfree(msg);
+}
+
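+/*
+ * The four helpers below are the interface used by the NFSv4 XDR code.
+ * Purely numeric names short-circuit the idmapper entirely, and when the
+ * server advertises NFS_CAP_UIDGID_NOMAP (or a lookup fails) ids are
+ * emitted as plain decimal strings.
+ */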
+int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid)
+{
+ struct idmap *idmap = server->nfs_client->cl_idmap;
+
+ if (nfs_map_string_to_numeric(name, namelen, uid))
+ return 0;
+ return nfs_idmap_lookup_id(name, namelen, "uid", uid, idmap);
+}
+
+int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *gid)
+{
+ struct idmap *idmap = server->nfs_client->cl_idmap;
+
+ if (nfs_map_string_to_numeric(name, namelen, gid))
+ return 0;
+ return nfs_idmap_lookup_id(name, namelen, "gid", gid, idmap);
+}
+
+int nfs_map_uid_to_name(const struct nfs_server *server, __u32 uid, char *buf, size_t buflen)
+{
+ struct idmap *idmap = server->nfs_client->cl_idmap;
+ int ret = -EINVAL;
+
+ if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
+ ret = nfs_idmap_lookup_name(uid, "user", buf, buflen, idmap);
+ if (ret < 0)
+ ret = nfs_map_numeric_to_string(uid, buf, buflen);
+ return ret;
+}
+int nfs_map_gid_to_group(const struct nfs_server *server, __u32 gid, char *buf, size_t buflen)
+{
+ struct idmap *idmap = server->nfs_client->cl_idmap;
+ int ret = -EINVAL;
+
+ if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
+ ret = nfs_idmap_lookup_name(gid, "group", buf, buflen, idmap);
+ if (ret < 0)
+ ret = nfs_map_numeric_to_string(gid, buf, buflen);
+ return ret;
+}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
new file mode 100644
index 00000000..e8bbfa5b
--- /dev/null
+++ b/fs/nfs/inode.c
@@ -0,0 +1,1752 @@
+/*
+ * linux/fs/nfs/inode.c
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ *
+ * nfs inode and superblock handling functions
+ *
+ * Modularised by Alan Cox <alan@lxorguk.ukuu.org.uk>, while hacking some
+ * experimental NFS changes. Modularisation taken straight from SYS5 fs.
+ *
+ * Change to nfs_read_super() to permit NFS mounts to multi-homed hosts.
+ * J.S.Peatfield@damtp.cam.ac.uk
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/metrics.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+#include <linux/lockd/bind.h>
+#include <linux/seq_file.h>
+#include <linux/mount.h>
+#include <linux/nfs_idmap.h>
+#include <linux/vfs.h>
+#include <linux/inet.h>
+#include <linux/nfs_xdr.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/freezer.h>
+#include <linux/crc32.h>
+
+#include <asm/uaccess.h>
+
+#include "nfs4_fs.h"
+#include "callback.h"
+#include "delegation.h"
+#include "iostat.h"
+#include "internal.h"
+#include "fscache.h"
+#include "dns_resolve.h"
+#include "pnfs.h"
+#include "netns.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+#define NFS_64_BIT_INODE_NUMBERS_ENABLED 1
+
+/* Default is to see 64-bit inode numbers */
+static bool enable_ino64 = NFS_64_BIT_INODE_NUMBERS_ENABLED;
+
+static void nfs_invalidate_inode(struct inode *);
+static int nfs_update_inode(struct inode *, struct nfs_fattr *);
+
+static struct kmem_cache * nfs_inode_cachep;
+
+static inline unsigned long
+nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
+{
+ return nfs_fileid_to_ino_t(fattr->fileid);
+}
+
+/**
+ * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks
+ * @word: long word containing the bit lock
+ */
+int nfs_wait_bit_killable(void *word)
+{
+ if (fatal_signal_pending(current))
+ return -ERESTARTSYS;
+ freezable_schedule();
+ return 0;
+}
+
+/**
+ * nfs_compat_user_ino64 - returns the user-visible inode number
+ * @fileid: 64-bit fileid
+ *
+ * This function returns a 32-bit inode number if the boot parameter
+ * nfs.enable_ino64 is zero.
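+ *
+ * For example, with 32-bit inode numbers and enable_ino64 == 0, fileid
+ * 0x123456789abcdef0 is folded to 0x9abcdef0 ^ 0x12345678 == 0x88888888.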
+ */
+u64 nfs_compat_user_ino64(u64 fileid)
+{
+#ifdef CONFIG_COMPAT
+ compat_ulong_t ino;
+#else
+ unsigned long ino;
+#endif
+
+ if (enable_ino64)
+ return fileid;
+ ino = fileid;
+ if (sizeof(ino) < sizeof(fileid))
+ ino ^= fileid >> (sizeof(fileid)-sizeof(ino)) * 8;
+ return ino;
+}
+
+static void nfs_clear_inode(struct inode *inode)
+{
+ /*
+ * The following should never happen...
+ */
+ BUG_ON(nfs_have_writebacks(inode));
+ BUG_ON(!list_empty(&NFS_I(inode)->open_files));
+ nfs_zap_acl_cache(inode);
+ nfs_access_zap_cache(inode);
+ nfs_fscache_release_inode_cookie(inode);
+}
+
+void nfs_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+ nfs_clear_inode(inode);
+}
+
+/**
+ * nfs_sync_mapping - helper to flush all mmapped dirty data to disk
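+ * @mapping: pointer to the address_space whose dirty pages are flushed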
+ */
+int nfs_sync_mapping(struct address_space *mapping)
+{
+ int ret = 0;
+
+ if (mapping->nrpages != 0) {
+ unmap_mapping_range(mapping, 0, 0, 0);
+ ret = nfs_wb_all(mapping->host);
+ }
+ return ret;
+}
+
+/*
+ * Invalidate the local caches
+ */
+static void nfs_zap_caches_locked(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ int mode = inode->i_mode;
+
+ nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
+
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = jiffies;
+
+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
+ if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+ else
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
+}
+
+void nfs_zap_caches(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ nfs_zap_caches_locked(inode);
+ spin_unlock(&inode->i_lock);
+}
+
+void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
+{
+ if (mapping->nrpages != 0) {
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
+ spin_unlock(&inode->i_lock);
+ }
+}
+
+void nfs_zap_acl_cache(struct inode *inode)
+{
+ void (*clear_acl_cache)(struct inode *);
+
+ clear_acl_cache = NFS_PROTO(inode)->clear_acl_cache;
+ if (clear_acl_cache != NULL)
+ clear_acl_cache(inode);
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_ACL;
+ spin_unlock(&inode->i_lock);
+}
+
+void nfs_invalidate_atime(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+ spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Invalidate, but do not unhash, the inode.
+ * NB: must be called with inode->i_lock held!
+ */
+static void nfs_invalidate_inode(struct inode *inode)
+{
+ set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+ nfs_zap_caches_locked(inode);
+}
+
+struct nfs_find_desc {
+ struct nfs_fh *fh;
+ struct nfs_fattr *fattr;
+};
+
+/*
+ * In NFSv3 we can have 64bit inode numbers. In order to support
+ * this, and re-exported directories (also seen in NFSv2)
+ * we are forced to allow 2 different inodes to have the same
+ * i_ino.
+ */
+static int
+nfs_find_actor(struct inode *inode, void *opaque)
+{
+ struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque;
+ struct nfs_fh *fh = desc->fh;
+ struct nfs_fattr *fattr = desc->fattr;
+
+ if (NFS_FILEID(inode) != fattr->fileid)
+ return 0;
+ if (nfs_compare_fh(NFS_FH(inode), fh))
+ return 0;
+ if (is_bad_inode(inode) || NFS_STALE(inode))
+ return 0;
+ return 1;
+}
+
+static int
+nfs_init_locked(struct inode *inode, void *opaque)
+{
+ struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque;
+ struct nfs_fattr *fattr = desc->fattr;
+
+ set_nfs_fileid(inode, fattr->fileid);
+ nfs_copy_fh(NFS_FH(inode), desc->fh);
+ return 0;
+}
+
+/*
+ * This is our front-end to iget that looks up inodes by file handle
+ * instead of inode number.
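+ *
+ * iget5_locked() hashes on nfs_fattr_to_ino_t(fattr), while nfs_find_actor()
+ * disambiguates collisions by comparing both the fileid and the file
+ * handle, so distinct files that share an i_ino still get distinct inodes.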
+ */
+struct inode *
+nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
+{
+ struct nfs_find_desc desc = {
+ .fh = fh,
+ .fattr = fattr
+ };
+ struct inode *inode = ERR_PTR(-ENOENT);
+ unsigned long hash;
+
+ nfs_attr_check_mountpoint(sb, fattr);
+
+ if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
+ !nfs_attr_use_mounted_on_fileid(fattr))
+ goto out_no_inode;
+ if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
+ goto out_no_inode;
+
+ hash = nfs_fattr_to_ino_t(fattr);
+
+ inode = iget5_locked(sb, hash, nfs_find_actor, nfs_init_locked, &desc);
+ if (inode == NULL) {
+ inode = ERR_PTR(-ENOMEM);
+ goto out_no_inode;
+ }
+
+ if (inode->i_state & I_NEW) {
+ struct nfs_inode *nfsi = NFS_I(inode);
+ unsigned long now = jiffies;
+
+ /* We set i_ino for the few things that still rely on it,
+ * such as stat(2) */
+ inode->i_ino = hash;
+
+ /* We can't support update_atime(), since the server will reset it */
+ inode->i_flags |= S_NOATIME|S_NOCMTIME;
+ inode->i_mode = fattr->mode;
+ if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
+ && nfs_server_capable(inode, NFS_CAP_MODE))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
+ /* Why so? Because we want revalidate for devices/FIFOs, and
+ * that's precisely what we have in nfs_file_inode_operations.
+ */
+ inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
+ if (S_ISREG(inode->i_mode)) {
+ inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
+ inode->i_data.a_ops = &nfs_file_aops;
+ inode->i_data.backing_dev_info = &NFS_SB(sb)->backing_dev_info;
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
+ inode->i_fop = &nfs_dir_operations;
+ inode->i_data.a_ops = &nfs_dir_aops;
+ if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
+ set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
+ /* Deal with crossing mountpoints */
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
+ fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
+ if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
+ inode->i_op = &nfs_referral_inode_operations;
+ else
+ inode->i_op = &nfs_mountpoint_inode_operations;
+ inode->i_fop = NULL;
+ inode->i_flags |= S_AUTOMOUNT;
+ }
+ } else if (S_ISLNK(inode->i_mode))
+ inode->i_op = &nfs_symlink_inode_operations;
+ else
+ init_special_inode(inode, inode->i_mode, fattr->rdev);
+
+ memset(&inode->i_atime, 0, sizeof(inode->i_atime));
+ memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
+ memset(&inode->i_ctime, 0, sizeof(inode->i_ctime));
+ inode->i_version = 0;
+ inode->i_size = 0;
+ clear_nlink(inode);
+ inode->i_uid = -2;
+ inode->i_gid = -2;
+ inode->i_blocks = 0;
+ memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+
+ nfsi->read_cache_jiffies = fattr->time_start;
+ nfsi->attr_gencount = fattr->gencount;
+ if (fattr->valid & NFS_ATTR_FATTR_ATIME)
+ inode->i_atime = fattr->atime;
+ else if (nfs_server_capable(inode, NFS_CAP_ATIME))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+ if (fattr->valid & NFS_ATTR_FATTR_MTIME)
+ inode->i_mtime = fattr->mtime;
+ else if (nfs_server_capable(inode, NFS_CAP_MTIME))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA;
+ if (fattr->valid & NFS_ATTR_FATTR_CTIME)
+ inode->i_ctime = fattr->ctime;
+ else if (nfs_server_capable(inode, NFS_CAP_CTIME))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
+ if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
+ inode->i_version = fattr->change_attr;
+ else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA;
+ if (fattr->valid & NFS_ATTR_FATTR_SIZE)
+ inode->i_size = nfs_size_to_loff_t(fattr->size);
+ else
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_REVAL_PAGECACHE;
+ if (fattr->valid & NFS_ATTR_FATTR_NLINK)
+ set_nlink(inode, fattr->nlink);
+ else if (nfs_server_capable(inode, NFS_CAP_NLINK))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+ if (fattr->valid & NFS_ATTR_FATTR_OWNER)
+ inode->i_uid = fattr->uid;
+ else if (nfs_server_capable(inode, NFS_CAP_OWNER))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
+ if (fattr->valid & NFS_ATTR_FATTR_GROUP)
+ inode->i_gid = fattr->gid;
+ else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL;
+ if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
+ inode->i_blocks = fattr->du.nfs2.blocks;
+ if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
+ /*
+ * report the blocks in 512byte units
+ */
+ inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
+ }
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = now;
+ nfsi->access_cache = RB_ROOT;
+
+ nfs_fscache_init_inode_cookie(inode);
+
+ unlock_new_inode(inode);
+ } else
+ nfs_refresh_inode(inode, fattr);
+ dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n",
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ nfs_display_fhandle_hash(fh),
+ atomic_read(&inode->i_count));
+
+out:
+ return inode;
+
+out_no_inode:
+ dprintk("nfs_fhget: iget failed with error %ld\n", PTR_ERR(inode));
+ goto out;
+}
+
+#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE|ATTR_OPEN)
+
+int
+nfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ struct nfs_fattr *fattr;
+ int error = -ENOMEM;
+
+ nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
+
+ /* skip mode change if it's just for clearing setuid/setgid */
+ if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+ attr->ia_valid &= ~ATTR_MODE;
+
+ if (attr->ia_valid & ATTR_SIZE) {
+ if (!S_ISREG(inode->i_mode) || attr->ia_size == i_size_read(inode))
+ attr->ia_valid &= ~ATTR_SIZE;
+ }
+
+ /* Optimization: if the end result is no change, don't RPC */
+ attr->ia_valid &= NFS_VALID_ATTRS;
+ if ((attr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
+ return 0;
+
+ /* Write all dirty data */
+ if (S_ISREG(inode->i_mode))
+ nfs_wb_all(inode);
+
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out;
+ /*
+ * Return any delegations if we're going to change ACLs
+ */
+ if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
+ nfs_inode_return_delegation(inode);
+ error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
+ if (error == 0)
+ nfs_refresh_inode(inode, fattr);
+ nfs_free_fattr(fattr);
+out:
+ return error;
+}
+
+/**
+ * nfs_vmtruncate - unmap mappings "freed" by truncate() syscall
+ * @inode: inode of the file used
+ * @offset: file offset to start truncating
+ *
+ * This is a copy of the common vmtruncate, but with the locking
+ * corrected to take into account the fact that NFS requires
+ * inode->i_size to be updated under the inode->i_lock.
+ */
+static int nfs_vmtruncate(struct inode * inode, loff_t offset)
+{
+ loff_t oldsize;
+ int err;
+
+ err = inode_newsize_ok(inode, offset);
+ if (err)
+ goto out;
+
+ spin_lock(&inode->i_lock);
+ oldsize = inode->i_size;
+ i_size_write(inode, offset);
+ spin_unlock(&inode->i_lock);
+
+ truncate_pagecache(inode, oldsize, offset);
+out:
+ return err;
+}
+
+/**
+ * nfs_setattr_update_inode - Update inode metadata after a setattr call.
+ * @inode: pointer to struct inode
+ * @attr: pointer to struct iattr
+ *
+ * Note: we do this in the *proc.c in order to ensure that
+ * it works for things like exclusive creates too.
+ */
+void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
+{
+ if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
+ spin_lock(&inode->i_lock);
+ if ((attr->ia_valid & ATTR_MODE) != 0) {
+ int mode = attr->ia_mode & S_IALLUGO;
+ mode |= inode->i_mode & ~S_IALLUGO;
+ inode->i_mode = mode;
+ }
+ if ((attr->ia_valid & ATTR_UID) != 0)
+ inode->i_uid = attr->ia_uid;
+ if ((attr->ia_valid & ATTR_GID) != 0)
+ inode->i_gid = attr->ia_gid;
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ spin_unlock(&inode->i_lock);
+ }
+ if ((attr->ia_valid & ATTR_SIZE) != 0) {
+ nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
+ nfs_vmtruncate(inode, attr->ia_size);
+ }
+}
+
+int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+{
+ struct inode *inode = dentry->d_inode;
+ int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
+ int err;
+
+ /* Flush out writes to the server in order to update c/mtime. */
+ if (S_ISREG(inode->i_mode)) {
+ err = filemap_write_and_wait(inode->i_mapping);
+ if (err)
+ goto out;
+ }
+
+ /*
+ * We may force a getattr if the user cares about atime.
+ *
+ * Note that we only have to check the vfsmount flags here:
+	 * - NFS always sets S_NOATIME, so checking it would give a
+	 *   bogus result
+ * - NFS never sets MS_NOATIME or MS_NODIRATIME so there is
+ * no point in checking those.
+ */
+ if ((mnt->mnt_flags & MNT_NOATIME) ||
+ ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
+ need_atime = 0;
+
+ if (need_atime)
+ err = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ else
+ err = nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (!err) {
+ generic_fillattr(inode, stat);
+ stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
+ }
+out:
+ return err;
+}
+
+static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
+{
+ atomic_set(&l_ctx->count, 1);
+ l_ctx->lockowner = current->files;
+ l_ctx->pid = current->tgid;
+ INIT_LIST_HEAD(&l_ctx->list);
+}
+
+static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
+{
+ struct nfs_lock_context *pos;
+
+ list_for_each_entry(pos, &ctx->lock_context.list, list) {
+ if (pos->lockowner != current->files)
+ continue;
+ if (pos->pid != current->tgid)
+ continue;
+ atomic_inc(&pos->count);
+ return pos;
+ }
+ return NULL;
+}
+
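+/*
+ * Find or create a lock context for this open context. The allocation is
+ * done with inode->i_lock dropped (kmalloc may sleep), so the list has to
+ * be searched again after relocking in case another task won the race and
+ * inserted a matching entry first.
+ */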
+struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
+{
+ struct nfs_lock_context *res, *new = NULL;
+ struct inode *inode = ctx->dentry->d_inode;
+
+ spin_lock(&inode->i_lock);
+ res = __nfs_find_lock_context(ctx);
+ if (res == NULL) {
+ spin_unlock(&inode->i_lock);
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (new == NULL)
+ return NULL;
+ nfs_init_lock_context(new);
+ spin_lock(&inode->i_lock);
+ res = __nfs_find_lock_context(ctx);
+ if (res == NULL) {
+ list_add_tail(&new->list, &ctx->lock_context.list);
+ new->open_context = ctx;
+ res = new;
+ new = NULL;
+ }
+ }
+ spin_unlock(&inode->i_lock);
+ kfree(new);
+ return res;
+}
+
+void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
+{
+ struct nfs_open_context *ctx = l_ctx->open_context;
+ struct inode *inode = ctx->dentry->d_inode;
+
+ if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock))
+ return;
+ list_del(&l_ctx->list);
+ spin_unlock(&inode->i_lock);
+ kfree(l_ctx);
+}
+
+/**
+ * nfs_close_context - Common close_context() routine for NFSv2/v3
+ * @ctx: pointer to context
+ * @is_sync: is this a synchronous close
+ *
+ * Always ensure that the attributes are up to date if we're mounted
+ * with close-to-open semantics.
+ */
+void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
+{
+ struct inode *inode;
+ struct nfs_server *server;
+
+ if (!(ctx->mode & FMODE_WRITE))
+ return;
+ if (!is_sync)
+ return;
+ inode = ctx->dentry->d_inode;
+ if (!list_empty(&NFS_I(inode)->open_files))
+ return;
+ server = NFS_SERVER(inode);
+ if (server->flags & NFS_MOUNT_NOCTO)
+ return;
+ nfs_revalidate_inode(server, inode);
+}
+
+struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode)
+{
+ struct nfs_open_context *ctx;
+ struct rpc_cred *cred = rpc_lookup_cred();
+ if (IS_ERR(cred))
+ return ERR_CAST(cred);
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ put_rpccred(cred);
+ return ERR_PTR(-ENOMEM);
+ }
+ nfs_sb_active(dentry->d_sb);
+ ctx->dentry = dget(dentry);
+ ctx->cred = cred;
+ ctx->state = NULL;
+ ctx->mode = f_mode;
+ ctx->flags = 0;
+ ctx->error = 0;
+ nfs_init_lock_context(&ctx->lock_context);
+ ctx->lock_context.open_context = ctx;
+ INIT_LIST_HEAD(&ctx->list);
+ return ctx;
+}
+
+struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
+{
+ if (ctx != NULL)
+ atomic_inc(&ctx->lock_context.count);
+ return ctx;
+}
+
+static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
+{
+ struct inode *inode = ctx->dentry->d_inode;
+ struct super_block *sb = ctx->dentry->d_sb;
+
+ if (!list_empty(&ctx->list)) {
+ if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
+ return;
+ list_del(&ctx->list);
+ spin_unlock(&inode->i_lock);
+ } else if (!atomic_dec_and_test(&ctx->lock_context.count))
+ return;
+ if (inode != NULL)
+ NFS_PROTO(inode)->close_context(ctx, is_sync);
+ if (ctx->cred != NULL)
+ put_rpccred(ctx->cred);
+ dput(ctx->dentry);
+ nfs_sb_deactive(sb);
+ kfree(ctx);
+}
+
+void put_nfs_open_context(struct nfs_open_context *ctx)
+{
+ __put_nfs_open_context(ctx, 0);
+}
+
+/*
+ * Ensure that mmap has a recent RPC credential for use when writing out
+ * shared pages
+ */
+void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ filp->private_data = get_nfs_open_context(ctx);
+ spin_lock(&inode->i_lock);
+ list_add(&ctx->list, &nfsi->open_files);
+ spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Given an inode, search for an open context with the desired characteristics
+ */
+struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_open_context *pos, *ctx = NULL;
+
+ spin_lock(&inode->i_lock);
+ list_for_each_entry(pos, &nfsi->open_files, list) {
+ if (cred != NULL && pos->cred != cred)
+ continue;
+ if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
+ continue;
+ ctx = get_nfs_open_context(pos);
+ break;
+ }
+ spin_unlock(&inode->i_lock);
+ return ctx;
+}
+
+static void nfs_file_clear_open_context(struct file *filp)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+ struct nfs_open_context *ctx = nfs_file_open_context(filp);
+
+ if (ctx) {
+ filp->private_data = NULL;
+ spin_lock(&inode->i_lock);
+ list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
+ spin_unlock(&inode->i_lock);
+ __put_nfs_open_context(ctx, filp->f_flags & O_DIRECT ? 0 : 1);
+ }
+}
+
+/*
+ * These allocate and release file read/write context information.
+ */
+int nfs_open(struct inode *inode, struct file *filp)
+{
+ struct nfs_open_context *ctx;
+
+ ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ nfs_file_set_open_context(filp, ctx);
+ put_nfs_open_context(ctx);
+ nfs_fscache_set_inode_cookie(inode, filp);
+ return 0;
+}
+
+int nfs_release(struct inode *inode, struct file *filp)
+{
+ nfs_file_clear_open_context(filp);
+ return 0;
+}
+
+/*
+ * This function is called whenever some part of NFS notices that
+ * the cached attributes have to be refreshed.
+ */
+int
+__nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
+{
+ int status = -ESTALE;
+ struct nfs_fattr *fattr = NULL;
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n",
+ inode->i_sb->s_id, (long long)NFS_FILEID(inode));
+
+ if (is_bad_inode(inode))
+ goto out;
+ if (NFS_STALE(inode))
+ goto out;
+
+ status = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out;
+
+ nfs_inc_stats(inode, NFSIOS_INODEREVALIDATE);
+ status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr);
+ if (status != 0) {
+ dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) getattr failed, error=%d\n",
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode), status);
+ if (status == -ESTALE) {
+ nfs_zap_caches(inode);
+ if (!S_ISDIR(inode->i_mode))
+ set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
+ }
+ goto out;
+ }
+
+ status = nfs_refresh_inode(inode, fattr);
+ if (status) {
+ dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n",
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode), status);
+ goto out;
+ }
+
+ if (nfsi->cache_validity & NFS_INO_INVALID_ACL)
+ nfs_zap_acl_cache(inode);
+
+ dfprintk(PAGECACHE, "NFS: (%s/%Ld) revalidation complete\n",
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode));
+
+ out:
+ nfs_free_fattr(fattr);
+ return status;
+}
+
+int nfs_attribute_timeout(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
+}
+
+static int nfs_attribute_cache_expired(struct inode *inode)
+{
+ if (nfs_have_delegated_attributes(inode))
+ return 0;
+ return nfs_attribute_timeout(inode);
+}
+
+/**
+ * nfs_revalidate_inode - Revalidate the inode attributes
+ * @server: pointer to nfs_server struct
+ * @inode: pointer to inode struct
+ *
+ * Updates inode attribute information by retrieving the data from the server.
+ */
+int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
+{
+ if (!(NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATTR)
+ && !nfs_attribute_cache_expired(inode))
+ return NFS_STALE(inode) ? -ESTALE : 0;
+ return __nfs_revalidate_inode(server, inode);
+}
+
+static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ if (mapping->nrpages != 0) {
+ int ret = invalidate_inode_pages2(mapping);
+ if (ret < 0)
+ return ret;
+ }
+ spin_lock(&inode->i_lock);
+ nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
+ if (S_ISDIR(inode->i_mode))
+ memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
+ spin_unlock(&inode->i_lock);
+ nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
+ nfs_fscache_reset_inode_cookie(inode);
+ dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n",
+ inode->i_sb->s_id, (long long)NFS_FILEID(inode));
+ return 0;
+}
+
+/**
+ * nfs_revalidate_mapping - Revalidate the pagecache
+ * @inode: pointer to host inode
+ * @mapping: pointer to mapping
+ */
+int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ int ret = 0;
+
+ if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
+ || nfs_attribute_cache_expired(inode)
+ || NFS_STALE(inode)) {
+ ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+ if (ret < 0)
+ goto out;
+ }
+ if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+ ret = nfs_invalidate_mapping(inode, mapping);
+out:
+ return ret;
+}
+
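+/*
+ * Apply weak cache consistency (WCC) data: when the server's pre-operation
+ * attributes match what we have cached, nothing else changed the file in
+ * the meantime, so the post-operation values can be adopted directly
+ * instead of forcing a full revalidation. E.g. if pre_size matches
+ * i_size_read() and there are no cached dirty pages, the new size is
+ * applied on the spot.
+ */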
+static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ unsigned long ret = 0;
+
+ if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
+ && (fattr->valid & NFS_ATTR_FATTR_CHANGE)
+ && inode->i_version == fattr->pre_change_attr) {
+ inode->i_version = fattr->change_attr;
+ if (S_ISDIR(inode->i_mode))
+ nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+ ret |= NFS_INO_INVALID_ATTR;
+ }
+ /* If we have atomic WCC data, we may update some attributes */
+ if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
+ && (fattr->valid & NFS_ATTR_FATTR_CTIME)
+ && timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
+ memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+ ret |= NFS_INO_INVALID_ATTR;
+ }
+
+ if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
+ && (fattr->valid & NFS_ATTR_FATTR_MTIME)
+ && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
+ memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+ if (S_ISDIR(inode->i_mode))
+ nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+ ret |= NFS_INO_INVALID_ATTR;
+ }
+ if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
+ && (fattr->valid & NFS_ATTR_FATTR_SIZE)
+ && i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
+ && nfsi->npages == 0) {
+ i_size_write(inode, nfs_size_to_loff_t(fattr->size));
+ ret |= NFS_INO_INVALID_ATTR;
+ }
+ return ret;
+}
+
+/**
+ * nfs_check_inode_attributes - verify consistency of the inode attribute cache
+ * @inode: pointer to inode
+ * @fattr: updated attributes
+ *
+ * Verifies the attribute cache. If we have just changed the attributes,
+ * so that fattr carries weak cache consistency data, then it may
+ * also update the ctime/mtime/change_attribute.
+ */
+static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fattr)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ loff_t cur_size, new_isize;
+ unsigned long invalid = 0;
+
+
+ /* Has the inode gone and changed behind our back? */
+ if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
+ return -EIO;
+ if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
+ return -EIO;
+
+ if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
+ inode->i_version != fattr->change_attr)
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+
+ /* Verify a few of the more important attributes */
+ if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+
+ if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
+ cur_size = i_size_read(inode);
+ new_isize = nfs_size_to_loff_t(fattr->size);
+ if (cur_size != new_isize && nfsi->npages == 0)
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ }
+
+ /* Have any file permissions changed? */
+ if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
+ invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+ if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && inode->i_uid != fattr->uid)
+ invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+ if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && inode->i_gid != fattr->gid)
+ invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
+
+ /* Has the link count changed? */
+ if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
+ invalid |= NFS_INO_INVALID_ATTR;
+
+ if ((fattr->valid & NFS_ATTR_FATTR_ATIME) && !timespec_equal(&inode->i_atime, &fattr->atime))
+ invalid |= NFS_INO_INVALID_ATIME;
+
+ if (invalid != 0)
+ nfsi->cache_validity |= invalid;
+
+ nfsi->read_cache_jiffies = fattr->time_start;
+ return 0;
+}
+
+static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
+{
+ if (!(fattr->valid & NFS_ATTR_FATTR_CTIME))
+ return 0;
+ return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
+}
+
+static int nfs_size_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
+{
+ if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
+ return 0;
+ return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
+}
+
+static atomic_long_t nfs_attr_generation_counter;
+
+static unsigned long nfs_read_attr_generation_counter(void)
+{
+ return atomic_long_read(&nfs_attr_generation_counter);
+}
+
+unsigned long nfs_inc_attr_generation_counter(void)
+{
+ return atomic_long_inc_return(&nfs_attr_generation_counter);
+}
+
+void nfs_fattr_init(struct nfs_fattr *fattr)
+{
+ fattr->valid = 0;
+ fattr->time_start = jiffies;
+ fattr->gencount = nfs_inc_attr_generation_counter();
+ fattr->owner_name = NULL;
+ fattr->group_name = NULL;
+}
+
+struct nfs_fattr *nfs_alloc_fattr(void)
+{
+ struct nfs_fattr *fattr;
+
+ fattr = kmalloc(sizeof(*fattr), GFP_NOFS);
+ if (fattr != NULL)
+ nfs_fattr_init(fattr);
+ return fattr;
+}
+
+struct nfs_fh *nfs_alloc_fhandle(void)
+{
+ struct nfs_fh *fh;
+
+ fh = kmalloc(sizeof(struct nfs_fh), GFP_NOFS);
+ if (fh != NULL)
+ fh->size = 0;
+ return fh;
+}
+
+#ifdef NFS_DEBUG
+/*
+ * _nfs_display_fhandle_hash - calculate the crc32 hash for the filehandle
+ * in the same way that wireshark does
+ *
+ * @fh: file handle
+ *
+ * For debugging only.
+ */
+u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh)
+{
+ /* wireshark uses 32-bit AUTODIN crc and does a bitwise
+ * not on the result */
+ return ~crc32(0xFFFFFFFF, &fh->data[0], fh->size);
+}
+
+/*
+ * _nfs_display_fhandle - display an NFS file handle on the console
+ *
+ * @fh: file handle to display
+ * @caption: display caption
+ *
+ * For debugging only.
+ */
+void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption)
+{
+ unsigned short i;
+
+ if (fh == NULL || fh->size == 0) {
+ printk(KERN_DEFAULT "%s at %p is empty\n", caption, fh);
+ return;
+ }
+
+ printk(KERN_DEFAULT "%s at %p is %u bytes, crc: 0x%08x:\n",
+ caption, fh, fh->size, _nfs_display_fhandle_hash(fh));
+ for (i = 0; i < fh->size; i += 16) {
+ __be32 *pos = (__be32 *)&fh->data[i];
+
+ switch ((fh->size - i - 1) >> 2) {
+ case 0:
+ printk(KERN_DEFAULT " %08x\n",
+ be32_to_cpup(pos));
+ break;
+ case 1:
+ printk(KERN_DEFAULT " %08x %08x\n",
+ be32_to_cpup(pos), be32_to_cpup(pos + 1));
+ break;
+ case 2:
+ printk(KERN_DEFAULT " %08x %08x %08x\n",
+ be32_to_cpup(pos), be32_to_cpup(pos + 1),
+ be32_to_cpup(pos + 2));
+ break;
+ default:
+ printk(KERN_DEFAULT " %08x %08x %08x %08x\n",
+ be32_to_cpup(pos), be32_to_cpup(pos + 1),
+ be32_to_cpup(pos + 2), be32_to_cpup(pos + 3));
+ }
+ }
+}
+#endif
+
+/**
+ * nfs_inode_attrs_need_update - check if the inode attributes need updating
+ * @inode: pointer to inode
+ * @fattr: attributes
+ *
+ * Attempt to divine whether or not an RPC call reply carrying stale
+ * attributes got scheduled after another call carrying updated ones.
+ *
+ * To do so, the function first assumes that a more recent ctime means
+ * that the attributes in fattr are newer; however, it also attempts to
+ * catch the case where ctime either didn't change, or went backwards
+ * (if someone reset the clock on the server) by looking at whether
+ * or not this RPC call was started after the inode was last updated.
+ * Note also the check for wraparound of 'attr_gencount'.
+ *
+ * The function returns 'true' if it thinks the attributes in 'fattr' are
+ * more recent than the ones cached in the inode.
+ *
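+ * The gencount comparison uses signed subtraction so that it keeps
+ * working if the global attribute generation counter wraps, the same
+ * idiom as the kernel's time_after().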
+ */
+static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
+{
+ const struct nfs_inode *nfsi = NFS_I(inode);
+
+ return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
+ nfs_ctime_need_update(inode, fattr) ||
+ nfs_size_need_update(inode, fattr) ||
+ ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
+}
+
+static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
+{
+ if (nfs_inode_attrs_need_update(inode, fattr))
+ return nfs_update_inode(inode, fattr);
+ return nfs_check_inode_attributes(inode, fattr);
+}
+
+/**
+ * nfs_refresh_inode - try to update the inode attribute cache
+ * @inode: pointer to inode
+ * @fattr: updated attributes
+ *
+ * Check that an RPC call that returned attributes has not overlapped with
+ * other recent updates of the inode metadata, then decide whether it is
+ * safe to do a full update of the inode attributes, or whether just to
+ * call nfs_check_inode_attributes.
+ */
+int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+ int status;
+
+ if ((fattr->valid & NFS_ATTR_FATTR) == 0)
+ return 0;
+ spin_lock(&inode->i_lock);
+ status = nfs_refresh_inode_locked(inode, fattr);
+ spin_unlock(&inode->i_lock);
+
+ return status;
+}
+
+static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ if (S_ISDIR(inode->i_mode))
+ nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+ if ((fattr->valid & NFS_ATTR_FATTR) == 0)
+ return 0;
+ return nfs_refresh_inode_locked(inode, fattr);
+}
+
+/**
+ * nfs_post_op_update_inode - try to update the inode attribute cache
+ * @inode: pointer to inode
+ * @fattr: updated attributes
+ *
+ * After an operation that has changed the inode metadata, mark the
+ * attribute cache as being invalid, then try to update it.
+ *
+ * NB: if the server didn't return any post op attributes, this
+ * function will force the retrieval of attributes before the next
+ * NFS request. Thus it should be used only for operations that
+ * are expected to change one or more attributes, to avoid
+ * unnecessary NFS requests and trips through nfs_update_inode().
+ */
+int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+ int status;
+
+ spin_lock(&inode->i_lock);
+ status = nfs_post_op_update_inode_locked(inode, fattr);
+ spin_unlock(&inode->i_lock);
+ return status;
+}
+
+/**
+ * nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
+ * @inode: pointer to inode
+ * @fattr: updated attributes
+ *
+ * After an operation that has changed the inode metadata, mark the
+ * attribute cache as being invalid, then try to update it. Fake up
+ * weak cache consistency data, if none exist.
+ *
+ * This function is mainly designed to be used by the ->write_done() functions.
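+ *
+ * "Fake up" here means: when the server returned post-op attributes but
+ * no pre-op ones, the currently cached change attribute, ctime, mtime and
+ * size are copied into the pre-op fields, so that nfs_wcc_update_inode()
+ * can still apply the reply atomically.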
+ */
+int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr)
+{
+ int status;
+
+ spin_lock(&inode->i_lock);
+ /* Don't do a WCC update if these attributes are already stale */
+ if ((fattr->valid & NFS_ATTR_FATTR) == 0 ||
+ !nfs_inode_attrs_need_update(inode, fattr)) {
+ fattr->valid &= ~(NFS_ATTR_FATTR_PRECHANGE
+ | NFS_ATTR_FATTR_PRESIZE
+ | NFS_ATTR_FATTR_PREMTIME
+ | NFS_ATTR_FATTR_PRECTIME);
+ goto out_noforce;
+ }
+ if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
+ (fattr->valid & NFS_ATTR_FATTR_PRECHANGE) == 0) {
+ fattr->pre_change_attr = inode->i_version;
+ fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
+ }
+ if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
+ (fattr->valid & NFS_ATTR_FATTR_PRECTIME) == 0) {
+ memcpy(&fattr->pre_ctime, &inode->i_ctime, sizeof(fattr->pre_ctime));
+ fattr->valid |= NFS_ATTR_FATTR_PRECTIME;
+ }
+ if ((fattr->valid & NFS_ATTR_FATTR_MTIME) != 0 &&
+ (fattr->valid & NFS_ATTR_FATTR_PREMTIME) == 0) {
+ memcpy(&fattr->pre_mtime, &inode->i_mtime, sizeof(fattr->pre_mtime));
+ fattr->valid |= NFS_ATTR_FATTR_PREMTIME;
+ }
+ if ((fattr->valid & NFS_ATTR_FATTR_SIZE) != 0 &&
+ (fattr->valid & NFS_ATTR_FATTR_PRESIZE) == 0) {
+ fattr->pre_size = i_size_read(inode);
+ fattr->valid |= NFS_ATTR_FATTR_PRESIZE;
+ }
+out_noforce:
+ status = nfs_post_op_update_inode_locked(inode, fattr);
+ spin_unlock(&inode->i_lock);
+ return status;
+}
+
+/*
+ * Many nfs protocol calls return the new file attributes after
+ * an operation. Here we update the inode to reflect the state
+ * of the server's inode.
+ *
+ * This is a bit tricky because we have to make sure all dirty pages
+ * have been sent off to the server before calling invalidate_inode_pages.
+ * To make sure no other process adds more write requests while we try
+ * our best to flush them, we make them sleep during the attribute refresh.
+ *
+ * A very similar scenario holds for the dir cache.
+ */
+static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+{
+ struct nfs_server *server;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ loff_t cur_isize, new_isize;
+ unsigned long invalid = 0;
+ unsigned long now = jiffies;
+ unsigned long save_cache_validity;
+
+ dfprintk(VFS, "NFS: %s(%s/%ld fh_crc=0x%08x ct=%d info=0x%x)\n",
+ __func__, inode->i_sb->s_id, inode->i_ino,
+ nfs_display_fhandle_hash(NFS_FH(inode)),
+ atomic_read(&inode->i_count), fattr->valid);
+
+ if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
+ goto out_fileid;
+
+ /*
+ * Make sure the inode's type hasn't changed.
+ */
+ if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
+ goto out_changed;
+
+ server = NFS_SERVER(inode);
+ /* Update the fsid? */
+ if (S_ISDIR(inode->i_mode) && (fattr->valid & NFS_ATTR_FATTR_FSID) &&
+ !nfs_fsid_equal(&server->fsid, &fattr->fsid) &&
+ !IS_AUTOMOUNT(inode))
+ server->fsid = fattr->fsid;
+
+ /*
+ * Update the read time so we don't revalidate too often.
+ */
+ nfsi->read_cache_jiffies = fattr->time_start;
+
+ save_cache_validity = nfsi->cache_validity;
+ nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ATIME
+ | NFS_INO_REVAL_FORCED
+ | NFS_INO_REVAL_PAGECACHE);
+
+ /* Do atomic weak cache consistency updates */
+ invalid |= nfs_wcc_update_inode(inode, fattr);
+
+ /* More cache consistency checks */
+ if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
+ if (inode->i_version != fattr->change_attr) {
+ dprintk("NFS: change_attr change on server for file %s/%ld\n",
+ inode->i_sb->s_id, inode->i_ino);
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ if (S_ISDIR(inode->i_mode))
+ nfs_force_lookup_revalidate(inode);
+ inode->i_version = fattr->change_attr;
+ }
+ } else if (server->caps & NFS_CAP_CHANGE_ATTR)
+ invalid |= save_cache_validity;
+
+ if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
+ /* NFSv2/v3: Check if the mtime agrees */
+ if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
+ dprintk("NFS: mtime change on server for file %s/%ld\n",
+ inode->i_sb->s_id, inode->i_ino);
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+ if (S_ISDIR(inode->i_mode))
+ nfs_force_lookup_revalidate(inode);
+ memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+ }
+ } else if (server->caps & NFS_CAP_MTIME)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_DATA
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_REVAL_FORCED);
+
+ if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
+ /* If ctime has changed we should definitely clear access+acl caches */
+ if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ /* and probably clear data for a directory too as utimes can cause
+ * havoc with our cache.
+ */
+ if (S_ISDIR(inode->i_mode)) {
+ invalid |= NFS_INO_INVALID_DATA;
+ nfs_force_lookup_revalidate(inode);
+ }
+ memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+ }
+ } else if (server->caps & NFS_CAP_CTIME)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
+
+ /* Check if our cached file size is stale */
+ if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
+ new_isize = nfs_size_to_loff_t(fattr->size);
+ cur_isize = i_size_read(inode);
+ if (new_isize != cur_isize) {
+ /* Do we perhaps have any outstanding writes, or has
+ * the file grown beyond our last write? */
+ if ((nfsi->npages == 0 && !test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) ||
+ new_isize > cur_isize) {
+ i_size_write(inode, new_isize);
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+ }
+ dprintk("NFS: isize change on server for file %s/%ld "
+ "(%Ld to %Ld)\n",
+ inode->i_sb->s_id,
+ inode->i_ino,
+ (long long)cur_isize,
+ (long long)new_isize);
+ }
+ } else
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_REVAL_FORCED);
+
+ if (fattr->valid & NFS_ATTR_FATTR_ATIME)
+ memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+ else if (server->caps & NFS_CAP_ATIME)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATIME
+ | NFS_INO_REVAL_FORCED);
+
+ if (fattr->valid & NFS_ATTR_FATTR_MODE) {
+ if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
+ umode_t newmode = inode->i_mode & S_IFMT;
+ newmode |= fattr->mode & S_IALLUGO;
+ inode->i_mode = newmode;
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ }
+ } else if (server->caps & NFS_CAP_MODE)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
+
+ if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
+ if (inode->i_uid != fattr->uid) {
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ inode->i_uid = fattr->uid;
+ }
+ } else if (server->caps & NFS_CAP_OWNER)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
+
+ if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
+ if (inode->i_gid != fattr->gid) {
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ inode->i_gid = fattr->gid;
+ }
+ } else if (server->caps & NFS_CAP_OWNER_GROUP)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_INVALID_ACCESS
+ | NFS_INO_INVALID_ACL
+ | NFS_INO_REVAL_FORCED);
+
+ if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
+ if (inode->i_nlink != fattr->nlink) {
+ invalid |= NFS_INO_INVALID_ATTR;
+ if (S_ISDIR(inode->i_mode))
+ invalid |= NFS_INO_INVALID_DATA;
+ set_nlink(inode, fattr->nlink);
+ }
+ } else if (server->caps & NFS_CAP_NLINK)
+ invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
+ | NFS_INO_REVAL_FORCED);
+
+ if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
+		/*
+		 * report the blocks in 512-byte units
+		 */
+ inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
+ }
+ if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
+ inode->i_blocks = fattr->du.nfs2.blocks;
+
+ /* Update attrtimeo value if we're out of the unstable period */
+ if (invalid & NFS_INO_INVALID_ATTR) {
+ nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
+ nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = now;
+ nfsi->attr_gencount = nfs_inc_attr_generation_counter();
+ } else {
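+		/*
+		 * The attributes were stable for a full timeout interval:
+		 * back off by doubling attrtimeo, capped at the maximum.
+		 */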
+ if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
+ if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
+ nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
+ nfsi->attrtimeo_timestamp = now;
+ }
+ }
+ invalid &= ~NFS_INO_INVALID_ATTR;
+ /* Don't invalidate the data if we were to blame */
+ if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
+ || S_ISLNK(inode->i_mode)))
+ invalid &= ~NFS_INO_INVALID_DATA;
+ if (!nfs_have_delegation(inode, FMODE_READ) ||
+ (save_cache_validity & NFS_INO_REVAL_FORCED))
+ nfsi->cache_validity |= invalid;
+
+ return 0;
+ out_changed:
+ /*
+ * Big trouble! The inode has become a different object.
+ */
+ printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
+ __func__, inode->i_ino, inode->i_mode, fattr->mode);
+ out_err:
+ /*
+ * No need to worry about unhashing the dentry, as the
+ * lookup validation will know that the inode is bad.
+ * (But we fall through to invalidate the caches.)
+ */
+ nfs_invalidate_inode(inode);
+ return -ESTALE;
+
+ out_fileid:
+ printk(KERN_ERR "NFS: server %s error: fileid changed\n"
+ "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
+ NFS_SERVER(inode)->nfs_client->cl_hostname, inode->i_sb->s_id,
+ (long long)nfsi->fileid, (long long)fattr->fileid);
+ goto out_err;
+}
+
+#ifdef CONFIG_NFS_V4
+
+/*
+ * Clean out any remaining NFSv4 state that might be left over due
+ * to open() calls that passed nfs_atomic_lookup, but failed to call
+ * nfs_open().
+ */
+void nfs4_evict_inode(struct inode *inode)
+{
+ truncate_inode_pages(&inode->i_data, 0);
+ end_writeback(inode);
+ pnfs_return_layout(inode);
+ pnfs_destroy_layout(NFS_I(inode));
+ /* If we are holding a delegation, return it! */
+ nfs_inode_return_delegation_noreclaim(inode);
+ /* First call standard NFS clear_inode() code */
+ nfs_clear_inode(inode);
+}
+#endif
+
+struct inode *nfs_alloc_inode(struct super_block *sb)
+{
+ struct nfs_inode *nfsi;
+	nfsi = kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
+ if (!nfsi)
+ return NULL;
+ nfsi->flags = 0UL;
+ nfsi->cache_validity = 0UL;
+#ifdef CONFIG_NFS_V3_ACL
+ nfsi->acl_access = ERR_PTR(-EAGAIN);
+ nfsi->acl_default = ERR_PTR(-EAGAIN);
+#endif
+#ifdef CONFIG_NFS_V4
+ nfsi->nfs4_acl = NULL;
+#endif /* CONFIG_NFS_V4 */
+ return &nfsi->vfs_inode;
+}
+
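+/*
+ * Inode freeing is deferred through call_rcu() so that lock-free
+ * (RCU-walk) path lookups that may still be inspecting the inode can
+ * complete safely before the memory is returned to the slab cache.
+ */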
+static void nfs_i_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+ kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
+}
+
+void nfs_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, nfs_i_callback);
+}
+
+static inline void nfs4_init_once(struct nfs_inode *nfsi)
+{
+#ifdef CONFIG_NFS_V4
+ INIT_LIST_HEAD(&nfsi->open_states);
+ nfsi->delegation = NULL;
+ nfsi->delegation_state = 0;
+ init_rwsem(&nfsi->rwsem);
+ nfsi->layout = NULL;
+ atomic_set(&nfsi->commits_outstanding, 0);
+#endif
+}
+
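+/*
+ * Slab constructor: this runs once when an object is first created in
+ * the cache, not on every allocation, so only initialize fields that
+ * every freed inode leaves in this neutral state.
+ */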
+static void init_once(void *foo)
+{
+	struct nfs_inode *nfsi = foo;
+
+ inode_init_once(&nfsi->vfs_inode);
+ INIT_LIST_HEAD(&nfsi->open_files);
+ INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
+ INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
+ INIT_LIST_HEAD(&nfsi->commit_list);
+ nfsi->npages = 0;
+ nfsi->ncommit = 0;
+ atomic_set(&nfsi->silly_count, 1);
+ INIT_HLIST_HEAD(&nfsi->silly_list);
+ init_waitqueue_head(&nfsi->waitqueue);
+ nfs4_init_once(nfsi);
+}
+
+static int __init nfs_init_inodecache(void)
+{
+ nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
+ sizeof(struct nfs_inode),
+ 0, (SLAB_RECLAIM_ACCOUNT|
+ SLAB_MEM_SPREAD),
+ init_once);
+ if (nfs_inode_cachep == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nfs_destroy_inodecache(void)
+{
+ kmem_cache_destroy(nfs_inode_cachep);
+}
+
+struct workqueue_struct *nfsiod_workqueue;
+
+/*
+ * start up the nfsiod workqueue
+ */
+static int nfsiod_start(void)
+{
+ struct workqueue_struct *wq;
+ dprintk("RPC: creating workqueue nfsiod\n");
+ wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0);
+ if (wq == NULL)
+ return -ENOMEM;
+ nfsiod_workqueue = wq;
+ return 0;
+}
+
+/*
+ * Destroy the nfsiod workqueue
+ */
+static void nfsiod_stop(void)
+{
+ struct workqueue_struct *wq;
+
+ wq = nfsiod_workqueue;
+ if (wq == NULL)
+ return;
+ nfsiod_workqueue = NULL;
+ destroy_workqueue(wq);
+}
+
+int nfs_net_id;
+EXPORT_SYMBOL_GPL(nfs_net_id);
+
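+/*
+ * Per-network-namespace setup and teardown: called once for each
+ * struct net as it is created and destroyed, via the pernet_operations
+ * registered below.
+ */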
+static int nfs_net_init(struct net *net)
+{
+ nfs_clients_init(net);
+ return nfs_dns_resolver_cache_init(net);
+}
+
+static void nfs_net_exit(struct net *net)
+{
+ nfs_dns_resolver_cache_destroy(net);
+ nfs_cleanup_cb_ident_idr(net);
+}
+
+static struct pernet_operations nfs_net_ops = {
+ .init = nfs_net_init,
+ .exit = nfs_net_exit,
+ .id = &nfs_net_id,
+ .size = sizeof(struct nfs_net),
+};
+
+/*
+ * Initialize NFS
+ */
+static int __init init_nfs_fs(void)
+{
+ int err;
+
+ err = nfs_idmap_init();
+ if (err < 0)
+ goto out10;
+
+ err = nfs_dns_resolver_init();
+ if (err < 0)
+ goto out9;
+
+ err = register_pernet_subsys(&nfs_net_ops);
+ if (err < 0)
+ goto out8;
+
+ err = nfs_fscache_register();
+ if (err < 0)
+ goto out7;
+
+ err = nfsiod_start();
+ if (err)
+ goto out6;
+
+ err = nfs_fs_proc_init();
+ if (err)
+ goto out5;
+
+ err = nfs_init_nfspagecache();
+ if (err)
+ goto out4;
+
+ err = nfs_init_inodecache();
+ if (err)
+ goto out3;
+
+ err = nfs_init_readpagecache();
+ if (err)
+ goto out2;
+
+ err = nfs_init_writepagecache();
+ if (err)
+ goto out1;
+
+ err = nfs_init_directcache();
+ if (err)
+ goto out0;
+
+#ifdef CONFIG_PROC_FS
+ rpc_proc_register(&init_net, &nfs_rpcstat);
+#endif
+	err = register_nfs_fs();
+	if (err != 0)
+		goto out;
+ return 0;
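+/*
+ * Error unwinding: each label below undoes the corresponding init step
+ * above, in reverse order of initialization.
+ */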
+out:
+#ifdef CONFIG_PROC_FS
+ rpc_proc_unregister(&init_net, "nfs");
+#endif
+ nfs_destroy_directcache();
+out0:
+ nfs_destroy_writepagecache();
+out1:
+ nfs_destroy_readpagecache();
+out2:
+ nfs_destroy_inodecache();
+out3:
+ nfs_destroy_nfspagecache();
+out4:
+ nfs_fs_proc_exit();
+out5:
+ nfsiod_stop();
+out6:
+ nfs_fscache_unregister();
+out7:
+ unregister_pernet_subsys(&nfs_net_ops);
+out8:
+ nfs_dns_resolver_destroy();
+out9:
+ nfs_idmap_quit();
+out10:
+ return err;
+}
+
+static void __exit exit_nfs_fs(void)
+{
+ nfs_destroy_directcache();
+ nfs_destroy_writepagecache();
+ nfs_destroy_readpagecache();
+ nfs_destroy_inodecache();
+ nfs_destroy_nfspagecache();
+ nfs_fscache_unregister();
+ unregister_pernet_subsys(&nfs_net_ops);
+ nfs_dns_resolver_destroy();
+ nfs_idmap_quit();
+#ifdef CONFIG_PROC_FS
+ rpc_proc_unregister(&init_net, "nfs");
+#endif
+ unregister_nfs_fs();
+ nfs_fs_proc_exit();
+ nfsiod_stop();
+}
+
+/* Not quite true; I just maintain it */
+MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+MODULE_LICENSE("GPL");
+module_param(enable_ino64, bool, 0644);
+
+module_init(init_nfs_fs)
+module_exit(exit_nfs_fs)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
new file mode 100644
index 00000000..b777bdab
--- /dev/null
+++ b/fs/nfs/internal.h
@@ -0,0 +1,468 @@
+/*
+ * NFS internal definitions
+ */
+
+#include "nfs4_fs.h"
+#include <linux/mount.h>
+#include <linux/security.h>
+
+#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
+
+struct nfs_string;
+
+/* Maximum number of readahead requests
+ * FIXME: this should really be a sysctl so that users may tune it to suit
+ * their needs. People that do NFS over a slow network, might for
+ * instance want to reduce it to something closer to 1 for improved
+ * interactive response.
+ */
+#define NFS_MAX_READAHEAD (RPC_DEF_SLOT_TABLE - 1)
+
+/*
+ * Determine if sessions are in use.
+ */
+static inline int nfs4_has_session(const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (clp->cl_session)
+ return 1;
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
+static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp))
+ return (clp->cl_session->flags & SESSION4_PERSIST);
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
+static inline void nfs_attr_check_mountpoint(struct super_block *parent, struct nfs_fattr *fattr)
+{
+ if (!nfs_fsid_equal(&NFS_SB(parent)->fsid, &fattr->fsid))
+ fattr->valid |= NFS_ATTR_FATTR_MOUNTPOINT;
+}
+
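+/*
+ * Substitute the mounted-on fileid for the real one, but only when the
+ * server supplied it and this object is a mountpoint or a referral.
+ */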
+static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
+{
+ if (((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) == 0) ||
+ (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
+ ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
+ return 0;
+
+ fattr->fileid = fattr->mounted_on_fileid;
+ return 1;
+}
+
+struct nfs_clone_mount {
+ const struct super_block *sb;
+ const struct dentry *dentry;
+ struct nfs_fh *fh;
+ struct nfs_fattr *fattr;
+ char *hostname;
+ char *mnt_path;
+ struct sockaddr *addr;
+ size_t addrlen;
+ rpc_authflavor_t authflavor;
+};
+
+/*
+ * Note: RFC 1813 doesn't limit the number of auth flavors that
+ * a server can return, so make something up.
+ */
+#define NFS_MAX_SECFLAVORS (12)
+
+/*
+ * Value used if the user did not specify a port value.
+ */
+#define NFS_UNSPEC_PORT (-1)
+
+/*
+ * Maximum number of pages that readdir can use for creating
+ * a vmapped array of pages.
+ */
+#define NFS_MAX_READDIR_PAGES 8
+
+/*
+ * In-kernel mount arguments
+ */
+struct nfs_parsed_mount_data {
+ int flags;
+ int rsize, wsize;
+ int timeo, retrans;
+ int acregmin, acregmax,
+ acdirmin, acdirmax;
+ int namlen;
+ unsigned int options;
+ unsigned int bsize;
+ unsigned int auth_flavor_len;
+ rpc_authflavor_t auth_flavors[1];
+ char *client_address;
+ unsigned int version;
+ unsigned int minorversion;
+ char *fscache_uniq;
+
+ struct {
+ struct sockaddr_storage address;
+ size_t addrlen;
+ char *hostname;
+ u32 version;
+ int port;
+ unsigned short protocol;
+ } mount_server;
+
+ struct {
+ struct sockaddr_storage address;
+ size_t addrlen;
+ char *hostname;
+ char *export_path;
+ int port;
+ unsigned short protocol;
+ } nfs_server;
+
+ struct security_mnt_opts lsm_opts;
+ struct net *net;
+};
+
+/* mount_clnt.c */
+struct nfs_mount_request {
+ struct sockaddr *sap;
+ size_t salen;
+ char *hostname;
+ char *dirpath;
+ u32 version;
+ unsigned short protocol;
+ struct nfs_fh *fh;
+ int noresvport;
+ unsigned int *auth_flav_len;
+ rpc_authflavor_t *auth_flavs;
+ struct net *net;
+};
+
+extern int nfs_mount(struct nfs_mount_request *info);
+extern void nfs_umount(const struct nfs_mount_request *info);
+
+/* client.c */
+extern const struct rpc_program nfs_program;
+extern void nfs_clients_init(struct net *net);
+
+extern void nfs_cleanup_cb_ident_idr(struct net *);
+extern void nfs_put_client(struct nfs_client *);
+extern struct nfs_client *nfs4_find_client_ident(struct net *, int);
+extern struct nfs_client *
+nfs4_find_client_sessionid(struct net *, const struct sockaddr *,
+ struct nfs4_sessionid *);
+extern struct nfs_server *nfs_create_server(
+ const struct nfs_parsed_mount_data *,
+ struct nfs_fh *);
+extern struct nfs_server *nfs4_create_server(
+ const struct nfs_parsed_mount_data *,
+ struct nfs_fh *);
+extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
+ struct nfs_fh *);
+extern void nfs_free_server(struct nfs_server *server);
+extern struct nfs_server *nfs_clone_server(struct nfs_server *,
+ struct nfs_fh *,
+ struct nfs_fattr *,
+ rpc_authflavor_t);
+extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
+extern int nfs4_check_client_ready(struct nfs_client *clp);
+extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
+ const struct sockaddr *ds_addr,
+ int ds_addrlen, int ds_proto);
+#ifdef CONFIG_PROC_FS
+extern int __init nfs_fs_proc_init(void);
+extern void nfs_fs_proc_exit(void);
+#else
+static inline int nfs_fs_proc_init(void)
+{
+ return 0;
+}
+static inline void nfs_fs_proc_exit(void)
+{
+}
+#endif
+
+/* nfs4namespace.c */
+#ifdef CONFIG_NFS_V4
+extern struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry);
+#else
+static inline
+struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
+{
+ return ERR_PTR(-ENOENT);
+}
+#endif
+
+/* callback_xdr.c */
+extern struct svc_version nfs4_callback_version1;
+extern struct svc_version nfs4_callback_version4;
+
+/* pagelist.c */
+extern int __init nfs_init_nfspagecache(void);
+extern void nfs_destroy_nfspagecache(void);
+extern int __init nfs_init_readpagecache(void);
+extern void nfs_destroy_readpagecache(void);
+extern int __init nfs_init_writepagecache(void);
+extern void nfs_destroy_writepagecache(void);
+
+extern int __init nfs_init_directcache(void);
+extern void nfs_destroy_directcache(void);
+
+/* nfs2xdr.c */
+extern int nfs_stat_to_errno(enum nfs_stat);
+extern struct rpc_procinfo nfs_procedures[];
+extern int nfs2_decode_dirent(struct xdr_stream *,
+ struct nfs_entry *, int);
+
+/* nfs3xdr.c */
+extern struct rpc_procinfo nfs3_procedures[];
+extern int nfs3_decode_dirent(struct xdr_stream *,
+ struct nfs_entry *, int);
+
+/* nfs4xdr.c */
+#ifdef CONFIG_NFS_V4
+extern int nfs4_decode_dirent(struct xdr_stream *,
+ struct nfs_entry *, int);
+#endif
+#ifdef CONFIG_NFS_V4_1
+extern const u32 nfs41_maxread_overhead;
+extern const u32 nfs41_maxwrite_overhead;
+#endif
+
+/* nfs4proc.c */
+#ifdef CONFIG_NFS_V4
+extern struct rpc_procinfo nfs4_procedures[];
+#endif
+
+extern int nfs4_init_ds_session(struct nfs_client *clp);
+
+/* proc.c */
+void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
+extern int nfs_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr, rpc_authflavor_t authflavour,
+ int noresvport);
+
+/* dir.c */
+extern int nfs_access_cache_shrinker(struct shrinker *shrink,
+ struct shrink_control *sc);
+
+/* inode.c */
+extern struct workqueue_struct *nfsiod_workqueue;
+extern struct inode *nfs_alloc_inode(struct super_block *sb);
+extern void nfs_destroy_inode(struct inode *);
+extern int nfs_write_inode(struct inode *, struct writeback_control *);
+extern void nfs_evict_inode(struct inode *);
+#ifdef CONFIG_NFS_V4
+extern void nfs4_evict_inode(struct inode *);
+#endif
+void nfs_zap_acl_cache(struct inode *inode);
+extern int nfs_wait_bit_killable(void *word);
+
+/* super.c */
+extern struct file_system_type nfs_xdev_fs_type;
+#ifdef CONFIG_NFS_V4
+extern struct file_system_type nfs4_xdev_fs_type;
+extern struct file_system_type nfs4_referral_fs_type;
+#endif
+
+extern struct rpc_stat nfs_rpcstat;
+
+extern int __init register_nfs_fs(void);
+extern void __exit unregister_nfs_fs(void);
+extern void nfs_sb_active(struct super_block *sb);
+extern void nfs_sb_deactive(struct super_block *sb);
+
+/* namespace.c */
+extern char *nfs_path(char **p, struct dentry *dentry,
+ char *buffer, ssize_t buflen);
+extern struct vfsmount *nfs_d_automount(struct path *path);
+#ifdef CONFIG_NFS_V4
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
+#endif
+
+/* getroot.c */
+extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
+ const char *);
+#ifdef CONFIG_NFS_V4
+extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
+ const char *);
+
+extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
+#endif
+
+struct nfs_pageio_descriptor;
+/* read.c */
+extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops);
+extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
+extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
+ struct list_head *head);
+
+extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode);
+extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
+extern void nfs_readdata_release(struct nfs_read_data *rdata);
+
+/* write.c */
+extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
+ struct list_head *head);
+extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode, int ioflags);
+extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
+extern void nfs_writedata_release(struct nfs_write_data *wdata);
+extern void nfs_commit_free(struct nfs_write_data *p);
+extern int nfs_initiate_write(struct nfs_write_data *data,
+ struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how);
+extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
+extern int nfs_initiate_commit(struct nfs_write_data *data,
+ struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how);
+extern void nfs_init_commit(struct nfs_write_data *data,
+ struct list_head *head,
+ struct pnfs_layout_segment *lseg);
+void nfs_retry_commit(struct list_head *page_list,
+ struct pnfs_layout_segment *lseg);
+void nfs_commit_clear_lock(struct nfs_inode *nfsi);
+void nfs_commitdata_release(void *data);
+void nfs_commit_release_pages(struct nfs_write_data *data);
+void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head);
+void nfs_request_remove_commit_list(struct nfs_page *req);
+
+#ifdef CONFIG_MIGRATION
+extern int nfs_migrate_page(struct address_space *,
+ struct page *, struct page *, enum migrate_mode);
+#else
+#define nfs_migrate_page NULL
+#endif
+
+/* nfs4proc.c */
+extern void __nfs4_read_done_cb(struct nfs_read_data *);
+extern void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data);
+extern int nfs4_init_client(struct nfs_client *clp,
+ const struct rpc_timeout *timeparms,
+ const char *ip_addr,
+ rpc_authflavor_t authflavour,
+ int noresvport);
+extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
+extern int _nfs4_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+extern int _nfs4_call_sync_session(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+
+/*
+ * Determine the device name as a string
+ */
+static inline char *nfs_devname(struct dentry *dentry,
+ char *buffer, ssize_t buflen)
+{
+ char *dummy;
+ return nfs_path(&dummy, dentry, buffer, buflen);
+}
+
+/*
+ * Determine the actual block size (and log2 thereof)
+ */
+static inline
+unsigned long nfs_block_bits(unsigned long bsize, unsigned char *nrbitsp)
+{
+ /* make sure blocksize is a power of two */
+ if ((bsize & (bsize - 1)) || nrbitsp) {
+ unsigned char nrbits;
+
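+		/* Find the highest set bit: rounds bsize down to a power of two. */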
+ for (nrbits = 31; nrbits && !(bsize & (1 << nrbits)); nrbits--)
+ ;
+ bsize = 1 << nrbits;
+ if (nrbitsp)
+ *nrbitsp = nrbits;
+ }
+
+ return bsize;
+}
+
+/*
+ * Calculate the number of 512-byte blocks used.
+ */
+static inline blkcnt_t nfs_calc_block_size(u64 tsize)
+{
+ blkcnt_t used = (tsize + 511) >> 9;
+ return (used > ULONG_MAX) ? ULONG_MAX : used;
+}
+
+/*
+ * Compute and set NFS server blocksize
+ */
+static inline
+unsigned long nfs_block_size(unsigned long bsize, unsigned char *nrbitsp)
+{
+ if (bsize < NFS_MIN_FILE_IO_SIZE)
+ bsize = NFS_DEF_FILE_IO_SIZE;
+ else if (bsize >= NFS_MAX_FILE_IO_SIZE)
+ bsize = NFS_MAX_FILE_IO_SIZE;
+
+ return nfs_block_bits(bsize, nrbitsp);
+}
+
+/*
+ * Determine the maximum file size for a superblock
+ */
+static inline
+void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
+{
+ sb->s_maxbytes = (loff_t)maxfilesize;
+ if (sb->s_maxbytes > MAX_LFS_FILESIZE || sb->s_maxbytes <= 0)
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+}
+
+/*
+ * Determine the number of bytes of data the page contains
+ */
+static inline
+unsigned int nfs_page_length(struct page *page)
+{
+ loff_t i_size = i_size_read(page->mapping->host);
+
+ if (i_size > 0) {
+ pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+ if (page->index < end_index)
+ return PAGE_CACHE_SIZE;
+ if (page->index == end_index)
+ return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1;
+ }
+ return 0;
+}
+
+/*
+ * Convert a umode to a dirent->d_type
+ *
+ * The DT_* values are the S_IFMT type bits shifted down by 12, so the
+ * conversion is a simple shift and mask.
+ */
+static inline
+unsigned char nfs_umode_to_dtype(umode_t mode)
+{
+ return (mode >> 12) & 15;
+}
+
+/*
+ * Determine the number of pages in an array of length 'len' and
+ * with a base offset of 'base'
+ */
+static inline
+unsigned int nfs_page_array_len(unsigned int base, size_t len)
+{
+ return ((unsigned long)len + (unsigned long)base +
+ PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
+
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
new file mode 100644
index 00000000..c5832487
--- /dev/null
+++ b/fs/nfs/iostat.h
@@ -0,0 +1,71 @@
+/*
+ * linux/fs/nfs/iostat.h
+ *
+ * Declarations for NFS client per-mount statistics
+ *
+ * Copyright (C) 2005, 2006 Chuck Lever <cel@netapp.com>
+ *
+ */
+
+#ifndef _NFS_IOSTAT
+#define _NFS_IOSTAT
+
+#include <linux/percpu.h>
+#include <linux/cache.h>
+#include <linux/nfs_iostat.h>
+
+struct nfs_iostats {
+ unsigned long long bytes[__NFSIOS_BYTESMAX];
+#ifdef CONFIG_NFS_FSCACHE
+ unsigned long long fscache[__NFSIOS_FSCACHEMAX];
+#endif
+ unsigned long events[__NFSIOS_COUNTSMAX];
+} ____cacheline_aligned;
+
+static inline void nfs_inc_server_stats(const struct nfs_server *server,
+ enum nfs_stat_eventcounters stat)
+{
+ this_cpu_inc(server->io_stats->events[stat]);
+}
+
+static inline void nfs_inc_stats(const struct inode *inode,
+ enum nfs_stat_eventcounters stat)
+{
+ nfs_inc_server_stats(NFS_SERVER(inode), stat);
+}
+
+static inline void nfs_add_server_stats(const struct nfs_server *server,
+ enum nfs_stat_bytecounters stat,
+ long addend)
+{
+ this_cpu_add(server->io_stats->bytes[stat], addend);
+}
+
+static inline void nfs_add_stats(const struct inode *inode,
+ enum nfs_stat_bytecounters stat,
+ long addend)
+{
+ nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
+}
+
+#ifdef CONFIG_NFS_FSCACHE
+static inline void nfs_add_fscache_stats(struct inode *inode,
+ enum nfs_stat_fscachecounters stat,
+ long addend)
+{
+ this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
+}
+#endif
+
+static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
+{
+ return alloc_percpu(struct nfs_iostats);
+}
+
+static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats)
+{
+ if (stats != NULL)
+ free_percpu(stats);
+}
+
+#endif /* _NFS_IOSTAT */
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
new file mode 100644
index 00000000..8e65c7f1
--- /dev/null
+++ b/fs/nfs/mount_clnt.c
@@ -0,0 +1,518 @@
+/*
+ * In-kernel MOUNT protocol client
+ *
+ * Copyright (C) 1997, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/uio.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/nfs_fs.h>
+#include "internal.h"
+
+#ifdef NFS_DEBUG
+# define NFSDBG_FACILITY NFSDBG_MOUNT
+#endif
+
+/*
+ * Defined by RFC 1094, section A.3; and RFC 1813, section 5.1.4
+ */
+#define MNTPATHLEN (1024)
+
+/*
+ * XDR data type sizes
+ */
+#define encode_dirpath_sz (1 + XDR_QUADLEN(MNTPATHLEN))
+#define MNT_status_sz (1)
+#define MNT_fhs_status_sz (1)
+#define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE)
+#define MNT_fhandle3_sz (1 + XDR_QUADLEN(NFS3_FHSIZE))
+#define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS)
+
+/*
+ * XDR argument and result sizes
+ */
+#define MNT_enc_dirpath_sz encode_dirpath_sz
+#define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz)
+#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \
+ MNT_authflav3_sz)
+
+/*
+ * Defined by RFC 1094, section A.5
+ */
+enum {
+ MOUNTPROC_NULL = 0,
+ MOUNTPROC_MNT = 1,
+ MOUNTPROC_DUMP = 2,
+ MOUNTPROC_UMNT = 3,
+ MOUNTPROC_UMNTALL = 4,
+ MOUNTPROC_EXPORT = 5,
+};
+
+/*
+ * Defined by RFC 1813, section 5.2
+ */
+enum {
+ MOUNTPROC3_NULL = 0,
+ MOUNTPROC3_MNT = 1,
+ MOUNTPROC3_DUMP = 2,
+ MOUNTPROC3_UMNT = 3,
+ MOUNTPROC3_UMNTALL = 4,
+ MOUNTPROC3_EXPORT = 5,
+};
+
+static const struct rpc_program mnt_program;
+
+/*
+ * Defined by OpenGroup XNFS Version 3W, chapter 8
+ */
+enum mountstat {
+ MNT_OK = 0,
+ MNT_EPERM = 1,
+ MNT_ENOENT = 2,
+ MNT_EACCES = 13,
+ MNT_EINVAL = 22,
+};
+
+static struct {
+ u32 status;
+ int errno;
+} mnt_errtbl[] = {
+ { .status = MNT_OK, .errno = 0, },
+ { .status = MNT_EPERM, .errno = -EPERM, },
+ { .status = MNT_ENOENT, .errno = -ENOENT, },
+ { .status = MNT_EACCES, .errno = -EACCES, },
+ { .status = MNT_EINVAL, .errno = -EINVAL, },
+};
+
+/*
+ * Defined by RFC 1813, section 5.1.5
+ */
+enum mountstat3 {
+ MNT3_OK = 0, /* no error */
+ MNT3ERR_PERM = 1, /* Not owner */
+ MNT3ERR_NOENT = 2, /* No such file or directory */
+ MNT3ERR_IO = 5, /* I/O error */
+ MNT3ERR_ACCES = 13, /* Permission denied */
+ MNT3ERR_NOTDIR = 20, /* Not a directory */
+ MNT3ERR_INVAL = 22, /* Invalid argument */
+ MNT3ERR_NAMETOOLONG = 63, /* Filename too long */
+ MNT3ERR_NOTSUPP = 10004, /* Operation not supported */
+ MNT3ERR_SERVERFAULT = 10006, /* A failure on the server */
+};
+
+static struct {
+ u32 status;
+ int errno;
+} mnt3_errtbl[] = {
+ { .status = MNT3_OK, .errno = 0, },
+ { .status = MNT3ERR_PERM, .errno = -EPERM, },
+ { .status = MNT3ERR_NOENT, .errno = -ENOENT, },
+ { .status = MNT3ERR_IO, .errno = -EIO, },
+ { .status = MNT3ERR_ACCES, .errno = -EACCES, },
+ { .status = MNT3ERR_NOTDIR, .errno = -ENOTDIR, },
+ { .status = MNT3ERR_INVAL, .errno = -EINVAL, },
+ { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
+ { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
+ { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, },
+};
+
+struct mountres {
+ int errno;
+ struct nfs_fh *fh;
+ unsigned int *auth_count;
+ rpc_authflavor_t *auth_flavors;
+};
+
+struct mnt_fhstatus {
+ u32 status;
+ struct nfs_fh *fh;
+};
+
+/**
+ * nfs_mount - Obtain an NFS file handle for the given host and path
+ * @info: pointer to mount request arguments
+ *
+ * Uses default timeout parameters specified by underlying transport.
+ */
+int nfs_mount(struct nfs_mount_request *info)
+{
+ struct mountres result = {
+ .fh = info->fh,
+ .auth_count = info->auth_flav_len,
+ .auth_flavors = info->auth_flavs,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = info->dirpath,
+ .rpc_resp = &result,
+ };
+ struct rpc_create_args args = {
+ .net = info->net,
+ .protocol = info->protocol,
+ .address = info->sap,
+ .addrsize = info->salen,
+ .servername = info->hostname,
+ .program = &mnt_program,
+ .version = info->version,
+ .authflavor = RPC_AUTH_UNIX,
+ };
+ struct rpc_clnt *mnt_clnt;
+ int status;
+
+ dprintk("NFS: sending MNT request for %s:%s\n",
+ (info->hostname ? info->hostname : "server"),
+ info->dirpath);
+
+ if (info->noresvport)
+ args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
+
+ mnt_clnt = rpc_create(&args);
+ if (IS_ERR(mnt_clnt))
+ goto out_clnt_err;
+
+ if (info->version == NFS_MNT3_VERSION)
+ msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC3_MNT];
+ else
+ msg.rpc_proc = &mnt_clnt->cl_procinfo[MOUNTPROC_MNT];
+
+ status = rpc_call_sync(mnt_clnt, &msg, 0);
+ rpc_shutdown_client(mnt_clnt);
+
+ if (status < 0)
+ goto out_call_err;
+ if (result.errno != 0)
+ goto out_mnt_err;
+
+ dprintk("NFS: MNT request succeeded\n");
+ status = 0;
+
+out:
+ return status;
+
+out_clnt_err:
+ status = PTR_ERR(mnt_clnt);
+ dprintk("NFS: failed to create MNT RPC client, status=%d\n", status);
+ goto out;
+
+out_call_err:
+ dprintk("NFS: MNT request failed, status=%d\n", status);
+ goto out;
+
+out_mnt_err:
+ dprintk("NFS: MNT server returned result %d\n", result.errno);
+ status = result.errno;
+ goto out;
+}
+
+/**
+ * nfs_umount - Notify a server that we have unmounted this export
+ * @info: pointer to umount request arguments
+ *
+ * MOUNTPROC_UMNT is advisory, so we set a short timeout, and always
+ * use UDP.
+ */
+void nfs_umount(const struct nfs_mount_request *info)
+{
+ static const struct rpc_timeout nfs_umnt_timeout = {
+ .to_initval = 1 * HZ,
+ .to_maxval = 3 * HZ,
+ .to_retries = 2,
+ };
+ struct rpc_create_args args = {
+ .net = info->net,
+ .protocol = IPPROTO_UDP,
+ .address = info->sap,
+ .addrsize = info->salen,
+ .timeout = &nfs_umnt_timeout,
+ .servername = info->hostname,
+ .program = &mnt_program,
+ .version = info->version,
+ .authflavor = RPC_AUTH_UNIX,
+ .flags = RPC_CLNT_CREATE_NOPING,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = info->dirpath,
+ };
+ struct rpc_clnt *clnt;
+ int status;
+
+ if (info->noresvport)
+ args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
+
+ clnt = rpc_create(&args);
+ if (IS_ERR(clnt))
+ goto out_clnt_err;
+
+ dprintk("NFS: sending UMNT request for %s:%s\n",
+ (info->hostname ? info->hostname : "server"), info->dirpath);
+
+ if (info->version == NFS_MNT3_VERSION)
+ msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC3_UMNT];
+ else
+ msg.rpc_proc = &clnt->cl_procinfo[MOUNTPROC_UMNT];
+
+ status = rpc_call_sync(clnt, &msg, 0);
+ rpc_shutdown_client(clnt);
+
+ if (unlikely(status < 0))
+ goto out_call_err;
+
+ return;
+
+out_clnt_err:
+ dprintk("NFS: failed to create UMNT RPC client, status=%ld\n",
+ PTR_ERR(clnt));
+ return;
+
+out_call_err:
+ dprintk("NFS: UMNT request failed, status=%d\n", status);
+}
+
+/*
+ * XDR encode/decode functions for MOUNT
+ */
+
+static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
+{
+ const u32 pathname_len = strlen(pathname);
+ __be32 *p;
+
+ BUG_ON(pathname_len > MNTPATHLEN);
+ p = xdr_reserve_space(xdr, 4 + pathname_len);
+ xdr_encode_opaque(p, pathname, pathname_len);
+}
+
+static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const char *dirpath)
+{
+ encode_mntdirpath(xdr, dirpath);
+}
+
+/*
+ * RFC 1094: "A non-zero status indicates some sort of error. In this
+ * case, the status is a UNIX error number." This can be problematic
+ * if the server and client use different errno values for the same
+ * error.
+ *
+ * However, the OpenGroup XNFS spec provides a simple mapping that is
+ * independent of local errno values on the server and the client.
+ */
+static int decode_status(struct xdr_stream *xdr, struct mountres *res)
+{
+ unsigned int i;
+ u32 status;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ return -EIO;
+ status = be32_to_cpup(p);
+
+ for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
+ if (mnt_errtbl[i].status == status) {
+ res->errno = mnt_errtbl[i].errno;
+ return 0;
+ }
+ }
+
+ dprintk("NFS: unrecognized MNT status code: %u\n", status);
+ res->errno = -EACCES;
+ return 0;
+}
+
+static int decode_fhandle(struct xdr_stream *xdr, struct mountres *res)
+{
+ struct nfs_fh *fh = res->fh;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS2_FHSIZE);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ fh->size = NFS2_FHSIZE;
+ memcpy(fh->data, p, NFS2_FHSIZE);
+ return 0;
+}
+
+static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct mountres *res)
+{
+ int status;
+
+ status = decode_status(xdr, res);
+ if (unlikely(status != 0 || res->errno != 0))
+ return status;
+ return decode_fhandle(xdr, res);
+}
+
+static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
+{
+ unsigned int i;
+ u32 status;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ return -EIO;
+ status = be32_to_cpup(p);
+
+ for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
+ if (mnt3_errtbl[i].status == status) {
+ res->errno = mnt3_errtbl[i].errno;
+ return 0;
+ }
+ }
+
+ dprintk("NFS: unrecognized MNT3 status code: %u\n", status);
+ res->errno = -EACCES;
+ return 0;
+}
+
+static int decode_fhandle3(struct xdr_stream *xdr, struct mountres *res)
+{
+ struct nfs_fh *fh = res->fh;
+ u32 size;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ size = be32_to_cpup(p);
+ if (size > NFS3_FHSIZE || size == 0)
+ return -EIO;
+
+ p = xdr_inline_decode(xdr, size);
+ if (unlikely(p == NULL))
+ return -EIO;
+
+ fh->size = size;
+ memcpy(fh->data, p, size);
+ return 0;
+}
+
+static int decode_auth_flavors(struct xdr_stream *xdr, struct mountres *res)
+{
+ rpc_authflavor_t *flavors = res->auth_flavors;
+ unsigned int *count = res->auth_count;
+ u32 entries, i;
+ __be32 *p;
+
+ if (*count == 0)
+ return 0;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ return -EIO;
+ entries = be32_to_cpup(p);
+ dprintk("NFS: received %u auth flavors\n", entries);
+ if (entries > NFS_MAX_SECFLAVORS)
+ entries = NFS_MAX_SECFLAVORS;
+
+ p = xdr_inline_decode(xdr, 4 * entries);
+ if (unlikely(p == NULL))
+ return -EIO;
+
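+	/* Never return more flavors than the caller's array can hold. */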
+ if (entries > *count)
+ entries = *count;
+
+ for (i = 0; i < entries; i++) {
+ flavors[i] = be32_to_cpup(p++);
+ dprintk("NFS: auth flavor[%u]: %d\n", i, flavors[i]);
+ }
+ *count = i;
+
+ return 0;
+}
+
+static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct mountres *res)
+{
+ int status;
+
+ status = decode_fhs_status(xdr, res);
+ if (unlikely(status != 0 || res->errno != 0))
+ return status;
+ status = decode_fhandle3(xdr, res);
+ if (unlikely(status != 0)) {
+ res->errno = -EBADHANDLE;
+ return 0;
+ }
+ return decode_auth_flavors(xdr, res);
+}
+
+static struct rpc_procinfo mnt_procedures[] = {
+ [MOUNTPROC_MNT] = {
+ .p_proc = MOUNTPROC_MNT,
+ .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
+ .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_replen = MNT_dec_mountres_sz,
+ .p_statidx = MOUNTPROC_MNT,
+ .p_name = "MOUNT",
+ },
+ [MOUNTPROC_UMNT] = {
+ .p_proc = MOUNTPROC_UMNT,
+ .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_statidx = MOUNTPROC_UMNT,
+ .p_name = "UMOUNT",
+ },
+};
+
+static struct rpc_procinfo mnt3_procedures[] = {
+ [MOUNTPROC3_MNT] = {
+ .p_proc = MOUNTPROC3_MNT,
+ .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
+ .p_decode = (kxdrdproc_t)mnt_xdr_dec_mountres3,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_replen = MNT_dec_mountres3_sz,
+ .p_statidx = MOUNTPROC3_MNT,
+ .p_name = "MOUNT",
+ },
+ [MOUNTPROC3_UMNT] = {
+ .p_proc = MOUNTPROC3_UMNT,
+ .p_encode = (kxdreproc_t)mnt_xdr_enc_dirpath,
+ .p_arglen = MNT_enc_dirpath_sz,
+ .p_statidx = MOUNTPROC3_UMNT,
+ .p_name = "UMOUNT",
+ },
+};
+
+static const struct rpc_version mnt_version1 = {
+ .number = 1,
+ .nrprocs = ARRAY_SIZE(mnt_procedures),
+ .procs = mnt_procedures,
+};
+
+static const struct rpc_version mnt_version3 = {
+ .number = 3,
+ .nrprocs = ARRAY_SIZE(mnt3_procedures),
+ .procs = mnt3_procedures,
+};
+
+static const struct rpc_version *mnt_version[] = {
+ NULL,
+ &mnt_version1,
+ NULL,
+ &mnt_version3,
+};
+
+static struct rpc_stat mnt_stats;
+
+static const struct rpc_program mnt_program = {
+ .name = "mount",
+ .number = NFS_MNT_PROGRAM,
+ .nrvers = ARRAY_SIZE(mnt_version),
+ .version = mnt_version,
+ .stats = &mnt_stats,
+};
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
new file mode 100644
index 00000000..d51868e5
--- /dev/null
+++ b/fs/nfs/namespace.c
@@ -0,0 +1,335 @@
+/*
+ * linux/fs/nfs/namespace.c
+ *
+ * Copyright (C) 2005 Trond Myklebust <Trond.Myklebust@netapp.com>
+ * - Modified by David Howells <dhowells@redhat.com>
+ *
+ * NFS namespace
+ */
+
+#include <linux/dcache.h>
+#include <linux/gfp.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/nfs_fs.h>
+#include <linux/string.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/vfs.h>
+#include <linux/sunrpc/gss_api.h>
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+static void nfs_expire_automounts(struct work_struct *work);
+
+static LIST_HEAD(nfs_automount_list);
+static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
+int nfs_mountpoint_expiry_timeout = 500 * HZ;
+
+static struct vfsmount *nfs_do_submount(struct dentry *dentry,
+ struct nfs_fh *fh,
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t authflavor);
+
+/*
+ * nfs_path - reconstruct the path given an arbitrary dentry
+ * @p - used to return pointer to the end of devname part of path
+ * @dentry - pointer to dentry
+ * @buffer - result buffer
+ * @buflen - length of buffer
+ *
+ * Helper function for constructing the server pathname
+ * for an arbitrary hashed dentry.
+ *
+ * This is mainly for use in figuring out the path on the
+ * server side when automounting on top of an existing partition
+ * and in generating /proc/mounts and friends.
+ */
+char *nfs_path(char **p, struct dentry *dentry, char *buffer, ssize_t buflen)
+{
+ char *end;
+ int namelen;
+ unsigned seq;
+ const char *base;
+
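+	/*
+	 * Build the path backwards from the end of the buffer, walking up
+	 * the d_parent chain until we reach the root.  The rename_lock
+	 * seqlock detects concurrent renames, in which case we retry.
+	 */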
+rename_retry:
+ end = buffer+buflen;
+ *--end = '\0';
+ buflen--;
+
+ seq = read_seqbegin(&rename_lock);
+ rcu_read_lock();
+ while (1) {
+ spin_lock(&dentry->d_lock);
+ if (IS_ROOT(dentry))
+ break;
+ namelen = dentry->d_name.len;
+ buflen -= namelen + 1;
+ if (buflen < 0)
+ goto Elong_unlock;
+ end -= namelen;
+ memcpy(end, dentry->d_name.name, namelen);
+ *--end = '/';
+ spin_unlock(&dentry->d_lock);
+ dentry = dentry->d_parent;
+ }
+ if (read_seqretry(&rename_lock, seq)) {
+ spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ goto rename_retry;
+ }
+ if (*end != '/') {
+ if (--buflen < 0) {
+ spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ goto Elong;
+ }
+ *--end = '/';
+ }
+ *p = end;
+ base = dentry->d_fsdata;
+ if (!base) {
+ spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ WARN_ON(1);
+ return end;
+ }
+ namelen = strlen(base);
+ /* Strip off excess slashes in base string */
+ while (namelen > 0 && base[namelen - 1] == '/')
+ namelen--;
+ buflen -= namelen;
+ if (buflen < 0) {
+ spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ goto Elong;
+ }
+ end -= namelen;
+ memcpy(end, base, namelen);
+ spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ return end;
+Elong_unlock:
+ spin_unlock(&dentry->d_lock);
+ rcu_read_unlock();
+ if (read_seqretry(&rename_lock, seq))
+ goto rename_retry;
+Elong:
+ return ERR_PTR(-ENAMETOOLONG);
+}
+
+#ifdef CONFIG_NFS_V4
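+/*
+ * Pick a usable pseudoflavor from a SECINFO reply: AUTH_NULL and
+ * AUTH_UNIX are taken as-is; an RPC_AUTH_GSS flavor is mapped from its
+ * mechanism OID.  Defaults to AUTH_UNIX if nothing usable is found.
+ */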
+rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
+{
+ struct gss_api_mech *mech;
+ struct xdr_netobj oid;
+ int i;
+ rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
+
+ for (i = 0; i < flavors->num_flavors; i++) {
+ struct nfs4_secinfo_flavor *flavor;
+ flavor = &flavors->flavors[i];
+
+ if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
+ pseudoflavor = flavor->flavor;
+ break;
+ } else if (flavor->flavor == RPC_AUTH_GSS) {
+ oid.len = flavor->gss.sec_oid4.len;
+ oid.data = flavor->gss.sec_oid4.data;
+ mech = gss_mech_get_by_OID(&oid);
+ if (!mech)
+ continue;
+ pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
+ gss_mech_put(mech);
+ break;
+ }
+ }
+
+ return pseudoflavor;
+}
+
+static struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
+ struct qstr *name,
+ struct nfs_fh *fh,
+ struct nfs_fattr *fattr)
+{
+ int err;
+
+ if (NFS_PROTO(dir)->version == 4)
+ return nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
+
+ err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
+ if (err)
+ return ERR_PTR(err);
+ return rpc_clone_client(NFS_SERVER(dir)->client);
+}
+#else /* CONFIG_NFS_V4 */
+static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
+ struct qstr *name,
+ struct nfs_fh *fh,
+ struct nfs_fattr *fattr)
+{
+ int err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
+ if (err)
+ return ERR_PTR(err);
+ return rpc_clone_client(NFS_SERVER(dir)->client);
+}
+#endif /* CONFIG_NFS_V4 */
+
+/*
+ * nfs_d_automount - Handle crossing a mountpoint on the server
+ * @path - The mountpoint
+ *
+ * When we encounter a mountpoint on the server, we want to set up
+ * a mountpoint on the client too, to prevent inode numbers from
+ * colliding, and to allow "df" to work properly.
+ * On NFSv4, we also want to allow for the fact that different
+ * filesystems may be migrated to different servers in a failover
+ * situation, and that different filesystems may want to use
+ * different security flavours.
+ */
+struct vfsmount *nfs_d_automount(struct path *path)
+{
+ struct vfsmount *mnt;
+ struct dentry *parent;
+ struct nfs_fh *fh = NULL;
+ struct nfs_fattr *fattr = NULL;
+ struct rpc_clnt *client;
+
+ dprintk("--> nfs_d_automount()\n");
+
+ mnt = ERR_PTR(-ESTALE);
+ if (IS_ROOT(path->dentry))
+ goto out_nofree;
+
+ mnt = ERR_PTR(-ENOMEM);
+ fh = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ if (fh == NULL || fattr == NULL)
+ goto out;
+
+ dprintk("%s: enter\n", __func__);
+
+ /* Look it up again to get its attributes */
+ parent = dget_parent(path->dentry);
+ client = nfs_lookup_mountpoint(parent->d_inode, &path->dentry->d_name, fh, fattr);
+ dput(parent);
+ if (IS_ERR(client)) {
+ mnt = ERR_CAST(client);
+ goto out;
+ }
+
+ if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
+ mnt = nfs_do_refmount(client, path->dentry);
+ else
+ mnt = nfs_do_submount(path->dentry, fh, fattr, client->cl_auth->au_flavor);
+ rpc_shutdown_client(client);
+
+ if (IS_ERR(mnt))
+ goto out;
+
+ dprintk("%s: done, success\n", __func__);
+ mntget(mnt); /* prevent immediate expiration */
+ mnt_set_expiry(mnt, &nfs_automount_list);
+ schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+
+out:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fh);
+out_nofree:
+ if (IS_ERR(mnt))
+ dprintk("<-- %s(): error %ld\n", __func__, PTR_ERR(mnt));
+ else
+ dprintk("<-- %s() = %p\n", __func__, mnt);
+ return mnt;
+}
+
+const struct inode_operations nfs_mountpoint_inode_operations = {
+ .getattr = nfs_getattr,
+};
+
+const struct inode_operations nfs_referral_inode_operations = {
+};
+
+static void nfs_expire_automounts(struct work_struct *work)
+{
+ struct list_head *list = &nfs_automount_list;
+
+ mark_mounts_for_expiry(list);
+ if (!list_empty(list))
+ schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
+}
+
+void nfs_release_automount_timer(void)
+{
+ if (list_empty(&nfs_automount_list))
+ cancel_delayed_work(&nfs_automount_task);
+}
+
+/*
+ * Clone a mountpoint of the appropriate type
+ */
+static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
+ const char *devname,
+ struct nfs_clone_mount *mountdata)
+{
+#ifdef CONFIG_NFS_V4
+ struct vfsmount *mnt = ERR_PTR(-EINVAL);
+ switch (server->nfs_client->rpc_ops->version) {
+ case 2:
+ case 3:
+ mnt = vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
+ break;
+ case 4:
+ mnt = vfs_kern_mount(&nfs4_xdev_fs_type, 0, devname, mountdata);
+ }
+ return mnt;
+#else
+ return vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
+#endif
+}
+
+/**
+ * nfs_do_submount - set up mountpoint when crossing a filesystem boundary
+ * @dentry - dentry of the mountpoint being crossed
+ * @fh - filehandle for new root dentry
+ * @fattr - attributes for new root inode
+ * @authflavor - security flavor to use when performing the mount
+ *
+ */
+static struct vfsmount *nfs_do_submount(struct dentry *dentry,
+ struct nfs_fh *fh,
+ struct nfs_fattr *fattr,
+ rpc_authflavor_t authflavor)
+{
+ struct nfs_clone_mount mountdata = {
+ .sb = dentry->d_sb,
+ .dentry = dentry,
+ .fh = fh,
+ .fattr = fattr,
+ .authflavor = authflavor,
+ };
+ struct vfsmount *mnt = ERR_PTR(-ENOMEM);
+ char *page = (char *) __get_free_page(GFP_USER);
+ char *devname;
+
+ dprintk("--> nfs_do_submount()\n");
+
+ dprintk("%s: submounting on %s/%s\n", __func__,
+ dentry->d_parent->d_name.name,
+ dentry->d_name.name);
+ if (page == NULL)
+ goto out;
+ devname = nfs_devname(dentry, page, PAGE_SIZE);
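+	/* nfs_devname() may return an ERR_PTR; propagate it as the result. */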
+ mnt = (struct vfsmount *)devname;
+ if (IS_ERR(devname))
+ goto free_page;
+ mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata);
+free_page:
+ free_page((unsigned long)page);
+out:
+ dprintk("%s: done\n", __func__);
+
+ dprintk("<-- nfs_do_submount() = %p\n", mnt);
+ return mnt;
+}
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
new file mode 100644
index 00000000..aa14ec30
--- /dev/null
+++ b/fs/nfs/netns.h
@@ -0,0 +1,27 @@
+#ifndef __NFS_NETNS_H__
+#define __NFS_NETNS_H__
+
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+struct bl_dev_msg {
+ int32_t status;
+ uint32_t major, minor;
+};
+
+struct nfs_net {
+ struct cache_detail *nfs_dns_resolve;
+ struct rpc_pipe *bl_device_pipe;
+ struct bl_dev_msg bl_mount_reply;
+ wait_queue_head_t bl_wq;
+ struct list_head nfs_client_list;
+ struct list_head nfs_volume_list;
+#ifdef CONFIG_NFS_V4
+ struct idr cb_ident_idr; /* Protected by nfs_client_lock */
+#endif
+ spinlock_t nfs_client_lock;
+};
+
+extern int nfs_net_id;
+
+#endif
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
new file mode 100644
index 00000000..1f56000f
--- /dev/null
+++ b/fs/nfs/nfs2xdr.c
@@ -0,0 +1,1157 @@
+/*
+ * linux/fs/nfs/nfs2xdr.c
+ *
+ * XDR functions to encode/decode NFS RPC arguments and results.
+ *
+ * Copyright (C) 1992, 1993, 1994 Rick Sladkey
+ * Copyright (C) 1996 Olaf Kirch
+ * 04 Aug 1998 Ion Badulescu <ionut@cs.columbia.edu>
+ * FIFO's need special handling in NFSv2
+ */
+
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs_fs.h>
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_XDR
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO EIO
+
+/*
+ * Declare the space requirements for NFS arguments and replies as
+ * number of 32bit-words
+ */
+#define NFS_fhandle_sz (8)
+#define NFS_sattr_sz (8)
+#define NFS_filename_sz (1+(NFS2_MAXNAMLEN>>2))
+#define NFS_path_sz (1+(NFS2_MAXPATHLEN>>2))
+#define NFS_fattr_sz (17)
+#define NFS_info_sz (5)
+#define NFS_entry_sz (NFS_filename_sz+3)
+
+#define NFS_diropargs_sz (NFS_fhandle_sz+NFS_filename_sz)
+#define NFS_removeargs_sz (NFS_fhandle_sz+NFS_filename_sz)
+#define NFS_sattrargs_sz (NFS_fhandle_sz+NFS_sattr_sz)
+#define NFS_readlinkargs_sz (NFS_fhandle_sz)
+#define NFS_readargs_sz (NFS_fhandle_sz+3)
+#define NFS_writeargs_sz (NFS_fhandle_sz+4)
+#define NFS_createargs_sz (NFS_diropargs_sz+NFS_sattr_sz)
+#define NFS_renameargs_sz (NFS_diropargs_sz+NFS_diropargs_sz)
+#define NFS_linkargs_sz (NFS_fhandle_sz+NFS_diropargs_sz)
+#define NFS_symlinkargs_sz (NFS_diropargs_sz+1+NFS_sattr_sz)
+#define NFS_readdirargs_sz (NFS_fhandle_sz+2)
+
+#define NFS_attrstat_sz (1+NFS_fattr_sz)
+#define NFS_diropres_sz (1+NFS_fhandle_sz+NFS_fattr_sz)
+#define NFS_readlinkres_sz (2)
+#define NFS_readres_sz (1+NFS_fattr_sz+1)
+#define NFS_writeres_sz (NFS_attrstat_sz)
+#define NFS_stat_sz (1)
+#define NFS_readdirres_sz (1)
+#define NFS_statfsres_sz (1+NFS_info_sz)
+
+
+/*
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
+ */
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+ unsigned int base, unsigned int len,
+ unsigned int bufsize)
+{
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
+ unsigned int replen;
+
+ replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("NFS: %s prematurely hit the end of our receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NFSv2 basic data types
+ *
+ * Basic NFSv2 data types are defined in section 2.3 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions. For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+/*
+ * typedef opaque nfsdata<>;
+ */
+static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_readres *result)
+{
+ u32 recvd, count;
+ size_t hdrlen;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p);
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(count > recvd))
+ goto out_cheating;
+out:
+ xdr_read_pages(xdr, count);
+ result->eof = 0; /* NFSv2 does not pass EOF flag on the wire. */
+ result->count = count;
+ return count;
+out_cheating:
+ dprintk("NFS: server cheating in read result: "
+ "count %u > recvd %u\n", count, recvd);
+ count = recvd;
+ goto out;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * enum stat {
+ * NFS_OK = 0,
+ * NFSERR_PERM = 1,
+ * NFSERR_NOENT = 2,
+ * NFSERR_IO = 5,
+ * NFSERR_NXIO = 6,
+ * NFSERR_ACCES = 13,
+ * NFSERR_EXIST = 17,
+ * NFSERR_NODEV = 19,
+ * NFSERR_NOTDIR = 20,
+ * NFSERR_ISDIR = 21,
+ * NFSERR_FBIG = 27,
+ * NFSERR_NOSPC = 28,
+ * NFSERR_ROFS = 30,
+ * NFSERR_NAMETOOLONG = 63,
+ * NFSERR_NOTEMPTY = 66,
+ * NFSERR_DQUOT = 69,
+ * NFSERR_STALE = 70,
+ * NFSERR_WFLUSH = 99
+ * };
+ */
+static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ *status = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * 2.3.2. ftype
+ *
+ * enum ftype {
+ * NFNON = 0,
+ * NFREG = 1,
+ * NFDIR = 2,
+ * NFBLK = 3,
+ * NFCHR = 4,
+ * NFLNK = 5
+ * };
+ *
+ */
+static __be32 *xdr_decode_ftype(__be32 *p, u32 *type)
+{
+ *type = be32_to_cpup(p++);
+ if (unlikely(*type > NF2FIFO))
+ *type = NFBAD;
+ return p;
+}
+
+/*
+ * 2.3.3. fhandle
+ *
+ * typedef opaque fhandle[FHSIZE];
+ */
+static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+ __be32 *p;
+
+ BUG_ON(fh->size != NFS2_FHSIZE);
+ p = xdr_reserve_space(xdr, NFS2_FHSIZE);
+ memcpy(p, fh->data, NFS2_FHSIZE);
+}
+
+static int decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS2_FHSIZE);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ fh->size = NFS2_FHSIZE;
+ memcpy(fh->data, p, NFS2_FHSIZE);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * 2.3.4. timeval
+ *
+ * struct timeval {
+ * unsigned int seconds;
+ * unsigned int useconds;
+ * };
+ */
+static __be32 *xdr_encode_time(__be32 *p, const struct timespec *timep)
+{
+ *p++ = cpu_to_be32(timep->tv_sec);
+ if (timep->tv_nsec != 0)
+ *p++ = cpu_to_be32(timep->tv_nsec / NSEC_PER_USEC);
+ else
+ *p++ = cpu_to_be32(0);
+ return p;
+}
+
+/*
+ * Passing the invalid value useconds=1000000 is a Sun convention for
+ * "set to current server time". It's needed to make permissions checks
+ * for the "touch" program across v2 mounts to Solaris and Irix servers
+ * work correctly. See description of sattr in section 6.1 of "NFS
+ * Illustrated" by Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5.
+ */
+static __be32 *xdr_encode_current_server_time(__be32 *p,
+ const struct timespec *timep)
+{
+ *p++ = cpu_to_be32(timep->tv_sec);
+ *p++ = cpu_to_be32(1000000);
+ return p;
+}
+
+static __be32 *xdr_decode_time(__be32 *p, struct timespec *timep)
+{
+ timep->tv_sec = be32_to_cpup(p++);
+ timep->tv_nsec = be32_to_cpup(p++) * NSEC_PER_USEC;
+ return p;
+}
+
+/*
+ * 2.3.5. fattr
+ *
+ * struct fattr {
+ * ftype type;
+ * unsigned int mode;
+ * unsigned int nlink;
+ * unsigned int uid;
+ * unsigned int gid;
+ * unsigned int size;
+ * unsigned int blocksize;
+ * unsigned int rdev;
+ * unsigned int blocks;
+ * unsigned int fsid;
+ * unsigned int fileid;
+ * timeval atime;
+ * timeval mtime;
+ * timeval ctime;
+ * };
+ *
+ */
+static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+ u32 rdev, type;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS_fattr_sz << 2);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+
+ fattr->valid |= NFS_ATTR_FATTR_V2;
+
+ p = xdr_decode_ftype(p, &type);
+
+ fattr->mode = be32_to_cpup(p++);
+ fattr->nlink = be32_to_cpup(p++);
+ fattr->uid = be32_to_cpup(p++);
+ fattr->gid = be32_to_cpup(p++);
+ fattr->size = be32_to_cpup(p++);
+ fattr->du.nfs2.blocksize = be32_to_cpup(p++);
+
+ rdev = be32_to_cpup(p++);
+ fattr->rdev = new_decode_dev(rdev);
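+	/*
+	 * NFSv2 has no FIFO file type: servers encode FIFOs as character
+	 * devices with the magic device number NFS2_FIFO_DEV.
+	 */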
+ if (type == (u32)NFCHR && rdev == (u32)NFS2_FIFO_DEV) {
+ fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
+ fattr->rdev = 0;
+ }
+
+ fattr->du.nfs2.blocks = be32_to_cpup(p++);
+ fattr->fsid.major = be32_to_cpup(p++);
+ fattr->fsid.minor = 0;
+ fattr->fileid = be32_to_cpup(p++);
+
+ p = xdr_decode_time(p, &fattr->atime);
+ p = xdr_decode_time(p, &fattr->mtime);
+ xdr_decode_time(p, &fattr->ctime);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * 2.3.6. sattr
+ *
+ * struct sattr {
+ * unsigned int mode;
+ * unsigned int uid;
+ * unsigned int gid;
+ * unsigned int size;
+ * timeval atime;
+ * timeval mtime;
+ * };
+ */
+
+#define NFS2_SATTR_NOT_SET (0xffffffff)
+
+static __be32 *xdr_time_not_set(__be32 *p)
+{
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+ return p;
+}
+
+static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, NFS_sattr_sz << 2);
+
+ if (attr->ia_valid & ATTR_MODE)
+ *p++ = cpu_to_be32(attr->ia_mode);
+ else
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+ if (attr->ia_valid & ATTR_UID)
+ *p++ = cpu_to_be32(attr->ia_uid);
+ else
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+ if (attr->ia_valid & ATTR_GID)
+ *p++ = cpu_to_be32(attr->ia_gid);
+ else
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+ if (attr->ia_valid & ATTR_SIZE)
+ *p++ = cpu_to_be32((u32)attr->ia_size);
+ else
+ *p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+
+ if (attr->ia_valid & ATTR_ATIME_SET)
+ p = xdr_encode_time(p, &attr->ia_atime);
+ else if (attr->ia_valid & ATTR_ATIME)
+ p = xdr_encode_current_server_time(p, &attr->ia_atime);
+ else
+ p = xdr_time_not_set(p);
+ if (attr->ia_valid & ATTR_MTIME_SET)
+ xdr_encode_time(p, &attr->ia_mtime);
+ else if (attr->ia_valid & ATTR_MTIME)
+ xdr_encode_current_server_time(p, &attr->ia_mtime);
+ else
+ xdr_time_not_set(p);
+}
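+
+/*
+ * Illustrative example (comment only, not compiled): a bare
+ * chmod(0644), i.e. ATTR_MODE alone, marshals as
+ *
+ *	mode  = 0x000001a4                     (0644)
+ *	uid   = 0xffffffff                     (not set)
+ *	gid   = 0xffffffff                     (not set)
+ *	size  = 0xffffffff                     (not set)
+ *	atime = { 0xffffffff, 0xffffffff }     (not set)
+ *	mtime = { 0xffffffff, 0xffffffff }     (not set)
+ */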
+
+/*
+ * 2.3.7. filename
+ *
+ * typedef string filename<MAXNAMLEN>;
+ */
+static void encode_filename(struct xdr_stream *xdr,
+ const char *name, u32 length)
+{
+ __be32 *p;
+
+ BUG_ON(length > NFS2_MAXNAMLEN);
+ p = xdr_reserve_space(xdr, 4 + length);
+ xdr_encode_opaque(p, name, length);
+}
+
+static int decode_filename_inline(struct xdr_stream *xdr,
+ const char **name, u32 *length)
+{
+ __be32 *p;
+ u32 count;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p);
+ if (count > NFS2_MAXNAMLEN)
+ goto out_nametoolong;
+ p = xdr_inline_decode(xdr, count);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ *name = (const char *)p;
+ *length = count;
+ return 0;
+out_nametoolong:
+ dprintk("NFS: returned filename too long: %u\n", count);
+ return -ENAMETOOLONG;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * 2.3.8. path
+ *
+ * typedef string path<MAXPATHLEN>;
+ */
+static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length)
+{
+ __be32 *p;
+
+ BUG_ON(length > NFS2_MAXPATHLEN);
+ p = xdr_reserve_space(xdr, 4);
+ *p = cpu_to_be32(length);
+ xdr_write_pages(xdr, pages, 0, length);
+}
+
+static int decode_path(struct xdr_stream *xdr)
+{
+ u32 length, recvd;
+ size_t hdrlen;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ length = be32_to_cpup(p);
+ if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN))
+ goto out_size;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(length > recvd))
+ goto out_cheating;
+
+ xdr_read_pages(xdr, length);
+ xdr_terminate_string(xdr->buf, length);
+ return 0;
+out_size:
+ dprintk("NFS: returned pathname too long: %u\n", length);
+ return -ENAMETOOLONG;
+out_cheating:
+ dprintk("NFS: server cheating in pathname result: "
+ "length %u > received %u\n", length, recvd);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * 2.3.9. attrstat
+ *
+ * union attrstat switch (stat status) {
+ * case NFS_OK:
+ * fattr attributes;
+ * default:
+ * void;
+ * };
+ */
+static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_fattr(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 2.3.10. diropargs
+ *
+ * struct diropargs {
+ * fhandle dir;
+ * filename name;
+ * };
+ */
+static void encode_diropargs(struct xdr_stream *xdr, const struct nfs_fh *fh,
+ const char *name, u32 length)
+{
+ encode_fhandle(xdr, fh);
+ encode_filename(xdr, name, length);
+}
+
+/*
+ * 2.3.11. diropres
+ *
+ * union diropres switch (stat status) {
+ * case NFS_OK:
+ * struct {
+ * fhandle file;
+ * fattr attributes;
+ * } diropok;
+ * default:
+ * void;
+ * };
+ */
+static int decode_diropok(struct xdr_stream *xdr, struct nfs_diropok *result)
+{
+ int error;
+
+ error = decode_fhandle(xdr, result->fh);
+ if (unlikely(error))
+ goto out;
+ error = decode_fattr(xdr, result->fattr);
+out:
+ return error;
+}
+
+static int decode_diropres(struct xdr_stream *xdr, struct nfs_diropok *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_diropok(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+
+/*
+ * NFSv2 XDR encode functions
+ *
+ * NFSv2 argument types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ */
+
+static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_fh *fh)
+{
+ encode_fhandle(xdr, fh);
+}
+
+/*
+ * 2.2.3. sattrargs
+ *
+ * struct sattrargs {
+ * fhandle file;
+ * sattr attributes;
+ * };
+ */
+static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_sattrargs *args)
+{
+ encode_fhandle(xdr, args->fh);
+ encode_sattr(xdr, args->sattr);
+}
+
+static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_diropargs *args)
+{
+ encode_diropargs(xdr, args->fh, args->name, args->len);
+}
+
+static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_readlinkargs *args)
+{
+ encode_fhandle(xdr, args->fh);
+ prepare_reply_buffer(req, args->pages, args->pgbase,
+ args->pglen, NFS_readlinkres_sz);
+}
+
+/*
+ * 2.2.7. readargs
+ *
+ * struct readargs {
+ * fhandle file;
+ * unsigned offset;
+ * unsigned count;
+ * unsigned totalcount;
+ * };
+ */
+static void encode_readargs(struct xdr_stream *xdr,
+ const struct nfs_readargs *args)
+{
+ u32 offset = args->offset;
+ u32 count = args->count;
+ __be32 *p;
+
+ encode_fhandle(xdr, args->fh);
+
+ p = xdr_reserve_space(xdr, 4 + 4 + 4);
+ *p++ = cpu_to_be32(offset);
+ *p++ = cpu_to_be32(count);
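+ /* totalcount is unused in NFSv2 (RFC 1094, section 2.2.7),
+ * so just repeat count here */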
+ *p = cpu_to_be32(count);
+}
+
+static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_readargs *args)
+{
+ encode_readargs(xdr, args);
+ prepare_reply_buffer(req, args->pages, args->pgbase,
+ args->count, NFS_readres_sz);
+ req->rq_rcv_buf.flags |= XDRBUF_READ;
+}
+
+/*
+ * 2.2.9. writeargs
+ *
+ * struct writeargs {
+ * fhandle file;
+ * unsigned beginoffset;
+ * unsigned offset;
+ * unsigned totalcount;
+ * nfsdata data;
+ * };
+ */
+static void encode_writeargs(struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
+{
+ u32 offset = args->offset;
+ u32 count = args->count;
+ __be32 *p;
+
+ encode_fhandle(xdr, args->fh);
+
+ p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
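+ /* beginoffset and totalcount are ignored by NFSv2 servers
+ * (RFC 1094, section 2.2.9); fill them with the offset and
+ * count values the server does use */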
+ *p++ = cpu_to_be32(offset);
+ *p++ = cpu_to_be32(offset);
+ *p++ = cpu_to_be32(count);
+
+ /* nfsdata */
+ *p = cpu_to_be32(count);
+ xdr_write_pages(xdr, args->pages, args->pgbase, count);
+}
+
+static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
+{
+ encode_writeargs(xdr, args);
+ xdr->buf->flags |= XDRBUF_WRITE;
+}
+
+/*
+ * 2.2.10. createargs
+ *
+ * struct createargs {
+ * diropargs where;
+ * sattr attributes;
+ * };
+ */
+static void nfs2_xdr_enc_createargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_createargs *args)
+{
+ encode_diropargs(xdr, args->fh, args->name, args->len);
+ encode_sattr(xdr, args->sattr);
+}
+
+static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_removeargs *args)
+{
+ encode_diropargs(xdr, args->fh, args->name.name, args->name.len);
+}
+
+/*
+ * 2.2.12. renameargs
+ *
+ * struct renameargs {
+ * diropargs from;
+ * diropargs to;
+ * };
+ */
+static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_renameargs *args)
+{
+ const struct qstr *old = args->old_name;
+ const struct qstr *new = args->new_name;
+
+ encode_diropargs(xdr, args->old_dir, old->name, old->len);
+ encode_diropargs(xdr, args->new_dir, new->name, new->len);
+}
+
+/*
+ * 2.2.13. linkargs
+ *
+ * struct linkargs {
+ * fhandle from;
+ * diropargs to;
+ * };
+ */
+static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_linkargs *args)
+{
+ encode_fhandle(xdr, args->fromfh);
+ encode_diropargs(xdr, args->tofh, args->toname, args->tolen);
+}
+
+/*
+ * 2.2.14. symlinkargs
+ *
+ * struct symlinkargs {
+ * diropargs from;
+ * path to;
+ * sattr attributes;
+ * };
+ */
+static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_symlinkargs *args)
+{
+ encode_diropargs(xdr, args->fromfh, args->fromname, args->fromlen);
+ encode_path(xdr, args->pages, args->pathlen);
+ encode_sattr(xdr, args->sattr);
+}
+
+/*
+ * 2.2.17. readdirargs
+ *
+ * struct readdirargs {
+ * fhandle dir;
+ * nfscookie cookie;
+ * unsigned count;
+ * };
+ */
+static void encode_readdirargs(struct xdr_stream *xdr,
+ const struct nfs_readdirargs *args)
+{
+ __be32 *p;
+
+ encode_fhandle(xdr, args->fh);
+
+ p = xdr_reserve_space(xdr, 4 + 4);
+ *p++ = cpu_to_be32(args->cookie);
+ *p = cpu_to_be32(args->count);
+}
+
+static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_readdirargs *args)
+{
+ encode_readdirargs(xdr, args);
+ prepare_reply_buffer(req, args->pages, 0,
+ args->count, NFS_readdirres_sz);
+}
+
+/*
+ * NFSv2 XDR decode functions
+ *
+ * NFSv2 result types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ */
+
+static int nfs2_xdr_dec_stat(struct rpc_rqst *req, struct xdr_stream *xdr,
+ void *__unused)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_fattr *result)
+{
+ return decode_attrstat(xdr, result);
+}
+
+static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_diropok *result)
+{
+ return decode_diropres(xdr, result);
+}
+
+/*
+ * 2.2.6. readlinkres
+ *
+ * union readlinkres switch (stat status) {
+ * case NFS_OK:
+ * path data;
+ * default:
+ * void;
+ * };
+ */
+static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req,
+ struct xdr_stream *xdr, void *__unused)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_path(xdr);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 2.2.7. readres
+ *
+ * union readres switch (stat status) {
+ * case NFS_OK:
+ * fattr attributes;
+ * nfsdata data;
+ * default:
+ * void;
+ * };
+ */
+static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_readres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_fattr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_nfsdata(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_writeres *result)
+{
+ /* All NFSv2 writes are "file sync" writes */
+ result->verf->committed = NFS_FILE_SYNC;
+ return decode_attrstat(xdr, result->fattr);
+}
+
+/**
+ * nfs2_decode_dirent - Decode a single NFSv2 directory entry stored in
+ * the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful; otherwise a negative errno value.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 2.2.17. entry
+ *
+ * struct entry {
+ * unsigned fileid;
+ * filename name;
+ * nfscookie cookie;
+ * entry *nextentry;
+ * };
+ */
+int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ int plus)
+{
+ __be32 *p;
+ int error;
+
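+ /* A zero "value follows" word ends the entry stream; the eof flag
+ * comes next. eof == 0 asks the caller to fetch more entries,
+ * eof != 0 marks the true end of the directory. */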
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p++ == xdr_zero) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p++ == xdr_zero)
+ return -EAGAIN;
+ entry->eof = 1;
+ return -EBADCOOKIE;
+ }
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ entry->ino = be32_to_cpup(p);
+
+ error = decode_filename_inline(xdr, &entry->name, &entry->len);
+ if (unlikely(error))
+ return error;
+
+ /*
+ * The type (size and byte order) of nfscookie isn't defined in
+ * RFC 1094. This implementation assumes that it's an XDR uint32.
+ */
+ entry->prev_cookie = entry->cookie;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ entry->cookie = be32_to_cpup(p);
+
+ entry->d_type = DT_UNKNOWN;
+
+ return 0;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EAGAIN;
+}
+
+/*
+ * 2.2.17. readdirres
+ *
+ * union readdirres switch (stat status) {
+ * case NFS_OK:
+ * struct {
+ * entry *entries;
+ * bool eof;
+ * } readdirok;
+ * default:
+ * void;
+ * };
+ *
+ * Read the directory contents into the page cache, but don't
+ * touch them. The actual decoding is done by nfs2_decode_dirent()
+ * during subsequent nfs_readdir() calls.
+ */
+static int decode_readdirok(struct xdr_stream *xdr)
+{
+ u32 recvd, pglen;
+ size_t hdrlen;
+
+ pglen = xdr->buf->page_len;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(pglen > recvd))
+ goto out_cheating;
+out:
+ xdr_read_pages(xdr, pglen);
+ return pglen;
+out_cheating:
+ dprintk("NFS: server cheating in readdir result: "
+ "pglen %u > recvd %u\n", pglen, recvd);
+ pglen = recvd;
+ goto out;
+}
+
+static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req,
+ struct xdr_stream *xdr, void *__unused)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_readdirok(xdr);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 2.2.18. statfsres
+ *
+ * union statfsres (stat status) {
+ * case NFS_OK:
+ * struct {
+ * unsigned tsize;
+ * unsigned bsize;
+ * unsigned blocks;
+ * unsigned bfree;
+ * unsigned bavail;
+ * } info;
+ * default:
+ * void;
+ * };
+ */
+static int decode_info(struct xdr_stream *xdr, struct nfs2_fsstat *result)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS_info_sz << 2);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ result->tsize = be32_to_cpup(p++);
+ result->bsize = be32_to_cpup(p++);
+ result->blocks = be32_to_cpup(p++);
+ result->bfree = be32_to_cpup(p++);
+ result->bavail = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs2_fsstat *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_stat(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS_OK)
+ goto out_default;
+ error = decode_info(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+
+/*
+ * We need to translate between NFS status return values and
+ * the local errno values, which may not be the same.
+ */
+static const struct {
+ int stat;
+ int errno;
+} nfs_errtbl[] = {
+ { NFS_OK, 0 },
+ { NFSERR_PERM, -EPERM },
+ { NFSERR_NOENT, -ENOENT },
+ { NFSERR_IO, -errno_NFSERR_IO},
+ { NFSERR_NXIO, -ENXIO },
+/* { NFSERR_EAGAIN, -EAGAIN }, */
+ { NFSERR_ACCES, -EACCES },
+ { NFSERR_EXIST, -EEXIST },
+ { NFSERR_XDEV, -EXDEV },
+ { NFSERR_NODEV, -ENODEV },
+ { NFSERR_NOTDIR, -ENOTDIR },
+ { NFSERR_ISDIR, -EISDIR },
+ { NFSERR_INVAL, -EINVAL },
+ { NFSERR_FBIG, -EFBIG },
+ { NFSERR_NOSPC, -ENOSPC },
+ { NFSERR_ROFS, -EROFS },
+ { NFSERR_MLINK, -EMLINK },
+ { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFSERR_NOTEMPTY, -ENOTEMPTY },
+ { NFSERR_DQUOT, -EDQUOT },
+ { NFSERR_STALE, -ESTALE },
+ { NFSERR_REMOTE, -EREMOTE },
+#ifdef EWFLUSH
+ { NFSERR_WFLUSH, -EWFLUSH },
+#endif
+ { NFSERR_BADHANDLE, -EBADHANDLE },
+ { NFSERR_NOT_SYNC, -ENOTSYNC },
+ { NFSERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFSERR_NOTSUPP, -ENOTSUPP },
+ { NFSERR_TOOSMALL, -ETOOSMALL },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
+ { NFSERR_BADTYPE, -EBADTYPE },
+ { NFSERR_JUKEBOX, -EJUKEBOX },
+ { -1, -EIO }
+};
+
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized. This function is used jointly by NFSv2 and NFSv3.
+ */
+int nfs_stat_to_errno(enum nfs_stat status)
+{
+ int i;
+
+ for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+ if (nfs_errtbl[i].stat == (int)status)
+ return nfs_errtbl[i].errno;
+ }
+ dprintk("NFS: Unrecognized nfs status value: %u\n", status);
+ return nfs_errtbl[i].errno;
+}
+
+#define PROC(proc, argtype, restype, timer) \
+[NFSPROC_##proc] = { \
+ .p_proc = NFSPROC_##proc, \
+ .p_encode = (kxdreproc_t)nfs2_xdr_enc_##argtype, \
+ .p_decode = (kxdrdproc_t)nfs2_xdr_dec_##restype, \
+ .p_arglen = NFS_##argtype##_sz, \
+ .p_replen = NFS_##restype##_sz, \
+ .p_timer = timer, \
+ .p_statidx = NFSPROC_##proc, \
+ .p_name = #proc, \
+ }
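+
+/*
+ * For illustration only (comment, not compiled):
+ * PROC(GETATTR, fhandle, attrstat, 1) expands to
+ *
+ *	[NFSPROC_GETATTR] = {
+ *		.p_proc    = NFSPROC_GETATTR,
+ *		.p_encode  = (kxdreproc_t)nfs2_xdr_enc_fhandle,
+ *		.p_decode  = (kxdrdproc_t)nfs2_xdr_dec_attrstat,
+ *		.p_arglen  = NFS_fhandle_sz,
+ *		.p_replen  = NFS_attrstat_sz,
+ *		.p_timer   = 1,
+ *		.p_statidx = NFSPROC_GETATTR,
+ *		.p_name    = "GETATTR",
+ *	},
+ */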
+struct rpc_procinfo nfs_procedures[] = {
+ PROC(GETATTR, fhandle, attrstat, 1),
+ PROC(SETATTR, sattrargs, attrstat, 0),
+ PROC(LOOKUP, diropargs, diropres, 2),
+ PROC(READLINK, readlinkargs, readlinkres, 3),
+ PROC(READ, readargs, readres, 3),
+ PROC(WRITE, writeargs, writeres, 4),
+ PROC(CREATE, createargs, diropres, 0),
+ PROC(REMOVE, removeargs, stat, 0),
+ PROC(RENAME, renameargs, stat, 0),
+ PROC(LINK, linkargs, stat, 0),
+ PROC(SYMLINK, symlinkargs, stat, 0),
+ PROC(MKDIR, createargs, diropres, 0),
+ PROC(RMDIR, diropargs, stat, 0),
+ PROC(READDIR, readdirargs, readdirres, 3),
+ PROC(STATFS, fhandle, statfsres, 0),
+};
+
+const struct rpc_version nfs_version2 = {
+ .number = 2,
+ .nrprocs = ARRAY_SIZE(nfs_procedures),
+ .procs = nfs_procedures
+};
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
new file mode 100644
index 00000000..e4498dc3
--- /dev/null
+++ b/fs/nfs/nfs3acl.c
@@ -0,0 +1,440 @@
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/nfs.h>
+#include <linux/nfs3.h>
+#include <linux/nfs_fs.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/nfsacl.h>
+
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_PROC
+
+ssize_t nfs3_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct posix_acl *acl;
+ int pos=0, len=0;
+
+# define output(s) do { \
+ if (pos + sizeof(s) <= size) { \
+ memcpy(buffer + pos, s, sizeof(s)); \
+ pos += sizeof(s); \
+ } \
+ len += sizeof(s); \
+ } while(0)
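+ /* sizeof() on the string literals above includes the trailing
+ * NUL, so each name is emitted NUL-terminated as the listxattr
+ * ABI requires */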
+
+ acl = nfs3_proc_getacl(inode, ACL_TYPE_ACCESS);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl) {
+ output("system.posix_acl_access");
+ posix_acl_release(acl);
+ }
+
+ if (S_ISDIR(inode->i_mode)) {
+ acl = nfs3_proc_getacl(inode, ACL_TYPE_DEFAULT);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ if (acl) {
+ output("system.posix_acl_default");
+ posix_acl_release(acl);
+ }
+ }
+
+# undef output
+
+ if (!buffer || len <= size)
+ return len;
+ return -ERANGE;
+}
+
+ssize_t nfs3_getxattr(struct dentry *dentry, const char *name,
+ void *buffer, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct posix_acl *acl;
+ int type, error = 0;
+
+ if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0)
+ type = ACL_TYPE_ACCESS;
+ else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0)
+ type = ACL_TYPE_DEFAULT;
+ else
+ return -EOPNOTSUPP;
+
+ acl = nfs3_proc_getacl(inode, type);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ else if (acl) {
+ if (type == ACL_TYPE_ACCESS && acl->a_count == 0)
+ error = -ENODATA;
+ else
+ error = posix_acl_to_xattr(acl, buffer, size);
+ posix_acl_release(acl);
+ } else
+ error = -ENODATA;
+
+ return error;
+}
+
+int nfs3_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+ struct posix_acl *acl;
+ int type, error;
+
+ if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0)
+ type = ACL_TYPE_ACCESS;
+ else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0)
+ type = ACL_TYPE_DEFAULT;
+ else
+ return -EOPNOTSUPP;
+
+ acl = posix_acl_from_xattr(value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ error = nfs3_proc_setacl(inode, type, acl);
+ posix_acl_release(acl);
+
+ return error;
+}
+
+int nfs3_removexattr(struct dentry *dentry, const char *name)
+{
+ struct inode *inode = dentry->d_inode;
+ int type;
+
+ if (strcmp(name, POSIX_ACL_XATTR_ACCESS) == 0)
+ type = ACL_TYPE_ACCESS;
+ else if (strcmp(name, POSIX_ACL_XATTR_DEFAULT) == 0)
+ type = ACL_TYPE_DEFAULT;
+ else
+ return -EOPNOTSUPP;
+
+ return nfs3_proc_setacl(inode, type, NULL);
+}
+
+static void __nfs3_forget_cached_acls(struct nfs_inode *nfsi)
+{
+ if (!IS_ERR(nfsi->acl_access)) {
+ posix_acl_release(nfsi->acl_access);
+ nfsi->acl_access = ERR_PTR(-EAGAIN);
+ }
+ if (!IS_ERR(nfsi->acl_default)) {
+ posix_acl_release(nfsi->acl_default);
+ nfsi->acl_default = ERR_PTR(-EAGAIN);
+ }
+}
+
+void nfs3_forget_cached_acls(struct inode *inode)
+{
+ dprintk("NFS: nfs3_forget_cached_acls(%s/%ld)\n", inode->i_sb->s_id,
+ inode->i_ino);
+ spin_lock(&inode->i_lock);
+ __nfs3_forget_cached_acls(NFS_I(inode));
+ spin_unlock(&inode->i_lock);
+}
+
+static struct posix_acl *nfs3_get_cached_acl(struct inode *inode, int type)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct posix_acl *acl = ERR_PTR(-EINVAL);
+
+ spin_lock(&inode->i_lock);
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ acl = nfsi->acl_access;
+ break;
+
+ case ACL_TYPE_DEFAULT:
+ acl = nfsi->acl_default;
+ break;
+
+ default:
+ goto out;
+ }
+ if (IS_ERR(acl))
+ acl = ERR_PTR(-EAGAIN);
+ else
+ acl = posix_acl_dup(acl);
+out:
+ spin_unlock(&inode->i_lock);
+ dprintk("NFS: nfs3_get_cached_acl(%s/%ld, %d) = %p\n", inode->i_sb->s_id,
+ inode->i_ino, type, acl);
+ return acl;
+}
+
+static void nfs3_cache_acls(struct inode *inode, struct posix_acl *acl,
+ struct posix_acl *dfacl)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ dprintk("nfs3_cache_acls(%s/%ld, %p, %p)\n", inode->i_sb->s_id,
+ inode->i_ino, acl, dfacl);
+ spin_lock(&inode->i_lock);
+ __nfs3_forget_cached_acls(NFS_I(inode));
+ if (!IS_ERR(acl))
+ nfsi->acl_access = posix_acl_dup(acl);
+ if (!IS_ERR(dfacl))
+ nfsi->acl_default = posix_acl_dup(dfacl);
+ spin_unlock(&inode->i_lock);
+}
+
+struct posix_acl *nfs3_proc_getacl(struct inode *inode, int type)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct page *pages[NFSACL_MAXPAGES] = { };
+ struct nfs3_getaclargs args = {
+ .fh = NFS_FH(inode),
+ /* The xdr layer may allocate pages here. */
+ .pages = pages,
+ };
+ struct nfs3_getaclres res = {
+ NULL,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ struct posix_acl *acl;
+ int status, count;
+
+ if (!nfs_server_capable(inode, NFS_CAP_ACLS))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ status = nfs_revalidate_inode(server, inode);
+ if (status < 0)
+ return ERR_PTR(status);
+ acl = nfs3_get_cached_acl(inode, type);
+ if (acl != ERR_PTR(-EAGAIN))
+ return acl;
+ acl = NULL;
+
+ /*
+ * Only get the access acl when explicitly requested: we don't
+ * need it for access decisions, and only some applications use
+ * it. Applications which request the access acl first are not
+ * penalized by this optimization.
+ */
+ if (type == ACL_TYPE_ACCESS)
+ args.mask |= NFS_ACLCNT|NFS_ACL;
+ if (S_ISDIR(inode->i_mode))
+ args.mask |= NFS_DFACLCNT|NFS_DFACL;
+ if (args.mask == 0)
+ return NULL;
+
+ dprintk("NFS call getacl\n");
+ msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_GETACL];
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ status = rpc_call_sync(server->client_acl, &msg, 0);
+ dprintk("NFS reply getacl: %d\n", status);
+
+ /* pages may have been allocated at the xdr layer. */
+ for (count = 0; count < NFSACL_MAXPAGES && args.pages[count]; count++)
+ __free_page(args.pages[count]);
+
+ switch (status) {
+ case 0:
+ status = nfs_refresh_inode(inode, res.fattr);
+ break;
+ case -EPFNOSUPPORT:
+ case -EPROTONOSUPPORT:
+ dprintk("NFS_V3_ACL extension not supported; disabling\n");
+ server->caps &= ~NFS_CAP_ACLS;
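+ /* fall through */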
+ case -ENOTSUPP:
+ status = -EOPNOTSUPP;
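+ /* fall through */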
+ default:
+ goto getout;
+ }
+ if ((args.mask & res.mask) != args.mask) {
+ status = -EIO;
+ goto getout;
+ }
+
+ if (res.acl_access != NULL) {
+ if (posix_acl_equiv_mode(res.acl_access, NULL) == 0) {
+ posix_acl_release(res.acl_access);
+ res.acl_access = NULL;
+ }
+ }
+ nfs3_cache_acls(inode,
+ (res.mask & NFS_ACL) ? res.acl_access : ERR_PTR(-EINVAL),
+ (res.mask & NFS_DFACL) ? res.acl_default : ERR_PTR(-EINVAL));
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ acl = res.acl_access;
+ res.acl_access = NULL;
+ break;
+
+ case ACL_TYPE_DEFAULT:
+ acl = res.acl_default;
+ res.acl_default = NULL;
+ }
+
+getout:
+ posix_acl_release(res.acl_access);
+ posix_acl_release(res.acl_default);
+ nfs_free_fattr(res.fattr);
+
+ if (status != 0) {
+ posix_acl_release(acl);
+ acl = ERR_PTR(status);
+ }
+ return acl;
+}
+
+static int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+ struct posix_acl *dfacl)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_fattr *fattr;
+ struct page *pages[NFSACL_MAXPAGES];
+ struct nfs3_setaclargs args = {
+ .inode = inode,
+ .mask = NFS_ACL,
+ .acl_access = acl,
+ .pages = pages,
+ };
+ struct rpc_message msg = {
+ .rpc_argp = &args,
+ };
+ int status;
+
+ status = -EOPNOTSUPP;
+ if (!nfs_server_capable(inode, NFS_CAP_ACLS))
+ goto out;
+
+ /* We check the ACL sizes here because the XDR marshalling code
+ * cannot return an error; it BUGs instead. */
+ status = -ENOSPC;
+ if (acl != NULL && acl->a_count > NFS_ACL_MAX_ENTRIES)
+ goto out;
+ if (dfacl != NULL && dfacl->a_count > NFS_ACL_MAX_ENTRIES)
+ goto out;
+ if (S_ISDIR(inode->i_mode)) {
+ args.mask |= NFS_DFACL;
+ args.acl_default = dfacl;
+ args.len = nfsacl_size(acl, dfacl);
+ } else
+ args.len = nfsacl_size(acl, NULL);
+
+ if (args.len > NFS_ACL_INLINE_BUFSIZE) {
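+ /* Round up: the number of whole pages needed to hold
+ * args.len bytes, i.e. DIV_ROUND_UP(args.len, PAGE_SIZE) */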
+ unsigned int npages = 1 + ((args.len - 1) >> PAGE_SHIFT);
+
+ status = -ENOMEM;
+ do {
+ args.pages[args.npages] = alloc_page(GFP_KERNEL);
+ if (args.pages[args.npages] == NULL)
+ goto out_freepages;
+ args.npages++;
+ } while (args.npages < npages);
+ }
+
+ dprintk("NFS call setacl\n");
+ status = -ENOMEM;
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out_freepages;
+
+ msg.rpc_proc = &server->client_acl->cl_procinfo[ACLPROC3_SETACL];
+ msg.rpc_resp = fattr;
+ status = rpc_call_sync(server->client_acl, &msg, 0);
+ nfs_access_zap_cache(inode);
+ nfs_zap_acl_cache(inode);
+ dprintk("NFS reply setacl: %d\n", status);
+
+ switch (status) {
+ case 0:
+ status = nfs_refresh_inode(inode, fattr);
+ nfs3_cache_acls(inode, acl, dfacl);
+ break;
+ case -EPFNOSUPPORT:
+ case -EPROTONOSUPPORT:
+ dprintk("NFS_V3_ACL SETACL RPC not supported"
+ "(will not retry)\n");
+ server->caps &= ~NFS_CAP_ACLS;
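+ /* fall through */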
+ case -ENOTSUPP:
+ status = -EOPNOTSUPP;
+ }
+ nfs_free_fattr(fattr);
+out_freepages:
+ while (args.npages != 0) {
+ args.npages--;
+ __free_page(args.pages[args.npages]);
+ }
+out:
+ return status;
+}
+
+int nfs3_proc_setacl(struct inode *inode, int type, struct posix_acl *acl)
+{
+ struct posix_acl *alloc = NULL, *dfacl = NULL;
+ int status;
+
+ if (S_ISDIR(inode->i_mode)) {
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ alloc = dfacl = nfs3_proc_getacl(inode,
+ ACL_TYPE_DEFAULT);
+ if (IS_ERR(alloc))
+ goto fail;
+ break;
+
+ case ACL_TYPE_DEFAULT:
+ dfacl = acl;
+ alloc = acl = nfs3_proc_getacl(inode,
+ ACL_TYPE_ACCESS);
+ if (IS_ERR(alloc))
+ goto fail;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else if (type != ACL_TYPE_ACCESS)
+ return -EINVAL;
+
+ if (acl == NULL) {
+ alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
+ if (IS_ERR(alloc))
+ goto fail;
+ }
+ status = nfs3_proc_setacls(inode, acl, dfacl);
+ posix_acl_release(alloc);
+ return status;
+
+fail:
+ return PTR_ERR(alloc);
+}
+
+int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
+ umode_t mode)
+{
+ struct posix_acl *dfacl, *acl;
+ int error = 0;
+
+ dfacl = nfs3_proc_getacl(dir, ACL_TYPE_DEFAULT);
+ if (IS_ERR(dfacl)) {
+ error = PTR_ERR(dfacl);
+ return (error == -EOPNOTSUPP) ? 0 : error;
+ }
+ if (!dfacl)
+ return 0;
+ acl = posix_acl_dup(dfacl);
+ error = posix_acl_create(&acl, GFP_KERNEL, &mode);
+ if (error < 0)
+ goto out_release_dfacl;
+ error = nfs3_proc_setacls(inode, acl, S_ISDIR(inode->i_mode) ?
+ dfacl : NULL);
+ posix_acl_release(acl);
+out_release_dfacl:
+ posix_acl_release(dfacl);
+ return error;
+}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
new file mode 100644
index 00000000..5242eae6
--- /dev/null
+++ b/fs/nfs/nfs3proc.c
@@ -0,0 +1,915 @@
+/*
+ * linux/fs/nfs/nfs3proc.c
+ *
+ * Client-side NFSv3 procedure stubs.
+ *
+ * Copyright (C) 1997, Olaf Kirch
+ */
+
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/slab.h>
+#include <linux/nfs.h>
+#include <linux/nfs3.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/lockd/bind.h>
+#include <linux/nfs_mount.h>
+#include <linux/freezer.h>
+
+#include "iostat.h"
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_PROC
+
+/* A wrapper to handle the EJUKEBOX and EKEYEXPIRED error codes */
+static int
+nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
+{
+ int res;
+ do {
+ res = rpc_call_sync(clnt, msg, flags);
+ if (res != -EJUKEBOX && res != -EKEYEXPIRED)
+ break;
+ freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+ res = -ERESTARTSYS;
+ } while (!fatal_signal_pending(current));
+ return res;
+}
+
+#define rpc_call_sync(clnt, msg, flags) nfs3_rpc_wrapper(clnt, msg, flags)
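+
+/*
+ * Note: from this point on, every literal rpc_call_sync() call in this
+ * file is routed through nfs3_rpc_wrapper() by the macro above, so all
+ * synchronous NFSv3 calls transparently retry on EJUKEBOX/EKEYEXPIRED.
+ */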
+
+static int
+nfs3_async_handle_jukebox(struct rpc_task *task, struct inode *inode)
+{
+ if (task->tk_status != -EJUKEBOX && task->tk_status != -EKEYEXPIRED)
+ return 0;
+ if (task->tk_status == -EJUKEBOX)
+ nfs_inc_stats(inode, NFSIOS_DELAY);
+ task->tk_status = 0;
+ rpc_restart_call(task);
+ rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
+ return 1;
+}
+
+static int
+do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_FSINFO],
+ .rpc_argp = fhandle,
+ .rpc_resp = info,
+ };
+ int status;
+
+ dprintk("%s: call fsinfo\n", __func__);
+ nfs_fattr_init(info->fattr);
+ status = rpc_call_sync(client, &msg, 0);
+ dprintk("%s: reply fsinfo: %d\n", __func__, status);
+ if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
+ msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
+ msg.rpc_resp = info->fattr;
+ status = rpc_call_sync(client, &msg, 0);
+ dprintk("%s: reply getattr: %d\n", __func__, status);
+ }
+ return status;
+}
+
+/*
+ * Bare-bones access to getattr: this is for nfs_get_root/nfs_get_sb
+ */
+static int
+nfs3_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ int status;
+
+ status = do_proc_get_root(server->client, fhandle, info);
+ if (status && server->nfs_client->cl_rpcclient != server->client)
+ status = do_proc_get_root(server->nfs_client->cl_rpcclient, fhandle, info);
+ return status;
+}
+
+/*
+ * One function for each procedure in the NFS protocol.
+ */
+static int
+nfs3_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR],
+ .rpc_argp = fhandle,
+ .rpc_resp = fattr,
+ };
+ int status;
+
+ dprintk("NFS call getattr\n");
+ nfs_fattr_init(fattr);
+ status = rpc_call_sync(server->client, &msg, 0);
+ dprintk("NFS reply getattr: %d\n", status);
+ return status;
+}
+
+static int
+nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+ struct iattr *sattr)
+{
+ struct inode *inode = dentry->d_inode;
+ struct nfs3_sattrargs arg = {
+ .fh = NFS_FH(inode),
+ .sattr = sattr,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_SETATTR],
+ .rpc_argp = &arg,
+ .rpc_resp = fattr,
+ };
+ int status;
+
+ dprintk("NFS call setattr\n");
+ if (sattr->ia_valid & ATTR_FILE)
+ msg.rpc_cred = nfs_file_cred(sattr->ia_file);
+ nfs_fattr_init(fattr);
+ status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ if (status == 0)
+ nfs_setattr_update_inode(inode, sattr);
+ dprintk("NFS reply setattr: %d\n", status);
+ return status;
+}
+
+static int
+nfs3_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+ struct nfs3_diropargs arg = {
+ .fh = NFS_FH(dir),
+ .name = name->name,
+ .len = name->len
+ };
+ struct nfs3_diropres res = {
+ .fh = fhandle,
+ .fattr = fattr
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_LOOKUP],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ dprintk("NFS call lookup %s\n", name->name);
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ return -ENOMEM;
+
+ nfs_fattr_init(fattr);
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_refresh_inode(dir, res.dir_attr);
+ if (status >= 0 && !(fattr->valid & NFS_ATTR_FATTR)) {
+ msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
+ msg.rpc_argp = fhandle;
+ msg.rpc_resp = fattr;
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ }
+ nfs_free_fattr(res.dir_attr);
+ dprintk("NFS reply lookup: %d\n", status);
+ return status;
+}
+
+static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+{
+ struct nfs3_accessargs arg = {
+ .fh = NFS_FH(inode),
+ };
+ struct nfs3_accessres res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_ACCESS],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ .rpc_cred = entry->cred,
+ };
+ int mode = entry->mask;
+ int status = -ENOMEM;
+
+ dprintk("NFS call access\n");
+
+ if (mode & MAY_READ)
+ arg.access |= NFS3_ACCESS_READ;
+ if (S_ISDIR(inode->i_mode)) {
+ if (mode & MAY_WRITE)
+ arg.access |= NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND | NFS3_ACCESS_DELETE;
+ if (mode & MAY_EXEC)
+ arg.access |= NFS3_ACCESS_LOOKUP;
+ } else {
+ if (mode & MAY_WRITE)
+ arg.access |= NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND;
+ if (mode & MAY_EXEC)
+ arg.access |= NFS3_ACCESS_EXECUTE;
+ }
+
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ goto out;
+
+ status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ nfs_refresh_inode(inode, res.fattr);
+ if (status == 0) {
+ entry->mask = 0;
+ if (res.access & NFS3_ACCESS_READ)
+ entry->mask |= MAY_READ;
+ if (res.access & (NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND | NFS3_ACCESS_DELETE))
+ entry->mask |= MAY_WRITE;
+ if (res.access & (NFS3_ACCESS_LOOKUP|NFS3_ACCESS_EXECUTE))
+ entry->mask |= MAY_EXEC;
+ }
+ nfs_free_fattr(res.fattr);
+out:
+ dprintk("NFS reply access: %d\n", status);
+ return status;
+}
+
+static int nfs3_proc_readlink(struct inode *inode, struct page *page,
+ unsigned int pgbase, unsigned int pglen)
+{
+ struct nfs_fattr *fattr;
+ struct nfs3_readlinkargs args = {
+ .fh = NFS_FH(inode),
+ .pgbase = pgbase,
+ .pglen = pglen,
+ .pages = &page
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_READLINK],
+ .rpc_argp = &args,
+ };
+ int status = -ENOMEM;
+
+ dprintk("NFS call readlink\n");
+ fattr = nfs_alloc_fattr();
+ if (fattr == NULL)
+ goto out;
+ msg.rpc_resp = fattr;
+
+ status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ nfs_refresh_inode(inode, fattr);
+ nfs_free_fattr(fattr);
+out:
+ dprintk("NFS reply readlink: %d\n", status);
+ return status;
+}
+
+struct nfs3_createdata {
+ struct rpc_message msg;
+ union {
+ struct nfs3_createargs create;
+ struct nfs3_mkdirargs mkdir;
+ struct nfs3_symlinkargs symlink;
+ struct nfs3_mknodargs mknod;
+ } arg;
+ struct nfs3_diropres res;
+ struct nfs_fh fh;
+ struct nfs_fattr fattr;
+ struct nfs_fattr dir_attr;
+};
+
+static struct nfs3_createdata *nfs3_alloc_createdata(void)
+{
+ struct nfs3_createdata *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data != NULL) {
+ data->msg.rpc_argp = &data->arg;
+ data->msg.rpc_resp = &data->res;
+ data->res.fh = &data->fh;
+ data->res.fattr = &data->fattr;
+ data->res.dir_attr = &data->dir_attr;
+ nfs_fattr_init(data->res.fattr);
+ nfs_fattr_init(data->res.dir_attr);
+ }
+ return data;
+}
+
+static int nfs3_do_create(struct inode *dir, struct dentry *dentry, struct nfs3_createdata *data)
+{
+ int status;
+
+ status = rpc_call_sync(NFS_CLIENT(dir), &data->msg, 0);
+ nfs_post_op_update_inode(dir, data->res.dir_attr);
+ if (status == 0)
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ return status;
+}
+
+static void nfs3_free_createdata(struct nfs3_createdata *data)
+{
+ kfree(data);
+}
+
+/*
+ * Create a regular file.
+ */
+static int
+nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ int flags, struct nfs_open_context *ctx)
+{
+ struct nfs3_createdata *data;
+ umode_t mode = sattr->ia_mode;
+ int status = -ENOMEM;
+
+ dprintk("NFS call create %s\n", dentry->d_name.name);
+
+ data = nfs3_alloc_createdata();
+ if (data == NULL)
+ goto out;
+
+ data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_CREATE];
+ data->arg.create.fh = NFS_FH(dir);
+ data->arg.create.name = dentry->d_name.name;
+ data->arg.create.len = dentry->d_name.len;
+ data->arg.create.sattr = sattr;
+
+ data->arg.create.createmode = NFS3_CREATE_UNCHECKED;
+ if (flags & O_EXCL) {
+ data->arg.create.createmode = NFS3_CREATE_EXCLUSIVE;
+ data->arg.create.verifier[0] = jiffies;
+ data->arg.create.verifier[1] = current->pid;
+ }
+
+ sattr->ia_mode &= ~current_umask();
+
+ for (;;) {
+ status = nfs3_do_create(dir, dentry, data);
+
+ if (status != -ENOTSUPP)
+ break;
+ /* If the server doesn't support the exclusive creation
+ * semantics, try again with simple 'guarded' mode. */
+ switch (data->arg.create.createmode) {
+ case NFS3_CREATE_EXCLUSIVE:
+ data->arg.create.createmode = NFS3_CREATE_GUARDED;
+ break;
+
+ case NFS3_CREATE_GUARDED:
+ data->arg.create.createmode = NFS3_CREATE_UNCHECKED;
+ break;
+
+ case NFS3_CREATE_UNCHECKED:
+ goto out;
+ }
+ nfs_fattr_init(data->res.dir_attr);
+ nfs_fattr_init(data->res.fattr);
+ }
+
+ if (status != 0)
+ goto out;
+
+ /* When we created the file with exclusive semantics, make
+ * sure we set the attributes afterwards. */
+ if (data->arg.create.createmode == NFS3_CREATE_EXCLUSIVE) {
+ dprintk("NFS call setattr (post-create)\n");
+
+ if (!(sattr->ia_valid & ATTR_ATIME_SET))
+ sattr->ia_valid |= ATTR_ATIME;
+ if (!(sattr->ia_valid & ATTR_MTIME_SET))
+ sattr->ia_valid |= ATTR_MTIME;
+
+ /* Note: we could use a guarded setattr here, but I'm
+ * not sure this buys us anything (and I'd have
+ * to revamp the NFSv3 XDR code) */
+ status = nfs3_proc_setattr(dentry, data->res.fattr, sattr);
+ nfs_post_op_update_inode(dentry->d_inode, data->res.fattr);
+ dprintk("NFS reply setattr (post-create): %d\n", status);
+ if (status != 0)
+ goto out;
+ }
+ status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode);
+out:
+ nfs3_free_createdata(data);
+ dprintk("NFS reply create: %d\n", status);
+ return status;
+}
+
+static int
+nfs3_proc_remove(struct inode *dir, struct qstr *name)
+{
+ struct nfs_removeargs arg = {
+ .fh = NFS_FH(dir),
+ .name.len = name->len,
+ .name.name = name->name,
+ };
+ struct nfs_removeres res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int status = -ENOMEM;
+
+ dprintk("NFS call remove %s\n", name->name);
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ goto out;
+
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ nfs_free_fattr(res.dir_attr);
+out:
+ dprintk("NFS reply remove: %d\n", status);
+ return status;
+}
+
+static void
+nfs3_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
+{
+ msg->rpc_proc = &nfs3_procedures[NFS3PROC_REMOVE];
+}
+
+static void nfs3_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
+{
+ rpc_call_start(task);
+}
+
+static int
+nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir)
+{
+ struct nfs_removeres *res;
+ if (nfs3_async_handle_jukebox(task, dir))
+ return 0;
+ res = task->tk_msg.rpc_resp;
+ nfs_post_op_update_inode(dir, res->dir_attr);
+ return 1;
+}
+
+static void
+nfs3_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
+{
+ msg->rpc_proc = &nfs3_procedures[NFS3PROC_RENAME];
+}
+
+static void nfs3_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
+{
+ rpc_call_start(task);
+}
+
+static int
+nfs3_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
+ struct inode *new_dir)
+{
+ struct nfs_renameres *res;
+
+ if (nfs3_async_handle_jukebox(task, old_dir))
+ return 0;
+ res = task->tk_msg.rpc_resp;
+
+ nfs_post_op_update_inode(old_dir, res->old_fattr);
+ nfs_post_op_update_inode(new_dir, res->new_fattr);
+ return 1;
+}
+
+static int
+nfs3_proc_rename(struct inode *old_dir, struct qstr *old_name,
+ struct inode *new_dir, struct qstr *new_name)
+{
+ struct nfs_renameargs arg = {
+ .old_dir = NFS_FH(old_dir),
+ .old_name = old_name,
+ .new_dir = NFS_FH(new_dir),
+ .new_name = new_name,
+ };
+ struct nfs_renameres res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_RENAME],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int status = -ENOMEM;
+
+ dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
+
+ res.old_fattr = nfs_alloc_fattr();
+ res.new_fattr = nfs_alloc_fattr();
+ if (res.old_fattr == NULL || res.new_fattr == NULL)
+ goto out;
+
+ status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
+ nfs_post_op_update_inode(old_dir, res.old_fattr);
+ nfs_post_op_update_inode(new_dir, res.new_fattr);
+out:
+ nfs_free_fattr(res.old_fattr);
+ nfs_free_fattr(res.new_fattr);
+ dprintk("NFS reply rename: %d\n", status);
+ return status;
+}
+
+static int
+nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+{
+ struct nfs3_linkargs arg = {
+ .fromfh = NFS_FH(inode),
+ .tofh = NFS_FH(dir),
+ .toname = name->name,
+ .tolen = name->len
+ };
+ struct nfs3_linkres res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_LINK],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int status = -ENOMEM;
+
+ dprintk("NFS call link %s\n", name->name);
+ res.fattr = nfs_alloc_fattr();
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.fattr == NULL || res.dir_attr == NULL)
+ goto out;
+
+ status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ nfs_post_op_update_inode(inode, res.fattr);
+out:
+ nfs_free_fattr(res.dir_attr);
+ nfs_free_fattr(res.fattr);
+ dprintk("NFS reply link: %d\n", status);
+ return status;
+}
+
+static int
+nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
+ unsigned int len, struct iattr *sattr)
+{
+ struct nfs3_createdata *data;
+ int status = -ENOMEM;
+
+ if (len > NFS3_MAXPATHLEN)
+ return -ENAMETOOLONG;
+
+ dprintk("NFS call symlink %s\n", dentry->d_name.name);
+
+ data = nfs3_alloc_createdata();
+ if (data == NULL)
+ goto out;
+ data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_SYMLINK];
+ data->arg.symlink.fromfh = NFS_FH(dir);
+ data->arg.symlink.fromname = dentry->d_name.name;
+ data->arg.symlink.fromlen = dentry->d_name.len;
+ data->arg.symlink.pages = &page;
+ data->arg.symlink.pathlen = len;
+ data->arg.symlink.sattr = sattr;
+
+ status = nfs3_do_create(dir, dentry, data);
+
+ nfs3_free_createdata(data);
+out:
+ dprintk("NFS reply symlink: %d\n", status);
+ return status;
+}
+
+static int
+nfs3_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
+{
+ struct nfs3_createdata *data;
+ umode_t mode = sattr->ia_mode;
+ int status = -ENOMEM;
+
+ dprintk("NFS call mkdir %s\n", dentry->d_name.name);
+
+ sattr->ia_mode &= ~current_umask();
+
+ data = nfs3_alloc_createdata();
+ if (data == NULL)
+ goto out;
+
+ data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_MKDIR];
+ data->arg.mkdir.fh = NFS_FH(dir);
+ data->arg.mkdir.name = dentry->d_name.name;
+ data->arg.mkdir.len = dentry->d_name.len;
+ data->arg.mkdir.sattr = sattr;
+
+ status = nfs3_do_create(dir, dentry, data);
+ if (status != 0)
+ goto out;
+
+ status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode);
+out:
+ nfs3_free_createdata(data);
+ dprintk("NFS reply mkdir: %d\n", status);
+ return status;
+}
+
+static int
+nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
+{
+ struct nfs_fattr *dir_attr;
+ struct nfs3_diropargs arg = {
+ .fh = NFS_FH(dir),
+ .name = name->name,
+ .len = name->len
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_RMDIR],
+ .rpc_argp = &arg,
+ };
+ int status = -ENOMEM;
+
+ dprintk("NFS call rmdir %s\n", name->name);
+ dir_attr = nfs_alloc_fattr();
+ if (dir_attr == NULL)
+ goto out;
+
+ msg.rpc_resp = dir_attr;
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_post_op_update_inode(dir, dir_attr);
+ nfs_free_fattr(dir_attr);
+out:
+ dprintk("NFS reply rmdir: %d\n", status);
+ return status;
+}
+
+/*
+ * The READDIR implementation is somewhat hackish: we pass the caller's
+ * page buffer to the encode function, which installs it in the receive
+ * iovec. The decode function itself doesn't perform any decoding; it
+ * just makes sure the reply is syntactically correct.
+ *
+ * Also note that this implementation handles both plain readdir and
+ * readdirplus.
+ */
+static int
+nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+ u64 cookie, struct page **pages, unsigned int count, int plus)
+{
+ struct inode *dir = dentry->d_inode;
+ __be32 *verf = NFS_COOKIEVERF(dir);
+ struct nfs3_readdirargs arg = {
+ .fh = NFS_FH(dir),
+ .cookie = cookie,
+ .verf = {verf[0], verf[1]},
+ .plus = plus,
+ .count = count,
+ .pages = pages
+ };
+ struct nfs3_readdirres res = {
+ .verf = verf,
+ .plus = plus
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_READDIR],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ .rpc_cred = cred
+ };
+ int status = -ENOMEM;
+
+ if (plus)
+ msg.rpc_proc = &nfs3_procedures[NFS3PROC_READDIRPLUS];
+
+ dprintk("NFS call readdir%s %d\n",
+ plus? "plus" : "", (unsigned int) cookie);
+
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ goto out;
+
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+
+ nfs_invalidate_atime(dir);
+ nfs_refresh_inode(dir, res.dir_attr);
+
+ nfs_free_fattr(res.dir_attr);
+out:
+ dprintk("NFS reply readdir%s: %d\n",
+ plus? "plus" : "", status);
+ return status;
+}
+
+static int
+nfs3_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ dev_t rdev)
+{
+ struct nfs3_createdata *data;
+ umode_t mode = sattr->ia_mode;
+ int status = -ENOMEM;
+
+ dprintk("NFS call mknod %s %u:%u\n", dentry->d_name.name,
+ MAJOR(rdev), MINOR(rdev));
+
+ sattr->ia_mode &= ~current_umask();
+
+ data = nfs3_alloc_createdata();
+ if (data == NULL)
+ goto out;
+
+ data->msg.rpc_proc = &nfs3_procedures[NFS3PROC_MKNOD];
+ data->arg.mknod.fh = NFS_FH(dir);
+ data->arg.mknod.name = dentry->d_name.name;
+ data->arg.mknod.len = dentry->d_name.len;
+ data->arg.mknod.sattr = sattr;
+ data->arg.mknod.rdev = rdev;
+
+ switch (sattr->ia_mode & S_IFMT) {
+ case S_IFBLK:
+ data->arg.mknod.type = NF3BLK;
+ break;
+ case S_IFCHR:
+ data->arg.mknod.type = NF3CHR;
+ break;
+ case S_IFIFO:
+ data->arg.mknod.type = NF3FIFO;
+ break;
+ case S_IFSOCK:
+ data->arg.mknod.type = NF3SOCK;
+ break;
+ default:
+ status = -EINVAL;
+ goto out;
+ }
+
+ status = nfs3_do_create(dir, dentry, data);
+ if (status != 0)
+ goto out;
+ status = nfs3_proc_set_default_acl(dir, dentry->d_inode, mode);
+out:
+ nfs3_free_createdata(data);
+ dprintk("NFS reply mknod: %d\n", status);
+ return status;
+}
+
+static int
+nfs3_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsstat *stat)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_FSSTAT],
+ .rpc_argp = fhandle,
+ .rpc_resp = stat,
+ };
+ int status;
+
+ dprintk("NFS call fsstat\n");
+ nfs_fattr_init(stat->fattr);
+ status = rpc_call_sync(server->client, &msg, 0);
+ dprintk("NFS reply fsstat: %d\n", status);
+ return status;
+}
+
+static int
+do_proc_fsinfo(struct rpc_clnt *client, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_FSINFO],
+ .rpc_argp = fhandle,
+ .rpc_resp = info,
+ };
+ int status;
+
+ dprintk("NFS call fsinfo\n");
+ nfs_fattr_init(info->fattr);
+ status = rpc_call_sync(client, &msg, 0);
+ dprintk("NFS reply fsinfo: %d\n", status);
+ return status;
+}
+
+/*
+ * Bare-bones access to fsinfo: this is for nfs_get_root/nfs_get_sb via
+ * nfs_create_server
+ */
+static int
+nfs3_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ int status;
+
+ status = do_proc_fsinfo(server->client, fhandle, info);
+ if (status && server->nfs_client->cl_rpcclient != server->client)
+ status = do_proc_fsinfo(server->nfs_client->cl_rpcclient, fhandle, info);
+ return status;
+}
+
+static int
+nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_pathconf *info)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs3_procedures[NFS3PROC_PATHCONF],
+ .rpc_argp = fhandle,
+ .rpc_resp = info,
+ };
+ int status;
+
+ dprintk("NFS call pathconf\n");
+ nfs_fattr_init(info->fattr);
+ status = rpc_call_sync(server->client, &msg, 0);
+ dprintk("NFS reply pathconf: %d\n", status);
+ return status;
+}
+
+static int nfs3_read_done(struct rpc_task *task, struct nfs_read_data *data)
+{
+ if (nfs3_async_handle_jukebox(task, data->inode))
+ return -EAGAIN;
+
+ nfs_invalidate_atime(data->inode);
+ nfs_refresh_inode(data->inode, &data->fattr);
+ return 0;
+}
+
+static void nfs3_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
+{
+ msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ];
+}
+
+static void nfs3_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
+{
+ rpc_call_start(task);
+}
+
+static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
+{
+ if (nfs3_async_handle_jukebox(task, data->inode))
+ return -EAGAIN;
+ if (task->tk_status >= 0)
+ nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+ return 0;
+}
+
+static void nfs3_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
+{
+ msg->rpc_proc = &nfs3_procedures[NFS3PROC_WRITE];
+}
+
+static void nfs3_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
+{
+ rpc_call_start(task);
+}
+
+static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+{
+ if (nfs3_async_handle_jukebox(task, data->inode))
+ return -EAGAIN;
+ nfs_refresh_inode(data->inode, data->res.fattr);
+ return 0;
+}
+
+static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+{
+ msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT];
+}
+
+static int
+nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+
+ return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+}
+
+const struct nfs_rpc_ops nfs_v3_clientops = {
+ .version = 3, /* protocol version */
+ .dentry_ops = &nfs_dentry_operations,
+ .dir_inode_ops = &nfs3_dir_inode_operations,
+ .file_inode_ops = &nfs3_file_inode_operations,
+ .file_ops = &nfs_file_operations,
+ .getroot = nfs3_proc_get_root,
+ .getattr = nfs3_proc_getattr,
+ .setattr = nfs3_proc_setattr,
+ .lookup = nfs3_proc_lookup,
+ .access = nfs3_proc_access,
+ .readlink = nfs3_proc_readlink,
+ .create = nfs3_proc_create,
+ .remove = nfs3_proc_remove,
+ .unlink_setup = nfs3_proc_unlink_setup,
+ .unlink_rpc_prepare = nfs3_proc_unlink_rpc_prepare,
+ .unlink_done = nfs3_proc_unlink_done,
+ .rename = nfs3_proc_rename,
+ .rename_setup = nfs3_proc_rename_setup,
+ .rename_rpc_prepare = nfs3_proc_rename_rpc_prepare,
+ .rename_done = nfs3_proc_rename_done,
+ .link = nfs3_proc_link,
+ .symlink = nfs3_proc_symlink,
+ .mkdir = nfs3_proc_mkdir,
+ .rmdir = nfs3_proc_rmdir,
+ .readdir = nfs3_proc_readdir,
+ .mknod = nfs3_proc_mknod,
+ .statfs = nfs3_proc_statfs,
+ .fsinfo = nfs3_proc_fsinfo,
+ .pathconf = nfs3_proc_pathconf,
+ .decode_dirent = nfs3_decode_dirent,
+ .read_setup = nfs3_proc_read_setup,
+ .read_rpc_prepare = nfs3_proc_read_rpc_prepare,
+ .read_done = nfs3_read_done,
+ .write_setup = nfs3_proc_write_setup,
+ .write_rpc_prepare = nfs3_proc_write_rpc_prepare,
+ .write_done = nfs3_write_done,
+ .commit_setup = nfs3_proc_commit_setup,
+ .commit_done = nfs3_commit_done,
+ .lock = nfs3_proc_lock,
+ .clear_acl_cache = nfs3_forget_cached_acls,
+ .close_context = nfs_close_context,
+ .init_client = nfs_init_client,
+};
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
new file mode 100644
index 00000000..a77cc9a3
--- /dev/null
+++ b/fs/nfs/nfs3xdr.c
@@ -0,0 +1,2498 @@
+/*
+ * linux/fs/nfs/nfs3xdr.c
+ *
+ * XDR functions to encode/decode NFSv3 RPC arguments and results.
+ *
+ * Copyright (C) 1996, 1997 Olaf Kirch
+ */
+
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/kdev_t.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs3.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfsacl.h>
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_XDR
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO EIO
+
+/*
+ * Declare the space requirements for NFS arguments and replies as
+ * numbers of 32-bit words.
+ */
+#define NFS3_fhandle_sz (1+16)
+#define NFS3_fh_sz (NFS3_fhandle_sz) /* shorthand */
+#define NFS3_sattr_sz (15)
+#define NFS3_filename_sz (1+(NFS3_MAXNAMLEN>>2))
+#define NFS3_path_sz (1+(NFS3_MAXPATHLEN>>2))
+#define NFS3_fattr_sz (21)
+#define NFS3_cookieverf_sz (NFS3_COOKIEVERFSIZE>>2)
+#define NFS3_wcc_attr_sz (6)
+#define NFS3_pre_op_attr_sz (1+NFS3_wcc_attr_sz)
+#define NFS3_post_op_attr_sz (1+NFS3_fattr_sz)
+#define NFS3_wcc_data_sz (NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
+#define NFS3_diropargs_sz (NFS3_fh_sz+NFS3_filename_sz)
+
+#define NFS3_getattrargs_sz (NFS3_fh_sz)
+#define NFS3_setattrargs_sz (NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_lookupargs_sz (NFS3_fh_sz+NFS3_filename_sz)
+#define NFS3_accessargs_sz (NFS3_fh_sz+1)
+#define NFS3_readlinkargs_sz (NFS3_fh_sz)
+#define NFS3_readargs_sz (NFS3_fh_sz+3)
+#define NFS3_writeargs_sz (NFS3_fh_sz+5)
+#define NFS3_createargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz)
+#define NFS3_mkdirargs_sz (NFS3_diropargs_sz+NFS3_sattr_sz)
+#define NFS3_symlinkargs_sz (NFS3_diropargs_sz+1+NFS3_sattr_sz)
+#define NFS3_mknodargs_sz (NFS3_diropargs_sz+2+NFS3_sattr_sz)
+#define NFS3_removeargs_sz (NFS3_fh_sz+NFS3_filename_sz)
+#define NFS3_renameargs_sz (NFS3_diropargs_sz+NFS3_diropargs_sz)
+#define NFS3_linkargs_sz (NFS3_fh_sz+NFS3_diropargs_sz)
+#define NFS3_readdirargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+3)
+#define NFS3_readdirplusargs_sz (NFS3_fh_sz+NFS3_cookieverf_sz+4)
+#define NFS3_commitargs_sz (NFS3_fh_sz+3)
+
+#define NFS3_getattrres_sz (1+NFS3_fattr_sz)
+#define NFS3_setattrres_sz (1+NFS3_wcc_data_sz)
+#define NFS3_removeres_sz (NFS3_setattrres_sz)
+#define NFS3_lookupres_sz (1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
+#define NFS3_accessres_sz (1+NFS3_post_op_attr_sz+1)
+#define NFS3_readlinkres_sz (1+NFS3_post_op_attr_sz+1)
+#define NFS3_readres_sz (1+NFS3_post_op_attr_sz+3)
+#define NFS3_writeres_sz (1+NFS3_wcc_data_sz+4)
+#define NFS3_createres_sz (1+NFS3_fh_sz+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
+#define NFS3_renameres_sz (1+(2 * NFS3_wcc_data_sz))
+#define NFS3_linkres_sz (1+NFS3_post_op_attr_sz+NFS3_wcc_data_sz)
+#define NFS3_readdirres_sz (1+NFS3_post_op_attr_sz+2)
+#define NFS3_fsstatres_sz (1+NFS3_post_op_attr_sz+13)
+#define NFS3_fsinfores_sz (1+NFS3_post_op_attr_sz+12)
+#define NFS3_pathconfres_sz (1+NFS3_post_op_attr_sz+6)
+#define NFS3_commitres_sz (1+NFS3_wcc_data_sz+2)
+
+#define ACL3_getaclargs_sz (NFS3_fh_sz+1)
+#define ACL3_setaclargs_sz (NFS3_fh_sz+1+ \
+ XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
+#define ACL3_getaclres_sz (1+NFS3_post_op_attr_sz+1+ \
+ XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
+#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
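+
+/*
+ * Worked example (illustrative): NFS3_wcc_data_sz expands to
+ * (1+NFS3_wcc_attr_sz) + (1+NFS3_fattr_sz) = 7 + 22 = 29 words,
+ * i.e. at most 116 bytes of wcc_data on the wire.
+ */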
+
+/*
+ * Map file type to S_IFMT bits
+ */
+static const umode_t nfs_type2fmt[] = {
+ [NF3BAD] = 0,
+ [NF3REG] = S_IFREG,
+ [NF3DIR] = S_IFDIR,
+ [NF3BLK] = S_IFBLK,
+ [NF3CHR] = S_IFCHR,
+ [NF3LNK] = S_IFLNK,
+ [NF3SOCK] = S_IFSOCK,
+ [NF3FIFO] = S_IFIFO,
+};
+
+/*
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
+ */
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+ unsigned int base, unsigned int len,
+ unsigned int bufsize)
+{
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
+ unsigned int replen;
+
+ replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
+}
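+
+/*
+ * For illustration: when called while encoding READLINK arguments,
+ * bufsize is NFS3_readlinkres_sz, so replen counts the RPC reply
+ * header, the credential's reply verifier slack and the fixed part of
+ * the result, all in 32-bit words; "replen << 2" is then the byte
+ * offset at which the page vector is spliced into the receive buffer.
+ */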
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("NFS: %s prematurely hit the end of our receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NFSv3 basic data types
+ *
+ * Basic NFSv3 data types are defined in section 2.5 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions. For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+static void encode_uint32(struct xdr_stream *xdr, u32 value)
+{
+ __be32 *p = xdr_reserve_space(xdr, 4);
+ *p = cpu_to_be32(value);
+}
+
+static int decode_uint32(struct xdr_stream *xdr, u32 *value)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ *value = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_uint64(struct xdr_stream *xdr, u64 *value)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ xdr_decode_hyper(p, value);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * fileid3
+ *
+ * typedef uint64 fileid3;
+ */
+static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid)
+{
+ return xdr_decode_hyper(p, fileid);
+}
+
+static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid)
+{
+ return decode_uint64(xdr, fileid);
+}
+
+/*
+ * filename3
+ *
+ * typedef string filename3<>;
+ */
+static void encode_filename3(struct xdr_stream *xdr,
+ const char *name, u32 length)
+{
+ __be32 *p;
+
+ BUG_ON(length > NFS3_MAXNAMLEN);
+ p = xdr_reserve_space(xdr, 4 + length);
+ xdr_encode_opaque(p, name, length);
+}
+
+static int decode_inline_filename3(struct xdr_stream *xdr,
+ const char **name, u32 *length)
+{
+ __be32 *p;
+ u32 count;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p);
+ if (count > NFS3_MAXNAMLEN)
+ goto out_nametoolong;
+ p = xdr_inline_decode(xdr, count);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ *name = (const char *)p;
+ *length = count;
+ return 0;
+
+out_nametoolong:
+ dprintk("NFS: returned filename too long: %u\n", count);
+ return -ENAMETOOLONG;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * nfspath3
+ *
+ * typedef string nfspath3<>;
+ */
+static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
+ const u32 length)
+{
+ BUG_ON(length > NFS3_MAXPATHLEN);
+ encode_uint32(xdr, length);
+ xdr_write_pages(xdr, pages, 0, length);
+}
+
+static int decode_nfspath3(struct xdr_stream *xdr)
+{
+ u32 recvd, count;
+ size_t hdrlen;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p);
+ if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
+ goto out_nametoolong;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(count > recvd))
+ goto out_cheating;
+
+ xdr_read_pages(xdr, count);
+ xdr_terminate_string(xdr->buf, count);
+ return 0;
+
+out_nametoolong:
+ dprintk("NFS: returned pathname too long: %u\n", count);
+ return -ENAMETOOLONG;
+out_cheating:
+ dprintk("NFS: server cheating in pathname result: "
+ "count %u > recvd %u\n", count, recvd);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
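+
+/*
+ * Note: xdr_terminate_string() NUL-terminates the pathname in place,
+ * so the symlink text sitting in the page cache can be used as an
+ * ordinary C string without an extra copy.
+ */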
+
+/*
+ * cookie3
+ *
+ * typedef uint64 cookie3;
+ */
+static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie)
+{
+ return xdr_encode_hyper(p, cookie);
+}
+
+static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie)
+{
+ return decode_uint64(xdr, cookie);
+}
+
+/*
+ * cookieverf3
+ *
+ * typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE];
+ */
+static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier)
+{
+ memcpy(p, verifier, NFS3_COOKIEVERFSIZE);
+ return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE);
+}
+
+static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * createverf3
+ *
+ * typedef opaque createverf3[NFS3_CREATEVERFSIZE];
+ */
+static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE);
+ memcpy(p, verifier, NFS3_CREATEVERFSIZE);
+}
+
+static int decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ memcpy(verifier, p, NFS3_WRITEVERFSIZE);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * size3
+ *
+ * typedef uint64 size3;
+ */
+static __be32 *xdr_decode_size3(__be32 *p, u64 *size)
+{
+ return xdr_decode_hyper(p, size);
+}
+
+/*
+ * nfsstat3
+ *
+ * enum nfsstat3 {
+ * NFS3_OK = 0,
+ * ...
+ * }
+ */
+#define NFS3_OK NFS_OK
+
+static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ *status = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * ftype3
+ *
+ * enum ftype3 {
+ * NF3REG = 1,
+ * NF3DIR = 2,
+ * NF3BLK = 3,
+ * NF3CHR = 4,
+ * NF3LNK = 5,
+ * NF3SOCK = 6,
+ * NF3FIFO = 7
+ * };
+ */
+static void encode_ftype3(struct xdr_stream *xdr, const u32 type)
+{
+ BUG_ON(type > NF3FIFO);
+ encode_uint32(xdr, type);
+}
+
+static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode)
+{
+ u32 type;
+
+ type = be32_to_cpup(p++);
+ if (type > NF3FIFO)
+ type = NF3NON;
+ *mode = nfs_type2fmt[type];
+ return p;
+}
+
+/*
+ * specdata3
+ *
+ * struct specdata3 {
+ * uint32 specdata1;
+ * uint32 specdata2;
+ * };
+ */
+static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(MAJOR(rdev));
+ *p = cpu_to_be32(MINOR(rdev));
+}
+
+static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev)
+{
+ unsigned int major, minor;
+
+ major = be32_to_cpup(p++);
+ minor = be32_to_cpup(p++);
+ *rdev = MKDEV(major, minor);
+ if (MAJOR(*rdev) != major || MINOR(*rdev) != minor)
+ *rdev = 0;
+ return p;
+}
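+
+/*
+ * The MKDEV()/MAJOR()/MINOR() round-trip above rejects device numbers
+ * that the local dev_t cannot represent: if either part fails to
+ * survive the conversion, the rdev is reported as zero rather than as
+ * a silently truncated value.
+ */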
+
+/*
+ * nfs_fh3
+ *
+ * struct nfs_fh3 {
+ * opaque data<NFS3_FHSIZE>;
+ * };
+ */
+static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+ __be32 *p;
+
+ BUG_ON(fh->size > NFS3_FHSIZE);
+ p = xdr_reserve_space(xdr, 4 + fh->size);
+ xdr_encode_opaque(p, fh->data, fh->size);
+}
+
+static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ u32 length;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ length = be32_to_cpup(p++);
+ if (unlikely(length > NFS3_FHSIZE))
+ goto out_toobig;
+ p = xdr_inline_decode(xdr, length);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ fh->size = length;
+ memcpy(fh->data, p, length);
+ return 0;
+out_toobig:
+ dprintk("NFS: file handle size (%u) too big\n", length);
+ return -E2BIG;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static void zero_nfs_fh3(struct nfs_fh *fh)
+{
+ memset(fh, 0, sizeof(*fh));
+}
+
+/*
+ * nfstime3
+ *
+ * struct nfstime3 {
+ * uint32 seconds;
+ * uint32 nseconds;
+ * };
+ */
+static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep)
+{
+ *p++ = cpu_to_be32(timep->tv_sec);
+ *p++ = cpu_to_be32(timep->tv_nsec);
+ return p;
+}
+
+static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep)
+{
+ timep->tv_sec = be32_to_cpup(p++);
+ timep->tv_nsec = be32_to_cpup(p++);
+ return p;
+}
+
+/*
+ * sattr3
+ *
+ * enum time_how {
+ * DONT_CHANGE = 0,
+ * SET_TO_SERVER_TIME = 1,
+ * SET_TO_CLIENT_TIME = 2
+ * };
+ *
+ * union set_mode3 switch (bool set_it) {
+ * case TRUE:
+ * mode3 mode;
+ * default:
+ * void;
+ * };
+ *
+ * union set_uid3 switch (bool set_it) {
+ * case TRUE:
+ * uid3 uid;
+ * default:
+ * void;
+ * };
+ *
+ * union set_gid3 switch (bool set_it) {
+ * case TRUE:
+ * gid3 gid;
+ * default:
+ * void;
+ * };
+ *
+ * union set_size3 switch (bool set_it) {
+ * case TRUE:
+ * size3 size;
+ * default:
+ * void;
+ * };
+ *
+ * union set_atime switch (time_how set_it) {
+ * case SET_TO_CLIENT_TIME:
+ * nfstime3 atime;
+ * default:
+ * void;
+ * };
+ *
+ * union set_mtime switch (time_how set_it) {
+ * case SET_TO_CLIENT_TIME:
+ * nfstime3 mtime;
+ * default:
+ * void;
+ * };
+ *
+ * struct sattr3 {
+ * set_mode3 mode;
+ * set_uid3 uid;
+ * set_gid3 gid;
+ * set_size3 size;
+ * set_atime atime;
+ * set_mtime mtime;
+ * };
+ */
+static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
+{
+ u32 nbytes;
+ __be32 *p;
+
+ /*
+ * In order to make only a single xdr_reserve_space() call,
+ * pre-compute the total number of bytes to be reserved.
+ * Six boolean values, one for each set_foo field, are always
+ * present in the encoded result, so start there.
+ */
+ nbytes = 6 * 4;
+ if (attr->ia_valid & ATTR_MODE)
+ nbytes += 4;
+ if (attr->ia_valid & ATTR_UID)
+ nbytes += 4;
+ if (attr->ia_valid & ATTR_GID)
+ nbytes += 4;
+ if (attr->ia_valid & ATTR_SIZE)
+ nbytes += 8;
+ if (attr->ia_valid & ATTR_ATIME_SET)
+ nbytes += 8;
+ if (attr->ia_valid & ATTR_MTIME_SET)
+ nbytes += 8;
+ p = xdr_reserve_space(xdr, nbytes);
+
+ if (attr->ia_valid & ATTR_MODE) {
+ *p++ = xdr_one;
+ *p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
+ } else
+ *p++ = xdr_zero;
+
+ if (attr->ia_valid & ATTR_UID) {
+ *p++ = xdr_one;
+ *p++ = cpu_to_be32(attr->ia_uid);
+ } else
+ *p++ = xdr_zero;
+
+ if (attr->ia_valid & ATTR_GID) {
+ *p++ = xdr_one;
+ *p++ = cpu_to_be32(attr->ia_gid);
+ } else
+ *p++ = xdr_zero;
+
+ if (attr->ia_valid & ATTR_SIZE) {
+ *p++ = xdr_one;
+ p = xdr_encode_hyper(p, (u64)attr->ia_size);
+ } else
+ *p++ = xdr_zero;
+
+ if (attr->ia_valid & ATTR_ATIME_SET) {
+ *p++ = xdr_two;
+ p = xdr_encode_nfstime3(p, &attr->ia_atime);
+ } else if (attr->ia_valid & ATTR_ATIME) {
+ *p++ = xdr_one;
+ } else
+ *p++ = xdr_zero;
+
+ if (attr->ia_valid & ATTR_MTIME_SET) {
+ *p++ = xdr_two;
+ xdr_encode_nfstime3(p, &attr->ia_mtime);
+ } else if (attr->ia_valid & ATTR_MTIME) {
+ *p = xdr_one;
+ } else
+ *p = xdr_zero;
+}
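+
+/*
+ * Wire-format sketch (illustrative): a chmod-only SETATTR, with just
+ * ATTR_MODE set, produces seven words:
+ *
+ *	TRUE, mode, FALSE, FALSE, FALSE, DONT_CHANGE, DONT_CHANGE
+ *
+ * one set_mode3 discriminant plus the mode itself, then the unset
+ * uid, gid and size booleans and the two DONT_CHANGE time_how values.
+ */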
+
+/*
+ * fattr3
+ *
+ * struct fattr3 {
+ * ftype3 type;
+ * mode3 mode;
+ * uint32 nlink;
+ * uid3 uid;
+ * gid3 gid;
+ * size3 size;
+ * size3 used;
+ * specdata3 rdev;
+ * uint64 fsid;
+ * fileid3 fileid;
+ * nfstime3 atime;
+ * nfstime3 mtime;
+ * nfstime3 ctime;
+ * };
+ */
+static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+ umode_t fmode;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+
+ p = xdr_decode_ftype3(p, &fmode);
+
+ fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
+ fattr->nlink = be32_to_cpup(p++);
+ fattr->uid = be32_to_cpup(p++);
+ fattr->gid = be32_to_cpup(p++);
+
+ p = xdr_decode_size3(p, &fattr->size);
+ p = xdr_decode_size3(p, &fattr->du.nfs3.used);
+ p = xdr_decode_specdata3(p, &fattr->rdev);
+
+ p = xdr_decode_hyper(p, &fattr->fsid.major);
+ fattr->fsid.minor = 0;
+
+ p = xdr_decode_fileid3(p, &fattr->fileid);
+ p = xdr_decode_nfstime3(p, &fattr->atime);
+ p = xdr_decode_nfstime3(p, &fattr->mtime);
+ xdr_decode_nfstime3(p, &fattr->ctime);
+
+ fattr->valid |= NFS_ATTR_FATTR_V3;
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * post_op_attr
+ *
+ * union post_op_attr switch (bool attributes_follow) {
+ * case TRUE:
+ * fattr3 attributes;
+ * case FALSE:
+ * void;
+ * };
+ */
+static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p != xdr_zero)
+ return decode_fattr3(xdr, fattr);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * wcc_attr
+ * struct wcc_attr {
+ * size3 size;
+ * nfstime3 mtime;
+ * nfstime3 ctime;
+ * };
+ */
+static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+
+ fattr->valid |= NFS_ATTR_FATTR_PRESIZE
+ | NFS_ATTR_FATTR_PREMTIME
+ | NFS_ATTR_FATTR_PRECTIME;
+
+ p = xdr_decode_size3(p, &fattr->pre_size);
+ p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
+ xdr_decode_nfstime3(p, &fattr->pre_ctime);
+
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * pre_op_attr
+ * union pre_op_attr switch (bool attributes_follow) {
+ * case TRUE:
+ * wcc_attr attributes;
+ * case FALSE:
+ * void;
+ * };
+ *
+ * wcc_data
+ *
+ * struct wcc_data {
+ * pre_op_attr before;
+ * post_op_attr after;
+ * };
+ */
+static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p != xdr_zero)
+ return decode_wcc_attr(xdr, fattr);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+ int error;
+
+ error = decode_pre_op_attr(xdr, fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, fattr);
+out:
+ return error;
+}
+
+/*
+ * post_op_fh3
+ *
+ * union post_op_fh3 switch (bool handle_follows) {
+ * case TRUE:
+ * nfs_fh3 handle;
+ * case FALSE:
+ * void;
+ * };
+ */
+static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ __be32 *p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p != xdr_zero)
+ return decode_nfs_fh3(xdr, fh);
+ zero_nfs_fh3(fh);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/*
+ * diropargs3
+ *
+ * struct diropargs3 {
+ * nfs_fh3 dir;
+ * filename3 name;
+ * };
+ */
+static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
+ const char *name, u32 length)
+{
+ encode_nfs_fh3(xdr, fh);
+ encode_filename3(xdr, name, length);
+}
+
+
+/*
+ * NFSv3 XDR encode functions
+ *
+ * NFSv3 argument types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ */
+
+/*
+ * 3.3.1 GETATTR3args
+ *
+ * struct GETATTR3args {
+ * nfs_fh3 object;
+ * };
+ */
+static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_fh *fh)
+{
+ encode_nfs_fh3(xdr, fh);
+}
+
+/*
+ * 3.3.2 SETATTR3args
+ *
+ * union sattrguard3 switch (bool check) {
+ * case TRUE:
+ * nfstime3 obj_ctime;
+ * case FALSE:
+ * void;
+ * };
+ *
+ * struct SETATTR3args {
+ * nfs_fh3 object;
+ * sattr3 new_attributes;
+ * sattrguard3 guard;
+ * };
+ */
+static void encode_sattrguard3(struct xdr_stream *xdr,
+ const struct nfs3_sattrargs *args)
+{
+ __be32 *p;
+
+ if (args->guard) {
+ p = xdr_reserve_space(xdr, 4 + 8);
+ *p++ = xdr_one;
+ xdr_encode_nfstime3(p, &args->guardtime);
+ } else {
+ p = xdr_reserve_space(xdr, 4);
+ *p = xdr_zero;
+ }
+}
+
+static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_sattrargs *args)
+{
+ encode_nfs_fh3(xdr, args->fh);
+ encode_sattr3(xdr, args->sattr);
+ encode_sattrguard3(xdr, args);
+}
+
+/*
+ * 3.3.3 LOOKUP3args
+ *
+ * struct LOOKUP3args {
+ * diropargs3 what;
+ * };
+ */
+static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_diropargs *args)
+{
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+}
+
+/*
+ * 3.3.4 ACCESS3args
+ *
+ * struct ACCESS3args {
+ * nfs_fh3 object;
+ * uint32 access;
+ * };
+ */
+static void encode_access3args(struct xdr_stream *xdr,
+ const struct nfs3_accessargs *args)
+{
+ encode_nfs_fh3(xdr, args->fh);
+ encode_uint32(xdr, args->access);
+}
+
+static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_accessargs *args)
+{
+ encode_access3args(xdr, args);
+}
+
+/*
+ * 3.3.5 READLINK3args
+ *
+ * struct READLINK3args {
+ * nfs_fh3 symlink;
+ * };
+ */
+static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_readlinkargs *args)
+{
+ encode_nfs_fh3(xdr, args->fh);
+ prepare_reply_buffer(req, args->pages, args->pgbase,
+ args->pglen, NFS3_readlinkres_sz);
+}
+
+/*
+ * 3.3.6 READ3args
+ *
+ * struct READ3args {
+ * nfs_fh3 file;
+ * offset3 offset;
+ * count3 count;
+ * };
+ */
+static void encode_read3args(struct xdr_stream *xdr,
+ const struct nfs_readargs *args)
+{
+ __be32 *p;
+
+ encode_nfs_fh3(xdr, args->fh);
+
+ p = xdr_reserve_space(xdr, 8 + 4);
+ p = xdr_encode_hyper(p, args->offset);
+ *p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_readargs *args)
+{
+ encode_read3args(xdr, args);
+ prepare_reply_buffer(req, args->pages, args->pgbase,
+ args->count, NFS3_readres_sz);
+ req->rq_rcv_buf.flags |= XDRBUF_READ;
+}
+
+/*
+ * 3.3.7 WRITE3args
+ *
+ * enum stable_how {
+ * UNSTABLE = 0,
+ * DATA_SYNC = 1,
+ * FILE_SYNC = 2
+ * };
+ *
+ * struct WRITE3args {
+ * nfs_fh3 file;
+ * offset3 offset;
+ * count3 count;
+ * stable_how stable;
+ * opaque data<>;
+ * };
+ */
+static void encode_write3args(struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
+{
+ __be32 *p;
+
+ encode_nfs_fh3(xdr, args->fh);
+
+ p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
+ p = xdr_encode_hyper(p, args->offset);
+ *p++ = cpu_to_be32(args->count);
+ *p++ = cpu_to_be32(args->stable);
+ *p = cpu_to_be32(args->count);
+ xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
+}
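+
+/*
+ * Note that the byte count is encoded twice above: once as the
+ * WRITE3args count field and once as the length of the opaque data<>
+ * array, exactly as RFC 1813 lays the arguments out.
+ */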
+
+static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
+{
+ encode_write3args(xdr, args);
+ xdr->buf->flags |= XDRBUF_WRITE;
+}
+
+/*
+ * 3.3.8 CREATE3args
+ *
+ * enum createmode3 {
+ * UNCHECKED = 0,
+ * GUARDED = 1,
+ * EXCLUSIVE = 2
+ * };
+ *
+ * union createhow3 switch (createmode3 mode) {
+ * case UNCHECKED:
+ * case GUARDED:
+ * sattr3 obj_attributes;
+ * case EXCLUSIVE:
+ * createverf3 verf;
+ * };
+ *
+ * struct CREATE3args {
+ * diropargs3 where;
+ * createhow3 how;
+ * };
+ */
+static void encode_createhow3(struct xdr_stream *xdr,
+ const struct nfs3_createargs *args)
+{
+ encode_uint32(xdr, args->createmode);
+ switch (args->createmode) {
+ case NFS3_CREATE_UNCHECKED:
+ case NFS3_CREATE_GUARDED:
+ encode_sattr3(xdr, args->sattr);
+ break;
+ case NFS3_CREATE_EXCLUSIVE:
+ encode_createverf3(xdr, args->verifier);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_createargs *args)
+{
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+ encode_createhow3(xdr, args);
+}
+
+/*
+ * 3.3.9 MKDIR3args
+ *
+ * struct MKDIR3args {
+ * diropargs3 where;
+ * sattr3 attributes;
+ * };
+ */
+static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_mkdirargs *args)
+{
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+ encode_sattr3(xdr, args->sattr);
+}
+
+/*
+ * 3.3.10 SYMLINK3args
+ *
+ * struct symlinkdata3 {
+ * sattr3 symlink_attributes;
+ * nfspath3 symlink_data;
+ * };
+ *
+ * struct SYMLINK3args {
+ * diropargs3 where;
+ * symlinkdata3 symlink;
+ * };
+ */
+static void encode_symlinkdata3(struct xdr_stream *xdr,
+ const struct nfs3_symlinkargs *args)
+{
+ encode_sattr3(xdr, args->sattr);
+ encode_nfspath3(xdr, args->pages, args->pathlen);
+}
+
+static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_symlinkargs *args)
+{
+ encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
+ encode_symlinkdata3(xdr, args);
+}
+
+/*
+ * 3.3.11 MKNOD3args
+ *
+ * struct devicedata3 {
+ * sattr3 dev_attributes;
+ * specdata3 spec;
+ * };
+ *
+ * union mknoddata3 switch (ftype3 type) {
+ * case NF3CHR:
+ * case NF3BLK:
+ * devicedata3 device;
+ * case NF3SOCK:
+ * case NF3FIFO:
+ * sattr3 pipe_attributes;
+ * default:
+ * void;
+ * };
+ *
+ * struct MKNOD3args {
+ * diropargs3 where;
+ * mknoddata3 what;
+ * };
+ */
+static void encode_devicedata3(struct xdr_stream *xdr,
+ const struct nfs3_mknodargs *args)
+{
+ encode_sattr3(xdr, args->sattr);
+ encode_specdata3(xdr, args->rdev);
+}
+
+static void encode_mknoddata3(struct xdr_stream *xdr,
+ const struct nfs3_mknodargs *args)
+{
+ encode_ftype3(xdr, args->type);
+ switch (args->type) {
+ case NF3CHR:
+ case NF3BLK:
+ encode_devicedata3(xdr, args);
+ break;
+ case NF3SOCK:
+ case NF3FIFO:
+ encode_sattr3(xdr, args->sattr);
+ break;
+ case NF3REG:
+ case NF3DIR:
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_mknodargs *args)
+{
+ encode_diropargs3(xdr, args->fh, args->name, args->len);
+ encode_mknoddata3(xdr, args);
+}
+
+/*
+ * 3.3.12 REMOVE3args
+ *
+ * struct REMOVE3args {
+ * diropargs3 object;
+ * };
+ */
+static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_removeargs *args)
+{
+ encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
+}
+
+/*
+ * 3.3.14 RENAME3args
+ *
+ * struct RENAME3args {
+ * diropargs3 from;
+ * diropargs3 to;
+ * };
+ */
+static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_renameargs *args)
+{
+ const struct qstr *old = args->old_name;
+ const struct qstr *new = args->new_name;
+
+ encode_diropargs3(xdr, args->old_dir, old->name, old->len);
+ encode_diropargs3(xdr, args->new_dir, new->name, new->len);
+}
+
+/*
+ * 3.3.15 LINK3args
+ *
+ * struct LINK3args {
+ * nfs_fh3 file;
+ * diropargs3 link;
+ * };
+ */
+static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_linkargs *args)
+{
+ encode_nfs_fh3(xdr, args->fromfh);
+ encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
+}
+
+/*
+ * 3.3.16 READDIR3args
+ *
+ * struct READDIR3args {
+ * nfs_fh3 dir;
+ * cookie3 cookie;
+ * cookieverf3 cookieverf;
+ * count3 count;
+ * };
+ */
+static void encode_readdir3args(struct xdr_stream *xdr,
+ const struct nfs3_readdirargs *args)
+{
+ __be32 *p;
+
+ encode_nfs_fh3(xdr, args->fh);
+
+ p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
+ p = xdr_encode_cookie3(p, args->cookie);
+ p = xdr_encode_cookieverf3(p, args->verf);
+ *p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_readdirargs *args)
+{
+ encode_readdir3args(xdr, args);
+ prepare_reply_buffer(req, args->pages, 0,
+ args->count, NFS3_readdirres_sz);
+}
+
+/*
+ * 3.3.17 READDIRPLUS3args
+ *
+ * struct READDIRPLUS3args {
+ * nfs_fh3 dir;
+ * cookie3 cookie;
+ * cookieverf3 cookieverf;
+ * count3 dircount;
+ * count3 maxcount;
+ * };
+ */
+static void encode_readdirplus3args(struct xdr_stream *xdr,
+ const struct nfs3_readdirargs *args)
+{
+ __be32 *p;
+
+ encode_nfs_fh3(xdr, args->fh);
+
+ p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
+ p = xdr_encode_cookie3(p, args->cookie);
+ p = xdr_encode_cookieverf3(p, args->verf);
+
+ /*
+ * readdirplus needs both a dircount hint and the full reply
+ * buffer size (maxcount); just make sure dircount is big enough.
+ */
+ *p++ = cpu_to_be32(args->count >> 3);
+
+ *p = cpu_to_be32(args->count);
+}
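+
+/*
+ * Worked example (illustrative): for a common 32768-byte reply buffer,
+ * the dircount hint sent above is 32768 >> 3 = 4096 bytes, while
+ * maxcount stays at the full 32768.
+ */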
+
+static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_readdirargs *args)
+{
+ encode_readdirplus3args(xdr, args);
+ prepare_reply_buffer(req, args->pages, 0,
+ args->count, NFS3_readdirres_sz);
+}
+
+/*
+ * 3.3.21 COMMIT3args
+ *
+ * struct COMMIT3args {
+ * nfs_fh3 file;
+ * offset3 offset;
+ * count3 count;
+ * };
+ */
+static void encode_commit3args(struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
+{
+ __be32 *p;
+
+ encode_nfs_fh3(xdr, args->fh);
+
+ p = xdr_reserve_space(xdr, 8 + 4);
+ p = xdr_encode_hyper(p, args->offset);
+ *p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs_writeargs *args)
+{
+ encode_commit3args(xdr, args);
+}
+
+#ifdef CONFIG_NFS_V3_ACL
+
+static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_getaclargs *args)
+{
+ encode_nfs_fh3(xdr, args->fh);
+ encode_uint32(xdr, args->mask);
+ if (args->mask & (NFS_ACL | NFS_DFACL))
+ prepare_reply_buffer(req, args->pages, 0,
+ NFSACL_MAXPAGES << PAGE_SHIFT,
+ ACL3_getaclres_sz);
+}
+
+static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs3_setaclargs *args)
+{
+ unsigned int base;
+ int error;
+
+ encode_nfs_fh3(xdr, NFS_FH(args->inode));
+ encode_uint32(xdr, args->mask);
+
+ base = req->rq_slen;
+ if (args->npages != 0)
+ xdr_write_pages(xdr, args->pages, 0, args->len);
+ else
+ xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
+
+ error = nfsacl_encode(xdr->buf, base, args->inode,
+ (args->mask & NFS_ACL) ?
+ args->acl_access : NULL, 1, 0);
+ BUG_ON(error < 0);
+ error = nfsacl_encode(xdr->buf, base + error, args->inode,
+ (args->mask & NFS_DFACL) ?
+ args->acl_default : NULL, 1,
+ NFS_ACL_DEFAULT);
+ BUG_ON(error < 0);
+}
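+
+/*
+ * The base offset captured above is where the ACL payload begins in
+ * the send buffer: small ACLs are encoded inline in the head, larger
+ * ones come spliced in from args->pages, and nfsacl_encode() writes
+ * the access and default ACLs at that offset in either case.
+ */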
+
+#endif /* CONFIG_NFS_V3_ACL */
+
+/*
+ * NFSv3 XDR decode functions
+ *
+ * NFSv3 result types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ */
+
+/*
+ * 3.3.1 GETATTR3res
+ *
+ * struct GETATTR3resok {
+ * fattr3 obj_attributes;
+ * };
+ *
+ * union GETATTR3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * GETATTR3resok resok;
+ * default:
+ * void;
+ * };
+ */
+static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fattr *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_fattr3(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.2 SETATTR3res
+ *
+ * struct SETATTR3resok {
+ * wcc_data obj_wcc;
+ * };
+ *
+ * struct SETATTR3resfail {
+ * wcc_data obj_wcc;
+ * };
+ *
+ * union SETATTR3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * SETATTR3resok resok;
+ * default:
+ * SETATTR3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fattr *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.3 LOOKUP3res
+ *
+ * struct LOOKUP3resok {
+ * nfs_fh3 object;
+ * post_op_attr obj_attributes;
+ * post_op_attr dir_attributes;
+ * };
+ *
+ * struct LOOKUP3resfail {
+ * post_op_attr dir_attributes;
+ * };
+ *
+ * union LOOKUP3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * LOOKUP3resok resok;
+ * default:
+ * LOOKUP3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_diropres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_nfs_fh3(xdr, result->fh);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->dir_attr);
+out:
+ return error;
+out_default:
+ error = decode_post_op_attr(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.4 ACCESS3res
+ *
+ * struct ACCESS3resok {
+ * post_op_attr obj_attributes;
+ * uint32 access;
+ * };
+ *
+ * struct ACCESS3resfail {
+ * post_op_attr obj_attributes;
+ * };
+ *
+ * union ACCESS3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * ACCESS3resok resok;
+ * default:
+ * ACCESS3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_accessres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_uint32(xdr, &result->access);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.5 READLINK3res
+ *
+ * struct READLINK3resok {
+ * post_op_attr symlink_attributes;
+ * nfspath3 data;
+ * };
+ *
+ * struct READLINK3resfail {
+ * post_op_attr symlink_attributes;
+ * };
+ *
+ * union READLINK3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * READLINK3resok resok;
+ * default:
+ * READLINK3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fattr *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_nfspath3(xdr);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.6 READ3res
+ *
+ * struct READ3resok {
+ * post_op_attr file_attributes;
+ * count3 count;
+ * bool eof;
+ * opaque data<>;
+ * };
+ *
+ * struct READ3resfail {
+ * post_op_attr file_attributes;
+ * };
+ *
+ * union READ3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * READ3resok resok;
+ * default:
+ * READ3resfail resfail;
+ * };
+ */
+static int decode_read3resok(struct xdr_stream *xdr,
+ struct nfs_readres *result)
+{
+ u32 eof, count, ocount, recvd;
+ size_t hdrlen;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4 + 4 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ count = be32_to_cpup(p++);
+ eof = be32_to_cpup(p++);
+ ocount = be32_to_cpup(p++);
+ if (unlikely(ocount != count))
+ goto out_mismatch;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(count > recvd))
+ goto out_cheating;
+
+out:
+ xdr_read_pages(xdr, count);
+ result->eof = eof;
+ result->count = count;
+ return count;
+out_mismatch:
+ dprintk("NFS: READ count doesn't match length of opaque: "
+ "count %u != ocount %u\n", count, ocount);
+ return -EIO;
+out_cheating:
+ dprintk("NFS: server cheating in read result: "
+ "count %u > recvd %u\n", count, recvd);
+ count = recvd;
+ eof = 0;
+ goto out;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
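+
+/*
+ * Note: on a short reply (the "out_cheating" case above) the count is
+ * clamped to the bytes actually received and eof is forced to zero,
+ * so a truncated read is never mistaken for end-of-file.
+ */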
+
+static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_readres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_read3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.7 WRITE3res
+ *
+ * enum stable_how {
+ * UNSTABLE = 0,
+ * DATA_SYNC = 1,
+ * FILE_SYNC = 2
+ * };
+ *
+ * struct WRITE3resok {
+ * wcc_data file_wcc;
+ * count3 count;
+ * stable_how committed;
+ * writeverf3 verf;
+ * };
+ *
+ * struct WRITE3resfail {
+ * wcc_data file_wcc;
+ * };
+ *
+ * union WRITE3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * WRITE3resok resok;
+ * default:
+ * WRITE3resfail resfail;
+ * };
+ */
+static int decode_write3resok(struct xdr_stream *xdr,
+ struct nfs_writeres *result)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ result->count = be32_to_cpup(p++);
+ result->verf->committed = be32_to_cpup(p++);
+ if (unlikely(result->verf->committed > NFS_FILE_SYNC))
+ goto out_badvalue;
+ memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE);
+ return result->count;
+out_badvalue:
+ dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_writeres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_write3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.8 CREATE3res
+ *
+ * struct CREATE3resok {
+ * post_op_fh3 obj;
+ * post_op_attr obj_attributes;
+ * wcc_data dir_wcc;
+ * };
+ *
+ * struct CREATE3resfail {
+ * wcc_data dir_wcc;
+ * };
+ *
+ * union CREATE3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * CREATE3resok resok;
+ * default:
+ * CREATE3resfail resfail;
+ * };
+ */
+static int decode_create3resok(struct xdr_stream *xdr,
+ struct nfs3_diropres *result)
+{
+ int error;
+
+ error = decode_post_op_fh3(xdr, result->fh);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ /* The server isn't required to return a file handle.
+ * If it didn't, force the client to perform a LOOKUP
+ * to determine the correct file handle and attribute
+ * values for the new object. */
+ if (result->fh->size == 0)
+ result->fattr->valid = 0;
+ error = decode_wcc_data(xdr, result->dir_attr);
+out:
+ return error;
+}
+
+static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_diropres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_create3resok(xdr, result);
+out:
+ return error;
+out_default:
+ error = decode_wcc_data(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.12 REMOVE3res
+ *
+ * struct REMOVE3resok {
+ * wcc_data dir_wcc;
+ * };
+ *
+ * struct REMOVE3resfail {
+ * wcc_data dir_wcc;
+ * };
+ *
+ * union REMOVE3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * REMOVE3resok resok;
+ * default:
+ * REMOVE3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_removeres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.14 RENAME3res
+ *
+ * struct RENAME3resok {
+ * wcc_data fromdir_wcc;
+ * wcc_data todir_wcc;
+ * };
+ *
+ * struct RENAME3resfail {
+ * wcc_data fromdir_wcc;
+ * wcc_data todir_wcc;
+ * };
+ *
+ * union RENAME3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * RENAME3resok resok;
+ * default:
+ * RENAME3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_renameres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->old_fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->new_fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.15 LINK3res
+ *
+ * struct LINK3resok {
+ * post_op_attr file_attributes;
+ * wcc_data linkdir_wcc;
+ * };
+ *
+ * struct LINK3resfail {
+ * post_op_attr file_attributes;
+ * wcc_data linkdir_wcc;
+ * };
+ *
+ * union LINK3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * LINK3resok resok;
+ * default:
+ * LINK3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs3_linkres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+/**
+ * nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in
+ * the local page cache
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value is
+ * returned.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 3.3.16 entry3
+ *
+ * struct entry3 {
+ * fileid3 fileid;
+ * filename3 name;
+ * cookie3 cookie;
+ * entry3 *nextentry;
+ * };
+ *
+ * 3.3.17 entryplus3
+ * struct entryplus3 {
+ * fileid3 fileid;
+ * filename3 name;
+ * cookie3 cookie;
+ * post_op_attr name_attributes;
+ * post_op_fh3 name_handle;
+ * entryplus3 *nextentry;
+ * };
+ */
+int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ int plus)
+{
+ struct nfs_entry old = *entry;
+ __be32 *p;
+ int error;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p == xdr_zero) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p == xdr_zero)
+ return -EAGAIN;
+ entry->eof = 1;
+ return -EBADCOOKIE;
+ }
+
+ error = decode_fileid3(xdr, &entry->ino);
+ if (unlikely(error))
+ return error;
+
+ error = decode_inline_filename3(xdr, &entry->name, &entry->len);
+ if (unlikely(error))
+ return error;
+
+ entry->prev_cookie = entry->cookie;
+ error = decode_cookie3(xdr, &entry->cookie);
+ if (unlikely(error))
+ return error;
+
+ entry->d_type = DT_UNKNOWN;
+
+ if (plus) {
+ entry->fattr->valid = 0;
+ error = decode_post_op_attr(xdr, entry->fattr);
+ if (unlikely(error))
+ return error;
+ if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
+ entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
+
+ /* In fact, a post_op_fh3: */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ if (*p != xdr_zero) {
+ error = decode_nfs_fh3(xdr, entry->fh);
+ if (unlikely(error)) {
+ if (error == -E2BIG)
+ goto out_truncated;
+ return error;
+ }
+ } else
+ zero_nfs_fh3(entry->fh);
+ }
+
+ return 0;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EAGAIN;
+out_truncated:
+ dprintk("NFS: directory entry contains invalid file handle\n");
+ *entry = old;
+ return -EAGAIN;
+}
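+
+/*
+ * Summary of the decoder's sentinel returns: -EAGAIN means the current
+ * run of cached pages is exhausted (or an entry was damaged) and the
+ * caller should issue another READDIR; -EBADCOOKIE with entry->eof set
+ * marks the end of the directory stream.
+ */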
+
+/*
+ * 3.3.16 READDIR3res
+ *
+ * struct dirlist3 {
+ * entry3 *entries;
+ * bool eof;
+ * };
+ *
+ * struct READDIR3resok {
+ * post_op_attr dir_attributes;
+ * cookieverf3 cookieverf;
+ * dirlist3 reply;
+ * };
+ *
+ * struct READDIR3resfail {
+ * post_op_attr dir_attributes;
+ * };
+ *
+ * union READDIR3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * READDIR3resok resok;
+ * default:
+ * READDIR3resfail resfail;
+ * };
+ *
+ * Read the directory contents into the page cache, but otherwise
+ * don't touch them. The actual decoding is done by nfs3_decode_dirent()
+ * during subsequent nfs_readdir() calls.
+ */
+static int decode_dirlist3(struct xdr_stream *xdr)
+{
+ u32 recvd, pglen;
+ size_t hdrlen;
+
+ pglen = xdr->buf->page_len;
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+ recvd = xdr->buf->len - hdrlen;
+ if (unlikely(pglen > recvd))
+ goto out_cheating;
+out:
+ xdr_read_pages(xdr, pglen);
+ return pglen;
+out_cheating:
+ dprintk("NFS: server cheating in readdir result: "
+ "pglen %u > recvd %u\n", pglen, recvd);
+ pglen = recvd;
+ goto out;
+}
+
+static int decode_readdir3resok(struct xdr_stream *xdr,
+ struct nfs3_readdirres *result)
+{
+ int error;
+
+ error = decode_post_op_attr(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ /* XXX: do we need to check if result->verf != NULL ? */
+ error = decode_cookieverf3(xdr, result->verf);
+ if (unlikely(error))
+ goto out;
+ error = decode_dirlist3(xdr);
+out:
+ return error;
+}
+
+static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_readdirres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_readdir3resok(xdr, result);
+out:
+ return error;
+out_default:
+ error = decode_post_op_attr(xdr, result->dir_attr);
+ if (unlikely(error))
+ goto out;
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.18 FSSTAT3res
+ *
+ * struct FSSTAT3resok {
+ * post_op_attr obj_attributes;
+ * size3 tbytes;
+ * size3 fbytes;
+ * size3 abytes;
+ * size3 tfiles;
+ * size3 ffiles;
+ * size3 afiles;
+ * uint32 invarsec;
+ * };
+ *
+ * struct FSSTAT3resfail {
+ * post_op_attr obj_attributes;
+ * };
+ *
+ * union FSSTAT3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * FSSTAT3resok resok;
+ * default:
+ * FSSTAT3resfail resfail;
+ * };
+ */
+static int decode_fsstat3resok(struct xdr_stream *xdr,
+ struct nfs_fsstat *result)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 8 * 6 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ p = xdr_decode_size3(p, &result->tbytes);
+ p = xdr_decode_size3(p, &result->fbytes);
+ p = xdr_decode_size3(p, &result->abytes);
+ p = xdr_decode_size3(p, &result->tfiles);
+ p = xdr_decode_size3(p, &result->ffiles);
+ xdr_decode_size3(p, &result->afiles);
+ /* ignore invarsec */
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fsstat *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_fsstat3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.19 FSINFO3res
+ *
+ * struct FSINFO3resok {
+ * post_op_attr obj_attributes;
+ * uint32 rtmax;
+ * uint32 rtpref;
+ * uint32 rtmult;
+ * uint32 wtmax;
+ * uint32 wtpref;
+ * uint32 wtmult;
+ * uint32 dtpref;
+ * size3 maxfilesize;
+ * nfstime3 time_delta;
+ * uint32 properties;
+ * };
+ *
+ * struct FSINFO3resfail {
+ * post_op_attr obj_attributes;
+ * };
+ *
+ * union FSINFO3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * FSINFO3resok resok;
+ * default:
+ * FSINFO3resfail resfail;
+ * };
+ */
+static int decode_fsinfo3resok(struct xdr_stream *xdr,
+ struct nfs_fsinfo *result)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ result->rtmax = be32_to_cpup(p++);
+ result->rtpref = be32_to_cpup(p++);
+ result->rtmult = be32_to_cpup(p++);
+ result->wtmax = be32_to_cpup(p++);
+ result->wtpref = be32_to_cpup(p++);
+ result->wtmult = be32_to_cpup(p++);
+ result->dtpref = be32_to_cpup(p++);
+ p = xdr_decode_size3(p, &result->maxfilesize);
+ xdr_decode_nfstime3(p, &result->time_delta);
+
+ /* ignore properties */
+ result->lease_time = 0;
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fsinfo *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_fsinfo3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.20 PATHCONF3res
+ *
+ * struct PATHCONF3resok {
+ * post_op_attr obj_attributes;
+ * uint32 linkmax;
+ * uint32 name_max;
+ * bool no_trunc;
+ * bool chown_restricted;
+ * bool case_insensitive;
+ * bool case_preserving;
+ * };
+ *
+ * struct PATHCONF3resfail {
+ * post_op_attr obj_attributes;
+ * };
+ *
+ * union PATHCONF3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * PATHCONF3resok resok;
+ * default:
+ * PATHCONF3resfail resfail;
+ * };
+ */
+static int decode_pathconf3resok(struct xdr_stream *xdr,
+ struct nfs_pathconf *result)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4 * 6);
+ if (unlikely(p == NULL))
+ goto out_overflow;
+ result->max_link = be32_to_cpup(p++);
+ result->max_namelen = be32_to_cpup(p);
+ /* ignore remaining fields */
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_pathconf *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_pathconf3resok(xdr, result);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.21 COMMIT3res
+ *
+ * struct COMMIT3resok {
+ * wcc_data file_wcc;
+ * writeverf3 verf;
+ * };
+ *
+ * struct COMMIT3resfail {
+ * wcc_data file_wcc;
+ * };
+ *
+ * union COMMIT3res switch (nfsstat3 status) {
+ * case NFS3_OK:
+ * COMMIT3resok resok;
+ * default:
+ * COMMIT3resfail resfail;
+ * };
+ */
+static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_writeres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ error = decode_wcc_data(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_status;
+ error = decode_writeverf3(xdr, result->verf->verifier);
+out:
+ return error;
+out_status:
+ return nfs_stat_to_errno(status);
+}
+
+#ifdef CONFIG_NFS_V3_ACL
+
+static inline int decode_getacl3resok(struct xdr_stream *xdr,
+ struct nfs3_getaclres *result)
+{
+ struct posix_acl **acl;
+ unsigned int *aclcnt;
+ size_t hdrlen;
+ int error;
+
+ error = decode_post_op_attr(xdr, result->fattr);
+ if (unlikely(error))
+ goto out;
+ error = decode_uint32(xdr, &result->mask);
+ if (unlikely(error))
+ goto out;
+ error = -EINVAL;
+ if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
+ goto out;
+
+ hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+
+ acl = NULL;
+ if (result->mask & NFS_ACL)
+ acl = &result->acl_access;
+ aclcnt = NULL;
+ if (result->mask & NFS_ACLCNT)
+ aclcnt = &result->acl_access_count;
+ error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl);
+ if (unlikely(error <= 0))
+ goto out;
+
+ acl = NULL;
+ if (result->mask & NFS_DFACL)
+ acl = &result->acl_default;
+ aclcnt = NULL;
+ if (result->mask & NFS_DFACLCNT)
+ aclcnt = &result->acl_default_count;
+ error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl);
+ if (unlikely(error <= 0))
+ return error;
+ error = 0;
+out:
+ return error;
+}
+
+static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs3_getaclres *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_getacl3resok(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fattr *result)
+{
+ enum nfs_stat status;
+ int error;
+
+ error = decode_nfsstat3(xdr, &status);
+ if (unlikely(error))
+ goto out;
+ if (status != NFS3_OK)
+ goto out_default;
+ error = decode_post_op_attr(xdr, result);
+out:
+ return error;
+out_default:
+ return nfs_stat_to_errno(status);
+}
+
+#endif /* CONFIG_NFS_V3_ACL */
+
+#define PROC(proc, argtype, restype, timer) \
+[NFS3PROC_##proc] = { \
+ .p_proc = NFS3PROC_##proc, \
+ .p_encode = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args, \
+ .p_decode = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res, \
+ .p_arglen = NFS3_##argtype##args_sz, \
+ .p_replen = NFS3_##restype##res_sz, \
+ .p_timer = timer, \
+ .p_statidx = NFS3PROC_##proc, \
+ .p_name = #proc, \
+ }
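+
+/*
+ * For example, PROC(GETATTR, getattr, getattr, 1) expands to an
+ * rpc_procinfo slot wiring NFS3PROC_GETATTR to
+ * nfs3_xdr_enc_getattr3args()/nfs3_xdr_dec_getattr3res(), with
+ * NFS3_getattrargs_sz and NFS3_getattrres_sz as the buffer size
+ * estimates and timer class 1.
+ */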
+
+struct rpc_procinfo nfs3_procedures[] = {
+ PROC(GETATTR, getattr, getattr, 1),
+ PROC(SETATTR, setattr, setattr, 0),
+ PROC(LOOKUP, lookup, lookup, 2),
+ PROC(ACCESS, access, access, 1),
+ PROC(READLINK, readlink, readlink, 3),
+ PROC(READ, read, read, 3),
+ PROC(WRITE, write, write, 4),
+ PROC(CREATE, create, create, 0),
+ PROC(MKDIR, mkdir, create, 0),
+ PROC(SYMLINK, symlink, create, 0),
+ PROC(MKNOD, mknod, create, 0),
+ PROC(REMOVE, remove, remove, 0),
+ PROC(RMDIR, lookup, setattr, 0),
+ PROC(RENAME, rename, rename, 0),
+ PROC(LINK, link, link, 0),
+ PROC(READDIR, readdir, readdir, 3),
+ PROC(READDIRPLUS, readdirplus, readdir, 3),
+ PROC(FSSTAT, getattr, fsstat, 0),
+ PROC(FSINFO, getattr, fsinfo, 0),
+ PROC(PATHCONF, getattr, pathconf, 0),
+ PROC(COMMIT, commit, commit, 5),
+};
+
+const struct rpc_version nfs_version3 = {
+ .number = 3,
+ .nrprocs = ARRAY_SIZE(nfs3_procedures),
+ .procs = nfs3_procedures
+};
+
+#ifdef CONFIG_NFS_V3_ACL
+static struct rpc_procinfo nfs3_acl_procedures[] = {
+ [ACLPROC3_GETACL] = {
+ .p_proc = ACLPROC3_GETACL,
+ .p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
+ .p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
+ .p_arglen = ACL3_getaclargs_sz,
+ .p_replen = ACL3_getaclres_sz,
+ .p_timer = 1,
+ .p_name = "GETACL",
+ },
+ [ACLPROC3_SETACL] = {
+ .p_proc = ACLPROC3_SETACL,
+ .p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
+ .p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
+ .p_arglen = ACL3_setaclargs_sz,
+ .p_replen = ACL3_setaclres_sz,
+ .p_timer = 0,
+ .p_name = "SETACL",
+ },
+};
+
+const struct rpc_version nfsacl_version3 = {
+ .number = 3,
+ .nrprocs = ARRAY_SIZE(nfs3_acl_procedures),
+ .procs = nfs3_acl_procedures,
+};
+#endif /* CONFIG_NFS_V3_ACL */
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
new file mode 100644
index 00000000..8d750210
--- /dev/null
+++ b/fs/nfs/nfs4_fs.h
@@ -0,0 +1,379 @@
+/*
+ * linux/fs/nfs/nfs4_fs.h
+ *
+ * Copyright (C) 2005 Trond Myklebust
+ *
+ * NFSv4-specific filesystem definitions and declarations
+ */
+
+#ifndef __LINUX_FS_NFS_NFS4_FS_H
+#define __LINUX_FS_NFS_NFS4_FS_H
+
+#ifdef CONFIG_NFS_V4
+
+struct idmap;
+
+enum nfs4_client_state {
+ NFS4CLNT_MANAGER_RUNNING = 0,
+ NFS4CLNT_CHECK_LEASE,
+ NFS4CLNT_LEASE_EXPIRED,
+ NFS4CLNT_RECLAIM_REBOOT,
+ NFS4CLNT_RECLAIM_NOGRACE,
+ NFS4CLNT_DELEGRETURN,
+ NFS4CLNT_SESSION_RESET,
+ NFS4CLNT_RECALL_SLOT,
+ NFS4CLNT_LEASE_CONFIRM,
+ NFS4CLNT_SERVER_SCOPE_MISMATCH,
+};
+
+enum nfs4_session_state {
+ NFS4_SESSION_INITING,
+ NFS4_SESSION_DRAINING,
+};
+
+#define NFS4_RENEW_TIMEOUT 0x01
+#define NFS4_RENEW_DELEGATION_CB 0x02
+
+struct nfs4_minor_version_ops {
+ u32 minor_version;
+
+ int (*call_sync)(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply);
+ bool (*match_stateid)(const nfs4_stateid *,
+ const nfs4_stateid *);
+ int (*find_root_sec)(struct nfs_server *, struct nfs_fh *,
+ struct nfs_fsinfo *);
+ const struct nfs4_state_recovery_ops *reboot_recovery_ops;
+ const struct nfs4_state_recovery_ops *nograce_recovery_ops;
+ const struct nfs4_state_maintenance_ops *state_renewal_ops;
+};
+
+struct nfs_unique_id {
+ struct rb_node rb_node;
+ __u64 id;
+};
+
+#define NFS_SEQID_CONFIRMED 1
+struct nfs_seqid_counter {
+ ktime_t create_time;
+ int owner_id;
+ int flags;
+ u32 counter;
+ spinlock_t lock; /* Protects the list */
+ struct list_head list; /* Defines sequence of RPC calls */
+ struct rpc_wait_queue wait; /* RPC call delay queue */
+};
+
+struct nfs_seqid {
+ struct nfs_seqid_counter *sequence;
+ struct list_head list;
+ struct rpc_task *task;
+};
+
+static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status)
+{
+ if (seqid_mutating_err(-status))
+ seqid->flags |= NFS_SEQID_CONFIRMED;
+}
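+
+/*
+ * seqid_mutating_err() (defined elsewhere) reports whether a reply
+ * advanced the server's seqid; once any such reply has been seen, the
+ * sequence is known to the server, so the counter is marked confirmed.
+ */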
+
+/*
+ * NFS4 state_owners and lock_owners are simply labels for ordered
+ * sequences of RPC calls. Their sole purpose is to provide once-only
+ * semantics by allowing the server to identify replayed requests.
+ */
+struct nfs4_state_owner {
+ struct nfs_server *so_server;
+ struct list_head so_lru;
+ unsigned long so_expires;
+ struct rb_node so_server_node;
+
+ struct rpc_cred *so_cred; /* Associated cred */
+
+ spinlock_t so_lock;
+ atomic_t so_count;
+ unsigned long so_flags;
+ struct list_head so_states;
+ struct nfs_seqid_counter so_seqid;
+};
+
+enum {
+ NFS_OWNER_RECLAIM_REBOOT,
+ NFS_OWNER_RECLAIM_NOGRACE
+};
+
+#define NFS_LOCK_NEW 0
+#define NFS_LOCK_RECLAIM 1
+#define NFS_LOCK_EXPIRED 2
+
+/*
+ * struct nfs4_state maintains the client-side state for a given
+ * (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
+ *
+ * OPEN:
+ * In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server,
+ * we need to know how many files are open for reading or writing on a
+ * given inode. This information too is stored here.
+ *
+ * LOCK: one nfs4_state (LOCK) to hold the lock stateid, chained off
+ * its nfs4_state (OPEN)
+ */
+
+struct nfs4_lock_owner {
+ unsigned int lo_type;
+#define NFS4_ANY_LOCK_TYPE (0U)
+#define NFS4_FLOCK_LOCK_TYPE (1U << 0)
+#define NFS4_POSIX_LOCK_TYPE (1U << 1)
+ union {
+ fl_owner_t posix_owner;
+ pid_t flock_owner;
+ } lo_u;
+};
+
+struct nfs4_lock_state {
+ struct list_head ls_locks; /* Other lock stateids */
+ struct nfs4_state * ls_state; /* Pointer to open state */
+#define NFS_LOCK_INITIALIZED 1
+ int ls_flags;
+ struct nfs_seqid_counter ls_seqid;
+ nfs4_stateid ls_stateid;
+ atomic_t ls_count;
+ struct nfs4_lock_owner ls_owner;
+};
+
+/* bits for nfs4_state->flags */
+enum {
+ LK_STATE_IN_USE,
+ NFS_DELEGATED_STATE, /* Current stateid is delegation */
+ NFS_O_RDONLY_STATE, /* OPEN stateid has read-only state */
+ NFS_O_WRONLY_STATE, /* OPEN stateid has write-only state */
+ NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
+ NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
+ NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
+ NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
+};
+
+struct nfs4_state {
+ struct list_head open_states; /* List of states for the same state_owner */
+ struct list_head inode_states; /* List of states for the same inode */
+ struct list_head lock_states; /* List of subservient lock stateids */
+
+ struct nfs4_state_owner *owner; /* Pointer to the open owner */
+ struct inode *inode; /* Pointer to the inode */
+
+ unsigned long flags; /* Do we hold any locks? */
+ spinlock_t state_lock; /* Protects the lock_states list */
+
+ seqlock_t seqlock; /* Protects the stateid/open_stateid */
+ nfs4_stateid stateid; /* Current stateid: may be delegation */
+ nfs4_stateid open_stateid; /* OPEN stateid */
+
+ /* The following 3 fields are protected by owner->so_lock */
+ unsigned int n_rdonly; /* Number of read-only references */
+ unsigned int n_wronly; /* Number of write-only references */
+ unsigned int n_rdwr; /* Number of read/write references */
+ fmode_t state; /* State on the server (R,W, or RW) */
+ atomic_t count;
+};
+
+
+struct nfs4_exception {
+ long timeout;
+ int retry;
+ struct nfs4_state *state;
+ struct inode *inode;
+};
+
+struct nfs4_state_recovery_ops {
+ int owner_flag_bit;
+ int state_flag_bit;
+ int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
+ int (*recover_lock)(struct nfs4_state *, struct file_lock *);
+ int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
+ struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
+ int (*reclaim_complete)(struct nfs_client *);
+};
+
+struct nfs4_state_maintenance_ops {
+ int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);
+ struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
+ int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
+};
+
+extern const struct dentry_operations nfs4_dentry_operations;
+extern const struct inode_operations nfs4_dir_inode_operations;
+
+/* nfs4namespace.c */
+struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
+
+/* nfs4proc.c */
+extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
+extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
+extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
+extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
+extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
+extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
+extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *,
+ struct nfs4_fs_locations *, struct page *);
+extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct qstr *,
+ struct nfs_fh *, struct nfs_fattr *);
+extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
+extern int nfs4_release_lockowner(struct nfs4_lock_state *);
+extern const struct xattr_handler *nfs4_xattr_handlers[];
+
+#if defined(CONFIG_NFS_V4_1)
+static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
+{
+ return server->nfs_client->cl_session;
+}
+
+extern bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy);
+extern int nfs4_setup_sequence(const struct nfs_server *server,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ struct rpc_task *task);
+extern int nfs41_setup_sequence(struct nfs4_session *session,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ struct rpc_task *task);
+extern void nfs4_destroy_session(struct nfs4_session *session);
+extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
+extern int nfs4_proc_create_session(struct nfs_client *);
+extern int nfs4_proc_destroy_session(struct nfs4_session *);
+extern int nfs4_init_session(struct nfs_server *server);
+extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
+ struct nfs_fsinfo *fsinfo);
+extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data,
+ bool sync);
+
+static inline bool
+is_ds_only_client(struct nfs_client *clp)
+{
+ return (clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) ==
+ EXCHGID4_FLAG_USE_PNFS_DS;
+}
+
+static inline bool
+is_ds_client(struct nfs_client *clp)
+{
+ return clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_DS;
+}
+#else /* CONFIG_NFS_V4_1 */
+static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
+{
+ return NULL;
+}
+
+static inline int nfs4_setup_sequence(const struct nfs_server *server,
+ struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
+ struct rpc_task *task)
+{
+ return 0;
+}
+
+static inline int nfs4_init_session(struct nfs_server *server)
+{
+ return 0;
+}
+
+static inline bool
+is_ds_only_client(struct nfs_client *clp)
+{
+ return false;
+}
+
+static inline bool
+is_ds_client(struct nfs_client *clp)
+{
+ return false;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
+
+extern const u32 nfs4_fattr_bitmap[2];
+extern const u32 nfs4_statfs_bitmap[2];
+extern const u32 nfs4_pathconf_bitmap[2];
+extern const u32 nfs4_fsinfo_bitmap[3];
+extern const u32 nfs4_fs_locations_bitmap[2];
+
+/* nfs4renewd.c */
+extern void nfs4_schedule_state_renewal(struct nfs_client *);
+extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
+extern void nfs4_kill_renewd(struct nfs_client *);
+extern void nfs4_renew_state(struct work_struct *);
+
+/* nfs4state.c */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
+struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
+#if defined(CONFIG_NFS_V4_1)
+struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
+struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
+extern void nfs4_schedule_session_recovery(struct nfs4_session *);
+#else
+static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
+extern void nfs4_put_state_owner(struct nfs4_state_owner *);
+extern void nfs4_purge_state_owners(struct nfs_server *);
+extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
+extern void nfs4_put_open_state(struct nfs4_state *);
+extern void nfs4_close_state(struct nfs4_state *, fmode_t);
+extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
+extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
+extern void nfs_inode_find_state_and_recover(struct inode *inode,
+ const nfs4_stateid *stateid);
+extern void nfs4_schedule_lease_recovery(struct nfs_client *);
+extern void nfs4_schedule_state_manager(struct nfs_client *);
+extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
+extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
+extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
+extern void nfs41_handle_recall_slot(struct nfs_client *clp);
+extern void nfs41_handle_server_scope(struct nfs_client *,
+ struct server_scope **);
+extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
+extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
+extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
+ fmode_t, fl_owner_t, pid_t);
+
+extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
+extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
+extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
+extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
+extern void nfs_release_seqid(struct nfs_seqid *seqid);
+extern void nfs_free_seqid(struct nfs_seqid *seqid);
+
+extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp);
+
+extern const nfs4_stateid zero_stateid;
+
+/* nfs4xdr.c */
+extern struct rpc_procinfo nfs4_procedures[];
+
+struct nfs4_mount_data;
+
+/* callback_xdr.c */
+extern struct svc_version nfs4_callback_version1;
+extern struct svc_version nfs4_callback_version4;
+
+static inline void nfs4_stateid_copy(nfs4_stateid *dst, const nfs4_stateid *src)
+{
+ memcpy(dst, src, sizeof(*dst));
+}
+
+static inline bool nfs4_stateid_match(const nfs4_stateid *dst, const nfs4_stateid *src)
+{
+ return memcmp(dst, src, sizeof(*dst)) == 0;
+}
+
+#else
+
+#define nfs4_close_state(a, b) do { } while (0)
+#define nfs4_close_sync(a, b) do { } while (0)
+
+#endif /* CONFIG_NFS_V4 */
+#endif /* __LINUX_FS_NFS_NFS4_FS_H */
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
new file mode 100644
index 00000000..5acfd9ea
--- /dev/null
+++ b/fs/nfs/nfs4filelayout.c
@@ -0,0 +1,1109 @@
+/*
+ * Module for the pnfs nfs4 file layout driver.
+ * Defines all I/O and Policy interface operations, plus code
+ * to register itself with the pNFS client.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/module.h>
+
+#include <linux/sunrpc/metrics.h>
+
+#include "internal.h"
+#include "delegation.h"
+#include "nfs4filelayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>");
+MODULE_DESCRIPTION("The NFSv4 file layout driver");
+
+#define FILELAYOUT_POLL_RETRY_MAX (15*HZ)
+
+static loff_t
+filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
+ loff_t offset)
+{
+ u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count;
+ u64 stripe_no;
+ u32 rem;
+
+ offset -= flseg->pattern_offset;
+ stripe_no = div_u64(offset, stripe_width);
+ div_u64_rem(offset, flseg->stripe_unit, &rem);
+
+ return stripe_no * flseg->stripe_unit + rem;
+}
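+
+/*
+ * Worked example (values chosen for illustration): with stripe_unit = 8192
+ * and stripe_count = 4, stripe_width = 32768.  An offset of 40960 (after
+ * pattern_offset is subtracted) gives stripe_no = 40960 / 32768 = 1 and
+ * rem = 40960 % 8192 = 0, so the dense offset is 1 * 8192 + 0 = 8192:
+ * stripe unit 5 of the file is the second unit stored on its data server.
+ */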
+
+/* This function is used by the layout driver to calculate the
+ * offset of the file on the dserver based on whether the
+ * layout type is STRIPE_DENSE or STRIPE_SPARSE
+ */
+static loff_t
+filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+
+ switch (flseg->stripe_type) {
+ case STRIPE_SPARSE:
+ return offset;
+
+ case STRIPE_DENSE:
+ return filelayout_get_dense_offset(flseg, offset);
+ }
+
+ BUG();
+}
+
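+/*
+ * Returns 0 when the RPC completed successfully.  Otherwise clears
+ * task->tk_status and returns -EAGAIN so the caller restarts the call;
+ * *reset is set to 1 when the I/O should be resent through the MDS
+ * rather than the data server.
+ */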
+static int filelayout_async_handle_error(struct rpc_task *task,
+ struct nfs4_state *state,
+ struct nfs_client *clp,
+ int *reset)
+{
+ struct nfs_server *mds_server = NFS_SERVER(state->inode);
+ struct nfs_client *mds_client = mds_server->nfs_client;
+
+ if (task->tk_status >= 0)
+ return 0;
+ *reset = 0;
+
+ switch (task->tk_status) {
+ /* MDS state errors */
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ nfs_remove_bad_delegation(state->inode);
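+ /* Fall through to stateid recovery */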
+ case -NFS4ERR_OPENMODE:
+ nfs4_schedule_stateid_recovery(mds_server, state);
+ goto wait_on_recovery;
+ case -NFS4ERR_EXPIRED:
+ nfs4_schedule_stateid_recovery(mds_server, state);
+ nfs4_schedule_lease_recovery(mds_client);
+ goto wait_on_recovery;
+ /* DS session errors */
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR %d, Reset session. Exchangeid "
+ "flags 0x%x\n", __func__, task->tk_status,
+ clp->cl_exchange_flags);
+ nfs4_schedule_session_recovery(clp->cl_session);
+ break;
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ case -EKEYEXPIRED:
+ rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
+ break;
+ case -NFS4ERR_RETRY_UNCACHED_REP:
+ break;
+ default:
+ dprintk("%s DS error. Retry through MDS %d\n", __func__,
+ task->tk_status);
+ *reset = 1;
+ break;
+ }
+out:
+ task->tk_status = 0;
+ return -EAGAIN;
+wait_on_recovery:
+ rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
+ if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
+ rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
+ goto out;
+}
+
+/* NFS_PROTO call done callback routines */
+
+static int filelayout_read_done_cb(struct rpc_task *task,
+ struct nfs_read_data *data)
+{
+ int reset = 0;
+
+ dprintk("%s DS read\n", __func__);
+
+ if (filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, &reset) == -EAGAIN) {
+ dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
+ __func__, data->ds_clp, data->ds_clp->cl_session);
+ if (reset) {
+ pnfs_set_lo_fail(data->lseg);
+ nfs4_reset_read(task, data);
+ }
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+/*
+ * We reference the rpc_cred of the first WRITE that triggers the need for
+ * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
+ * rfc5661 is not clear about which credential should be used.
+ */
+static void
+filelayout_set_layoutcommit(struct nfs_write_data *wdata)
+{
+ if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds ||
+ wdata->res.verf->committed == NFS_FILE_SYNC)
+ return;
+
+ pnfs_set_layoutcommit(wdata);
+ dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino,
+ (unsigned long) NFS_I(wdata->inode)->layout->plh_lwb);
+}
+
+/*
+ * Call ops for the async read/write cases
+ * In the case of dense layouts, the offset needs to be reset to its
+ * original value.
+ */
+static void filelayout_read_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+
+ rdata->read_done_cb = filelayout_read_done_cb;
+
+ if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
+ &rdata->args.seq_args, &rdata->res.seq_res,
+ task))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void filelayout_read_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+
+ dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
+
+ /* Note this may cause RPC to be resent */
+ rdata->mds_ops->rpc_call_done(task, data);
+}
+
+static void filelayout_read_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+
+ rpc_count_iostats(task, NFS_SERVER(rdata->inode)->client->cl_metrics);
+}
+
+static void filelayout_read_release(void *data)
+{
+ struct nfs_read_data *rdata = (struct nfs_read_data *)data;
+
+ put_lseg(rdata->lseg);
+ rdata->mds_ops->rpc_release(data);
+}
+
+static int filelayout_write_done_cb(struct rpc_task *task,
+ struct nfs_write_data *data)
+{
+ int reset = 0;
+
+ if (filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, &reset) == -EAGAIN) {
+ dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
+ __func__, data->ds_clp, data->ds_clp->cl_session);
+ if (reset) {
+ pnfs_set_lo_fail(data->lseg);
+ nfs4_reset_write(task, data);
+ }
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ filelayout_set_layoutcommit(data);
+ return 0;
+}
+
+/* Fake up some data that will cause nfs_commit_release to retry the writes. */
+static void prepare_to_resend_writes(struct nfs_write_data *data)
+{
+ struct nfs_page *first = nfs_list_entry(data->pages.next);
+
+ data->task.tk_status = 0;
+ memcpy(data->verf.verifier, first->wb_verf.verifier,
+ sizeof(first->wb_verf.verifier));
+ data->verf.verifier[0]++; /* ensure verifier mismatch */
+}
+
+static int filelayout_commit_done_cb(struct rpc_task *task,
+ struct nfs_write_data *data)
+{
+ int reset = 0;
+
+ if (filelayout_async_handle_error(task, data->args.context->state,
+ data->ds_clp, &reset) == -EAGAIN) {
+ dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n",
+ __func__, data->ds_clp, data->ds_clp->cl_session);
+ if (reset) {
+ prepare_to_resend_writes(data);
+ pnfs_set_lo_fail(data->lseg);
+ } else
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void filelayout_write_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
+ &wdata->args.seq_args, &wdata->res.seq_res,
+ task))
+ return;
+
+ rpc_call_start(task);
+}
+
+static void filelayout_write_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ /* Note this may cause RPC to be resent */
+ wdata->mds_ops->rpc_call_done(task, data);
+}
+
+static void filelayout_write_count_stats(struct rpc_task *task, void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ rpc_count_iostats(task, NFS_SERVER(wdata->inode)->client->cl_metrics);
+}
+
+static void filelayout_write_release(void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ put_lseg(wdata->lseg);
+ wdata->mds_ops->rpc_release(data);
+}
+
+static void filelayout_commit_release(void *data)
+{
+ struct nfs_write_data *wdata = (struct nfs_write_data *)data;
+
+ nfs_commit_release_pages(wdata);
+ if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding))
+ nfs_commit_clear_lock(NFS_I(wdata->inode));
+ put_lseg(wdata->lseg);
+ nfs_commitdata_release(wdata);
+}
+
+static const struct rpc_call_ops filelayout_read_call_ops = {
+ .rpc_call_prepare = filelayout_read_prepare,
+ .rpc_call_done = filelayout_read_call_done,
+ .rpc_count_stats = filelayout_read_count_stats,
+ .rpc_release = filelayout_read_release,
+};
+
+static const struct rpc_call_ops filelayout_write_call_ops = {
+ .rpc_call_prepare = filelayout_write_prepare,
+ .rpc_call_done = filelayout_write_call_done,
+ .rpc_count_stats = filelayout_write_count_stats,
+ .rpc_release = filelayout_write_release,
+};
+
+static const struct rpc_call_ops filelayout_commit_call_ops = {
+ .rpc_call_prepare = filelayout_write_prepare,
+ .rpc_call_done = filelayout_write_call_done,
+ .rpc_count_stats = filelayout_write_count_stats,
+ .rpc_release = filelayout_commit_release,
+};
+
+static enum pnfs_try_status
+filelayout_read_pagelist(struct nfs_read_data *data)
+{
+ struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs4_pnfs_ds *ds;
+ loff_t offset = data->args.offset;
+ u32 j, idx;
+ struct nfs_fh *fh;
+ int status;
+
+ dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
+ __func__, data->inode->i_ino,
+ data->args.pgbase, (size_t)data->args.count, offset);
+
+ if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
+ return PNFS_NOT_ATTEMPTED;
+
+ /* Retrieve the correct rpc_client for the byte range */
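+ /* j is the stripe unit index within the striping pattern; idx is the
+ * data server index looked up via the deviceid's stripe_indices array */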
+ j = nfs4_fl_calc_j_index(lseg, offset);
+ idx = nfs4_fl_calc_ds_index(lseg, j);
+ ds = nfs4_fl_prepare_ds(lseg, idx);
+ if (!ds) {
+ /* Either layout fh index faulty, or ds connect failed */
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ return PNFS_NOT_ATTEMPTED;
+ }
+ dprintk("%s USE DS: %s\n", __func__, ds->ds_remotestr);
+
+ /* No multipath support. Use first DS */
+ data->ds_clp = ds->ds_clp;
+ fh = nfs4_fl_select_ds_fh(lseg, j);
+ if (fh)
+ data->args.fh = fh;
+
+ data->args.offset = filelayout_get_dserver_offset(lseg, offset);
+ data->mds_offset = offset;
+
+ /* Perform an asynchronous read to ds */
+ status = nfs_initiate_read(data, ds->ds_clp->cl_rpcclient,
+ &filelayout_read_call_ops);
+ BUG_ON(status != 0);
+ return PNFS_ATTEMPTED;
+}
+
+/* Perform async writes. */
+static enum pnfs_try_status
+filelayout_write_pagelist(struct nfs_write_data *data, int sync)
+{
+ struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs4_pnfs_ds *ds;
+ loff_t offset = data->args.offset;
+ u32 j, idx;
+ struct nfs_fh *fh;
+ int status;
+
+ if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
+ return PNFS_NOT_ATTEMPTED;
+
+ /* Retrieve the correct rpc_client for the byte range */
+ j = nfs4_fl_calc_j_index(lseg, offset);
+ idx = nfs4_fl_calc_ds_index(lseg, j);
+ ds = nfs4_fl_prepare_ds(lseg, idx);
+ if (!ds) {
+ printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
+ __func__);
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ return PNFS_NOT_ATTEMPTED;
+ }
+ dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s\n", __func__,
+ data->inode->i_ino, sync, (size_t) data->args.count, offset,
+ ds->ds_remotestr);
+
+ data->write_done_cb = filelayout_write_done_cb;
+ data->ds_clp = ds->ds_clp;
+ fh = nfs4_fl_select_ds_fh(lseg, j);
+ if (fh)
+ data->args.fh = fh;
+ /*
+ * Get the file offset on the dserver. Set the write offset to
+ * this offset and save the original offset.
+ */
+ data->args.offset = filelayout_get_dserver_offset(lseg, offset);
+
+ /* Perform an asynchronous write */
+ status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient,
+ &filelayout_write_call_ops, sync);
+ BUG_ON(status != 0);
+ return PNFS_ATTEMPTED;
+}
+
+/*
+ * filelayout_check_layout()
+ *
+ * Make sure layout segment parameters are sane WRT the device.
+ * At this point no generic layer initialization of the lseg has occurred,
+ * and nothing has been added to the layout_hdr cache.
+ */
+static int
+filelayout_check_layout(struct pnfs_layout_hdr *lo,
+ struct nfs4_filelayout_segment *fl,
+ struct nfs4_layoutget_res *lgr,
+ struct nfs4_deviceid *id,
+ gfp_t gfp_flags)
+{
+ struct nfs4_deviceid_node *d;
+ struct nfs4_file_layout_dsaddr *dsaddr;
+ int status = -EINVAL;
+ struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);
+
+ dprintk("--> %s\n", __func__);
+
+ /* FIXME: remove this check when layout segment support is added */
+ if (lgr->range.offset != 0 ||
+ lgr->range.length != NFS4_MAX_UINT64) {
+ dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
+ __func__);
+ goto out;
+ }
+
+ if (fl->pattern_offset > lgr->range.offset) {
+ dprintk("%s pattern_offset %lld too large\n",
+ __func__, fl->pattern_offset);
+ goto out;
+ }
+
+ if (!fl->stripe_unit || fl->stripe_unit % PAGE_SIZE) {
+ dprintk("%s Invalid stripe unit (%u)\n",
+ __func__, fl->stripe_unit);
+ goto out;
+ }
+
+ /* find and reference the deviceid */
+ d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode)->pnfs_curr_ld,
+ NFS_SERVER(lo->plh_inode)->nfs_client, id);
+ if (d == NULL) {
+ dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
+ if (dsaddr == NULL)
+ goto out;
+ } else
+ dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
+ /* Found deviceid is being reaped */
+ if (test_bit(NFS_DEVICEID_INVALID, &dsaddr->id_node.flags))
+ goto out_put;
+
+ fl->dsaddr = dsaddr;
+
+ if (fl->first_stripe_index >= dsaddr->stripe_count) {
+ dprintk("%s Bad first_stripe_index %u\n",
+ __func__, fl->first_stripe_index);
+ goto out_put;
+ }
+
+ if ((fl->stripe_type == STRIPE_SPARSE &&
+ fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
+ (fl->stripe_type == STRIPE_DENSE &&
+ fl->num_fh != dsaddr->stripe_count)) {
+ dprintk("%s num_fh %u not valid for given packing\n",
+ __func__, fl->num_fh);
+ goto out_put;
+ }
+
+ if (fl->stripe_unit % nfss->rsize || fl->stripe_unit % nfss->wsize) {
+ dprintk("%s Stripe unit (%u) not aligned with rsize %u "
+ "wsize %u\n", __func__, fl->stripe_unit, nfss->rsize,
+ nfss->wsize);
+ }
+
+ status = 0;
+out:
+ dprintk("--> %s returns %d\n", __func__, status);
+ return status;
+out_put:
+ nfs4_fl_put_deviceid(dsaddr);
+ goto out;
+}
+
+static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
+{
+ int i;
+
+ for (i = 0; i < fl->num_fh; i++) {
+ if (!fl->fh_array[i])
+ break;
+ kfree(fl->fh_array[i]);
+ }
+ kfree(fl->fh_array);
+ fl->fh_array = NULL;
+}
+
+static void
+_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
+{
+ filelayout_free_fh_array(fl);
+ kfree(fl);
+}
+
+static int
+filelayout_decode_layout(struct pnfs_layout_hdr *flo,
+ struct nfs4_filelayout_segment *fl,
+ struct nfs4_layoutget_res *lgr,
+ struct nfs4_deviceid *id,
+ gfp_t gfp_flags)
+{
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ struct page *scratch;
+ __be32 *p;
+ uint32_t nfl_util;
+ int i;
+
+ dprintk("%s: set_layout_map Begin\n", __func__);
+
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ return -ENOMEM;
+
+ xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ /* 20 = nfl_util (4), first_stripe_index (4), pattern_offset (8),
+ * num_fh (4) */
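+ /* Wire format (nfsv4_1_file_layout4, RFC 5661 section 13.3):
+ * deviceid4 nfl_deviceid;
+ * nfl_util4 nfl_util;
+ * uint32_t nfl_first_stripe_index;
+ * offset4 nfl_pattern_offset;
+ * nfs_fh4 nfl_fh_list<>;
+ */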
+ p = xdr_inline_decode(&stream, NFS4_DEVICEID4_SIZE + 20);
+ if (unlikely(!p))
+ goto out_err;
+
+ memcpy(id, p, sizeof(*id));
+ p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
+ nfs4_print_deviceid(id);
+
+ nfl_util = be32_to_cpup(p++);
+ if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
+ fl->commit_through_mds = 1;
+ if (nfl_util & NFL4_UFLG_DENSE)
+ fl->stripe_type = STRIPE_DENSE;
+ else
+ fl->stripe_type = STRIPE_SPARSE;
+ fl->stripe_unit = nfl_util & ~NFL4_UFLG_MASK;
+
+ fl->first_stripe_index = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &fl->pattern_offset);
+ fl->num_fh = be32_to_cpup(p++);
+
+ dprintk("%s: nfl_util 0x%X num_fh %u fsi %u po %llu\n",
+ __func__, nfl_util, fl->num_fh, fl->first_stripe_index,
+ fl->pattern_offset);
+
+ /* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
+ * Further checking is done in filelayout_check_layout */
+ if (fl->num_fh >
+ max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
+ goto out_err;
+
+ if (fl->num_fh > 0) {
+ fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
+ gfp_flags);
+ if (!fl->fh_array)
+ goto out_err;
+ }
+
+ for (i = 0; i < fl->num_fh; i++) {
+ /* Do we want to use a mempool here? */
+ fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
+ if (!fl->fh_array[i])
+ goto out_err_free;
+
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free;
+ fl->fh_array[i]->size = be32_to_cpup(p++);
+ if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+ printk(KERN_ERR "NFS: Too big fh %d received %d\n",
+ i, fl->fh_array[i]->size);
+ goto out_err_free;
+ }
+
+ p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
+ if (unlikely(!p))
+ goto out_err_free;
+ memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
+ dprintk("DEBUG: %s: fh len %d\n", __func__,
+ fl->fh_array[i]->size);
+ }
+
+ __free_page(scratch);
+ return 0;
+
+out_err_free:
+ filelayout_free_fh_array(fl);
+out_err:
+ __free_page(scratch);
+ return -EIO;
+}
+
+static void
+filelayout_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+
+ dprintk("--> %s\n", __func__);
+ nfs4_fl_put_deviceid(fl->dsaddr);
+ kfree(fl->commit_buckets);
+ _filelayout_free_lseg(fl);
+}
+
+static struct pnfs_layout_segment *
+filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
+ struct nfs4_layoutget_res *lgr,
+ gfp_t gfp_flags)
+{
+ struct nfs4_filelayout_segment *fl;
+ int rc;
+ struct nfs4_deviceid id;
+
+ dprintk("--> %s\n", __func__);
+ fl = kzalloc(sizeof(*fl), gfp_flags);
+ if (!fl)
+ return NULL;
+
+ rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
+ if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
+ _filelayout_free_lseg(fl);
+ return NULL;
+ }
+
+ /* This assumes there is only one IOMODE_RW lseg. What
+ * we really want to do is have a layout_hdr level
+ * dictionary of <multipath_list4, fh> keys, each
+ * associated with a struct list_head, populated by calls
+ * to filelayout_write_pagelist().
+ */
+ if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) {
+ int i;
+ int size = (fl->stripe_type == STRIPE_SPARSE) ?
+ fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
+
+ fl->commit_buckets = kcalloc(size, sizeof(struct nfs4_fl_commit_bucket), gfp_flags);
+ if (!fl->commit_buckets) {
+ filelayout_free_lseg(&fl->generic_hdr);
+ return NULL;
+ }
+ fl->number_of_buckets = size;
+ for (i = 0; i < size; i++) {
+ INIT_LIST_HEAD(&fl->commit_buckets[i].written);
+ INIT_LIST_HEAD(&fl->commit_buckets[i].committing);
+ }
+ }
+ return &fl->generic_hdr;
+}
+
+/*
+ * filelayout_pg_test(). Called by nfs_can_coalesce_requests()
+ *
+ * return true : coalesce page
+ * return false : don't coalesce page
+ */
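+/*
+ * Example (illustrative numbers): with stripe_unit = 8192 and 4096-byte
+ * pages, page indexes 0 and 1 both land in stripe 0 and may coalesce,
+ * while indexes 1 and 2 land in stripes 0 and 1 and may not.
+ */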
+static bool
+filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+ struct nfs_page *req)
+{
+ u64 p_stripe, r_stripe;
+ u32 stripe_unit;
+
+ if (!pnfs_generic_pg_test(pgio, prev, req) ||
+ !nfs_generic_pg_test(pgio, prev, req))
+ return false;
+
+ p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT;
+ r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT;
+ stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
+
+ do_div(p_stripe, stripe_unit);
+ do_div(r_stripe, stripe_unit);
+
+ return (p_stripe == r_stripe);
+}
+
+static void
+filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ BUG_ON(pgio->pg_lseg != NULL);
+
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_READ,
+ GFP_KERNEL);
+ /* If no lseg, fall back to read through mds */
+ if (pgio->pg_lseg == NULL)
+ nfs_pageio_reset_read_mds(pgio);
+}
+
+static void
+filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req)
+{
+ BUG_ON(pgio->pg_lseg != NULL);
+
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_RW,
+ GFP_NOFS);
+ /* If no lseg, fall back to write through mds */
+ if (pgio->pg_lseg == NULL)
+ nfs_pageio_reset_write_mds(pgio);
+}
+
+static const struct nfs_pageio_ops filelayout_pg_read_ops = {
+ .pg_init = filelayout_pg_init_read,
+ .pg_test = filelayout_pg_test,
+ .pg_doio = pnfs_generic_pg_readpages,
+};
+
+static const struct nfs_pageio_ops filelayout_pg_write_ops = {
+ .pg_init = filelayout_pg_init_write,
+ .pg_test = filelayout_pg_test,
+ .pg_doio = pnfs_generic_pg_writepages,
+};
+
+static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
+{
+ if (fl->stripe_type == STRIPE_SPARSE)
+ return nfs4_fl_calc_ds_index(&fl->generic_hdr, j);
+ else
+ return j;
+}
+
+/* The generic layer is about to remove the req from the commit list.
+ * If this will make the bucket empty, it will need to put the lseg reference.
+ */
+static void
+filelayout_clear_request_commit(struct nfs_page *req)
+{
+ struct pnfs_layout_segment *freeme = NULL;
+ struct inode *inode = req->wb_context->dentry->d_inode;
+
+ spin_lock(&inode->i_lock);
+ if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
+ goto out;
+ if (list_is_singular(&req->wb_list)) {
+ struct pnfs_layout_segment *lseg;
+
+ /* From here we can find the bucket, but for the moment,
+ * since there is only one relevant lseg...
+ */
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+ freeme = lseg;
+ break;
+ }
+ }
+ }
+out:
+ nfs_request_remove_commit_list(req);
+ spin_unlock(&inode->i_lock);
+ put_lseg(freeme);
+}
+
+static struct list_head *
+filelayout_choose_commit_list(struct nfs_page *req,
+ struct pnfs_layout_segment *lseg)
+{
+ struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
+ u32 i, j;
+ struct list_head *list;
+
+ if (fl->commit_through_mds)
+ return &NFS_I(req->wb_context->dentry->d_inode)->commit_list;
+
+ /* Note that we are calling nfs4_fl_calc_j_index on each page
+ * that ends up being committed to a data server. An attractive
+ * alternative is to add a field to nfs_write_data and nfs_page
+ * to store the value calculated in filelayout_write_pagelist
+ * and just use that here.
+ */
+ j = nfs4_fl_calc_j_index(lseg,
+ (loff_t)req->wb_index << PAGE_CACHE_SHIFT);
+ i = select_bucket_index(fl, j);
+ list = &fl->commit_buckets[i].written;
+ if (list_empty(list)) {
+ /* Non-empty buckets hold a reference on the lseg. That ref
+ * is normally transferred to the COMMIT call and released
+ * there. It could also be released if the last req is pulled
+ * off due to a rewrite, in which case it will be done in
+ * filelayout_remove_commit_req
+ */
+ get_lseg(lseg);
+ }
+ set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+ return list;
+}
+
+static void
+filelayout_mark_request_commit(struct nfs_page *req,
+ struct pnfs_layout_segment *lseg)
+{
+ struct list_head *list;
+
+ list = filelayout_choose_commit_list(req, lseg);
+ nfs_request_add_commit_list(req, list);
+}
+
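+/*
+ * Inverse of select_bucket_index(): for STRIPE_SPARSE the bucket index
+ * is already a data server index, while for STRIPE_DENSE it is a stripe
+ * index that must still be mapped through the deviceid.
+ */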
+static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+
+ if (flseg->stripe_type == STRIPE_SPARSE)
+ return i;
+ else
+ return nfs4_fl_calc_ds_index(lseg, i);
+}
+
+static struct nfs_fh *
+select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+
+ if (flseg->stripe_type == STRIPE_SPARSE) {
+ if (flseg->num_fh == 1)
+ i = 0;
+ else if (flseg->num_fh == 0)
+ /* Use the MDS OPEN fh set in nfs_read_rpcsetup */
+ return NULL;
+ }
+ return flseg->fh_array[i];
+}
+
+static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
+{
+ struct pnfs_layout_segment *lseg = data->lseg;
+ struct nfs4_pnfs_ds *ds;
+ u32 idx;
+ struct nfs_fh *fh;
+
+ idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+ ds = nfs4_fl_prepare_ds(lseg, idx);
+ if (!ds) {
+ printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
+ __func__);
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ prepare_to_resend_writes(data);
+ filelayout_commit_release(data);
+ return -EAGAIN;
+ }
+ dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how);
+ data->write_done_cb = filelayout_commit_done_cb;
+ data->ds_clp = ds->ds_clp;
+ fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
+ if (fh)
+ data->args.fh = fh;
+ return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient,
+ &filelayout_commit_call_ops, how);
+}
+
+/*
+ * This is only useful while we are using whole file layouts.
+ */
+static struct pnfs_layout_segment *
+find_only_write_lseg_locked(struct inode *inode)
+{
+ struct pnfs_layout_segment *lseg;
+
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
+ if (lseg->pls_range.iomode == IOMODE_RW)
+ return lseg;
+ return NULL;
+}
+
+static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode)
+{
+ struct pnfs_layout_segment *rv;
+
+ spin_lock(&inode->i_lock);
+ rv = find_only_write_lseg_locked(inode);
+ if (rv)
+ get_lseg(rv);
+ spin_unlock(&inode->i_lock);
+ return rv;
+}
+
+static int
+filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max,
+ spinlock_t *lock)
+{
+ struct list_head *src = &bucket->written;
+ struct list_head *dst = &bucket->committing;
+ struct nfs_page *req, *tmp;
+ int ret = 0;
+
+ list_for_each_entry_safe(req, tmp, src, wb_list) {
+ if (!nfs_lock_request(req))
+ continue;
+ if (cond_resched_lock(lock))
+ list_safe_reset_next(req, tmp, wb_list);
+ nfs_request_remove_commit_list(req);
+ clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+ nfs_list_add_request(req, dst);
+ ret++;
+ if (ret == max)
+ break;
+ }
+ return ret;
+}
+
+/* Move reqs from written to committing lists, returning count of number moved.
+ * Note called with i_lock held.
+ */
+static int filelayout_scan_commit_lists(struct inode *inode, int max,
+ spinlock_t *lock)
+{
+ struct pnfs_layout_segment *lseg;
+ struct nfs4_filelayout_segment *fl;
+ int i, rv = 0, cnt;
+
+ lseg = find_only_write_lseg_locked(inode);
+ if (!lseg)
+ goto out_done;
+ fl = FILELAYOUT_LSEG(lseg);
+ if (fl->commit_through_mds)
+ goto out_done;
+ for (i = 0; i < fl->number_of_buckets && max != 0; i++) {
+ cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i],
+ max, lock);
+ max -= cnt;
+ rv += cnt;
+ }
+out_done:
+ return rv;
+}
+
+static unsigned int
+alloc_ds_commits(struct inode *inode, struct list_head *list)
+{
+ struct pnfs_layout_segment *lseg;
+ struct nfs4_filelayout_segment *fl;
+ struct nfs_write_data *data;
+ int i, j;
+ unsigned int nreq = 0;
+
+ /* Won't need this when non-whole-file layout segments are supported;
+ * instead we will use a pnfs_layout_hdr structure */
+ lseg = find_only_write_lseg(inode);
+ if (!lseg)
+ return 0;
+ fl = FILELAYOUT_LSEG(lseg);
+ for (i = 0; i < fl->number_of_buckets; i++) {
+ if (list_empty(&fl->commit_buckets[i].committing))
+ continue;
+ data = nfs_commitdata_alloc();
+ if (!data)
+ break;
+ data->ds_commit_index = i;
+ data->lseg = lseg;
+ list_add(&data->pages, list);
+ nreq++;
+ }
+
+ /* Clean up on error */
+ for (j = i; j < fl->number_of_buckets; j++) {
+ if (list_empty(&fl->commit_buckets[j].committing))
+ continue;
+ nfs_retry_commit(&fl->commit_buckets[j].committing, lseg);
+ put_lseg(lseg); /* associated with emptying bucket */
+ }
+ put_lseg(lseg);
+ /* Caller will clean up entries put on list */
+ return nreq;
+}
+
+/* This follows nfs_commit_list pretty closely */
+static int
+filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ int how)
+{
+ struct nfs_write_data *data, *tmp;
+ LIST_HEAD(list);
+ unsigned int nreq = 0;
+
+ if (!list_empty(mds_pages)) {
+ data = nfs_commitdata_alloc();
+ if (data != NULL) {
+ data->lseg = NULL;
+ list_add(&data->pages, &list);
+ nreq++;
+ } else
+ nfs_retry_commit(mds_pages, NULL);
+ }
+
+ nreq += alloc_ds_commits(inode, &list);
+
+ if (nreq == 0) {
+ nfs_commit_clear_lock(NFS_I(inode));
+ goto out;
+ }
+
+ atomic_add(nreq, &NFS_I(inode)->commits_outstanding);
+
+ list_for_each_entry_safe(data, tmp, &list, pages) {
+ list_del_init(&data->pages);
+ if (!data->lseg) {
+ nfs_init_commit(data, mds_pages, NULL);
+ nfs_initiate_commit(data, NFS_CLIENT(inode),
+ data->mds_ops, how);
+ } else {
+ nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index].committing, data->lseg);
+ filelayout_initiate_commit(data, how);
+ }
+ }
+out:
+ return PNFS_ATTEMPTED;
+}
+
+static void
+filelayout_free_deviceid_node(struct nfs4_deviceid_node *d)
+{
+ nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
+}
+
+static struct pnfs_layoutdriver_type filelayout_type = {
+ .id = LAYOUT_NFSV4_1_FILES,
+ .name = "LAYOUT_NFSV4_1_FILES",
+ .owner = THIS_MODULE,
+ .alloc_lseg = filelayout_alloc_lseg,
+ .free_lseg = filelayout_free_lseg,
+ .pg_read_ops = &filelayout_pg_read_ops,
+ .pg_write_ops = &filelayout_pg_write_ops,
+ .mark_request_commit = filelayout_mark_request_commit,
+ .clear_request_commit = filelayout_clear_request_commit,
+ .scan_commit_lists = filelayout_scan_commit_lists,
+ .commit_pagelist = filelayout_commit_pagelist,
+ .read_pagelist = filelayout_read_pagelist,
+ .write_pagelist = filelayout_write_pagelist,
+ .free_deviceid_node = filelayout_free_deviceid_node,
+};
+
+static int __init nfs4filelayout_init(void)
+{
+ printk(KERN_INFO "%s: NFSv4 File Layout Driver Registering...\n",
+ __func__);
+ return pnfs_register_layoutdriver(&filelayout_type);
+}
+
+static void __exit nfs4filelayout_exit(void)
+{
+ printk(KERN_INFO "%s: NFSv4 File Layout Driver Unregistering...\n",
+ __func__);
+ pnfs_unregister_layoutdriver(&filelayout_type);
+}
+
+MODULE_ALIAS("nfs-layouttype4-1");
+
+module_init(nfs4filelayout_init);
+module_exit(nfs4filelayout_exit);
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
new file mode 100644
index 00000000..21190bb1
--- /dev/null
+++ b/fs/nfs/nfs4filelayout.h
@@ -0,0 +1,123 @@
+/*
+ * NFSv4 file layout driver data structures.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#ifndef FS_NFS_NFS4FILELAYOUT_H
+#define FS_NFS_NFS4FILELAYOUT_H
+
+#include "pnfs.h"
+
+/*
+ * Field testing shows we need to support up to 4096 stripe indices.
+ * We store each index as a u8 (u32 on the wire) to keep the memory footprint
+ * reasonable. This in turn means we support a maximum of 256
+ * RFC 5661 multipath_list4 structures.
+ */
+#define NFS4_PNFS_MAX_STRIPE_CNT 4096
+#define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */
+
+enum stripetype4 {
+ STRIPE_SPARSE = 1,
+ STRIPE_DENSE = 2
+};
+
+/* Individual ip address */
+struct nfs4_pnfs_ds_addr {
+ struct sockaddr_storage da_addr;
+ size_t da_addrlen;
+ struct list_head da_node; /* nfs4_pnfs_dev_hlist dev_dslist */
+ char *da_remotestr; /* human readable addr+port */
+};
+
+struct nfs4_pnfs_ds {
+ struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */
+ char *ds_remotestr; /* comma sep list of addrs */
+ struct list_head ds_addrs;
+ struct nfs_client *ds_clp;
+ atomic_t ds_count;
+};
+
+/* nfs4_file_layout_dsaddr flags */
+#define NFS4_DEVICE_ID_NEG_ENTRY 0x00000001
+
+struct nfs4_file_layout_dsaddr {
+ struct nfs4_deviceid_node id_node;
+ unsigned long flags;
+ u32 stripe_count;
+ u8 *stripe_indices;
+ u32 ds_num;
+ struct nfs4_pnfs_ds *ds_list[1]; /* variable-length tail; allocated with ds_num entries */
+};
+
+struct nfs4_fl_commit_bucket {
+ struct list_head written;
+ struct list_head committing;
+};
+
+struct nfs4_filelayout_segment {
+ struct pnfs_layout_segment generic_hdr;
+ u32 stripe_type;
+ u32 commit_through_mds;
+ u32 stripe_unit;
+ u32 first_stripe_index;
+ u64 pattern_offset;
+ struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
+ unsigned int num_fh;
+ struct nfs_fh **fh_array;
+ struct nfs4_fl_commit_bucket *commit_buckets; /* Sort commits to ds */
+ int number_of_buckets;
+};
+
+static inline struct nfs4_filelayout_segment *
+FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
+{
+ return container_of(lseg,
+ struct nfs4_filelayout_segment,
+ generic_hdr);
+}
+
+static inline struct nfs4_deviceid_node *
+FILELAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg)
+{
+ return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node;
+}
+
+extern struct nfs_fh *
+nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
+
+extern void print_ds(struct nfs4_pnfs_ds *ds);
+u32 nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset);
+u32 nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j);
+struct nfs4_pnfs_ds *nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg,
+ u32 ds_idx);
+extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
+extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
+struct nfs4_file_layout_dsaddr *
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
+
+#endif /* FS_NFS_NFS4FILELAYOUT_H */
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
new file mode 100644
index 00000000..c9cff9ad
--- /dev/null
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -0,0 +1,838 @@
+/*
+ * Device operations for the pnfs nfs4 file layout driver.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ * Garth Goodson <Garth.Goodson@netapp.com>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/vmalloc.h>
+
+#include "internal.h"
+#include "nfs4filelayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+/*
+ * Data server cache
+ *
+ * Data servers can be mapped to different device ids.
+ * nfs4_pnfs_ds reference counting
+ * - set to 1 on allocation
+ * - incremented when a device id maps a data server already in the cache.
+ * - decremented when deviceid is removed from the cache.
+ */
+static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
+static LIST_HEAD(nfs4_data_server_cache);
+
+/* Debug routines */
+void
+print_ds(struct nfs4_pnfs_ds *ds)
+{
+ if (ds == NULL) {
+ printk("%s NULL device\n", __func__);
+ return;
+ }
+ printk(" ds %s\n"
+ " ref count %d\n"
+ " client %p\n"
+ " cl_exchange_flags %x\n",
+ ds->ds_remotestr,
+ atomic_read(&ds->ds_count), ds->ds_clp,
+ ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
+}
+
+static bool
+same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
+{
+ struct sockaddr_in *a, *b;
+ struct sockaddr_in6 *a6, *b6;
+
+ if (addr1->sa_family != addr2->sa_family)
+ return false;
+
+ switch (addr1->sa_family) {
+ case AF_INET:
+ a = (struct sockaddr_in *)addr1;
+ b = (struct sockaddr_in *)addr2;
+
+ if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
+ a->sin_port == b->sin_port)
+ return true;
+ break;
+
+ case AF_INET6:
+ a6 = (struct sockaddr_in6 *)addr1;
+ b6 = (struct sockaddr_in6 *)addr2;
+
+ /* LINKLOCAL addresses must have matching scope_id */
+ if (ipv6_addr_scope(&a6->sin6_addr) ==
+ IPV6_ADDR_SCOPE_LINKLOCAL &&
+ a6->sin6_scope_id != b6->sin6_scope_id)
+ return false;
+
+ if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
+ a6->sin6_port == b6->sin6_port)
+ return true;
+ break;
+
+ default:
+ dprintk("%s: unhandled address family: %u\n",
+ __func__, addr1->sa_family);
+ return false;
+ }
+
+ return false;
+}
+
+static bool
+_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
+ const struct list_head *dsaddrs2)
+{
+ struct nfs4_pnfs_ds_addr *da1, *da2;
+ const struct list_head *p1, *p2;
+
+ /* step through both lists, comparing as we go; note that
+ * list_entry() never returns NULL, so the loop must test
+ * against the list heads rather than the entry pointers */
+ for (p1 = dsaddrs1->next, p2 = dsaddrs2->next;
+ p1 != dsaddrs1 && p2 != dsaddrs2;
+ p1 = p1->next, p2 = p2->next) {
+ da1 = list_entry(p1, typeof(*da1), da_node);
+ da2 = list_entry(p2, typeof(*da2), da_node);
+ if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
+ (struct sockaddr *)&da2->da_addr))
+ return false;
+ }
+
+ /* equal only if both lists ran out of entries together */
+ return p1 == dsaddrs1 && p2 == dsaddrs2;
+}
+
+/*
+ * Lookup DS by addresses. nfs4_ds_cache_lock is held
+ */
+static struct nfs4_pnfs_ds *
+_data_server_lookup_locked(const struct list_head *dsaddrs)
+{
+ struct nfs4_pnfs_ds *ds;
+
+ list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
+ if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
+ return ds;
+ return NULL;
+}
+
+/*
+ * Create an rpc connection to the nfs4_pnfs_ds data server
+ * Currently only supports IPv4 and IPv6 addresses
+ */
+static int
+nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
+{
+ struct nfs_client *clp = ERR_PTR(-EIO);
+ struct nfs4_pnfs_ds_addr *da;
+ int status = 0;
+
+ dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
+ mds_srv->nfs_client->cl_rpcclient->cl_auth->au_flavor);
+
+ BUG_ON(list_empty(&ds->ds_addrs));
+
+ list_for_each_entry(da, &ds->ds_addrs, da_node) {
+ dprintk("%s: DS %s: trying address %s\n",
+ __func__, ds->ds_remotestr, da->da_remotestr);
+
+ clp = nfs4_set_ds_client(mds_srv->nfs_client,
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP);
+ if (!IS_ERR(clp))
+ break;
+ }
+
+ if (IS_ERR(clp)) {
+ status = PTR_ERR(clp);
+ goto out;
+ }
+
+ if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) {
+ if (!is_ds_client(clp)) {
+ status = -ENODEV;
+ goto out_put;
+ }
+ ds->ds_clp = clp;
+ dprintk("%s [existing] server=%s\n", __func__,
+ ds->ds_remotestr);
+ goto out;
+ }
+
+ /*
+ * Do not set NFS_CS_CHECK_LEASE_TIME instead set the DS lease to
+ * be equal to the MDS lease. Renewal is scheduled in create_session.
+ */
+ spin_lock(&mds_srv->nfs_client->cl_lock);
+ clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
+ spin_unlock(&mds_srv->nfs_client->cl_lock);
+ clp->cl_last_renewal = jiffies;
+
+ /* New nfs_client */
+ status = nfs4_init_ds_session(clp);
+ if (status)
+ goto out_put;
+
+ ds->ds_clp = clp;
+ dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
+out:
+ return status;
+out_put:
+ nfs_put_client(clp);
+ goto out;
+}
+
+static void
+destroy_ds(struct nfs4_pnfs_ds *ds)
+{
+ struct nfs4_pnfs_ds_addr *da;
+
+ dprintk("--> %s\n", __func__);
+ ifdebug(FACILITY)
+ print_ds(ds);
+
+ if (ds->ds_clp)
+ nfs_put_client(ds->ds_clp);
+
+ while (!list_empty(&ds->ds_addrs)) {
+ da = list_first_entry(&ds->ds_addrs,
+ struct nfs4_pnfs_ds_addr,
+ da_node);
+ list_del_init(&da->da_node);
+ kfree(da->da_remotestr);
+ kfree(da);
+ }
+
+ kfree(ds->ds_remotestr);
+ kfree(ds);
+}
+
+void
+nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
+{
+ struct nfs4_pnfs_ds *ds;
+ int i;
+
+ nfs4_print_deviceid(&dsaddr->id_node.deviceid);
+
+ for (i = 0; i < dsaddr->ds_num; i++) {
+ ds = dsaddr->ds_list[i];
+ if (ds != NULL) {
+ if (atomic_dec_and_lock(&ds->ds_count,
+ &nfs4_ds_cache_lock)) {
+ list_del_init(&ds->ds_node);
+ spin_unlock(&nfs4_ds_cache_lock);
+ destroy_ds(ds);
+ }
+ }
+ }
+ kfree(dsaddr->stripe_indices);
+ kfree(dsaddr);
+}
+
+/*
+ * Create a string with a human readable address and port to avoid
+ * complicated setup around many dprintks.
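+ * Example result (addresses invented for illustration):
+ * "{192.0.2.1:2049,192.0.2.2:2049,}" (each entry keeps its trailing comma).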
+ */
+static char *
+nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
+{
+ struct nfs4_pnfs_ds_addr *da;
+ char *remotestr;
+ size_t len;
+ char *p;
+
+ len = 3; /* '{', '}' and trailing '\0' */
+ list_for_each_entry(da, dsaddrs, da_node) {
+ len += strlen(da->da_remotestr) + 1; /* string plus comma */
+ }
+
+ remotestr = kzalloc(len, gfp_flags);
+ if (!remotestr)
+ return NULL;
+
+ p = remotestr;
+ *(p++) = '{';
+ len--;
+ list_for_each_entry(da, dsaddrs, da_node) {
+ size_t ll = strlen(da->da_remotestr);
+
+ if (ll > len)
+ goto out_err;
+
+ memcpy(p, da->da_remotestr, ll);
+ p += ll;
+ len -= ll;
+
+ if (len < 1)
+ goto out_err;
+ (*p++) = ',';
+ len--;
+ }
+ if (len < 2)
+ goto out_err;
+ *(p++) = '}';
+ *p = '\0';
+ return remotestr;
+out_err:
+ kfree(remotestr);
+ return NULL;
+}
+
+static struct nfs4_pnfs_ds *
+nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
+{
+ struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
+ char *remotestr;
+
+ if (list_empty(dsaddrs)) {
+ dprintk("%s: no addresses defined\n", __func__);
+ goto out;
+ }
+
+ ds = kzalloc(sizeof(*ds), gfp_flags);
+ if (!ds)
+ goto out;
+
+ /* this is only used for debugging, so it's OK if it is NULL */
+ remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);
+
+ spin_lock(&nfs4_ds_cache_lock);
+ tmp_ds = _data_server_lookup_locked(dsaddrs);
+ if (tmp_ds == NULL) {
+ INIT_LIST_HEAD(&ds->ds_addrs);
+ list_splice_init(dsaddrs, &ds->ds_addrs);
+ ds->ds_remotestr = remotestr;
+ atomic_set(&ds->ds_count, 1);
+ INIT_LIST_HEAD(&ds->ds_node);
+ ds->ds_clp = NULL;
+ list_add(&ds->ds_node, &nfs4_data_server_cache);
+ dprintk("%s add new data server %s\n", __func__,
+ ds->ds_remotestr);
+ } else {
+ kfree(remotestr);
+ kfree(ds);
+ atomic_inc(&tmp_ds->ds_count);
+ dprintk("%s data server %s found, inc'ed ds_count to %d\n",
+ __func__, tmp_ds->ds_remotestr,
+ atomic_read(&tmp_ds->ds_count));
+ ds = tmp_ds;
+ }
+ spin_unlock(&nfs4_ds_cache_lock);
+out:
+ return ds;
+}
+
+/*
+ * Currently only supports ipv4, ipv6 and one multi-path address.
+ */
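+/*
+ * Example (illustrative): the universal address "192.0.2.1.8.1" parses to
+ * IP address 192.0.2.1 with port (8 << 8) | 1 = 2049; per RFC 5665 the
+ * last two dotted decimal octets are the high and low bytes of the port.
+ */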
+static struct nfs4_pnfs_ds_addr *
+decode_ds_addr(struct net *net, struct xdr_stream *streamp, gfp_t gfp_flags)
+{
+ struct nfs4_pnfs_ds_addr *da = NULL;
+ char *buf, *portstr;
+ __be16 port;
+ int nlen, rlen;
+ int tmp[2];
+ __be32 *p;
+ char *netid, *match_netid;
+ size_t len, match_netid_len;
+ char *startsep = "";
+ char *endsep = "";
+
+ /* r_netid */
+ p = xdr_inline_decode(streamp, 4);
+ if (unlikely(!p))
+ goto out_err;
+ nlen = be32_to_cpup(p++);
+
+ p = xdr_inline_decode(streamp, nlen);
+ if (unlikely(!p))
+ goto out_err;
+
+ netid = kmalloc(nlen+1, gfp_flags);
+ if (unlikely(!netid))
+ goto out_err;
+
+ netid[nlen] = '\0';
+ memcpy(netid, p, nlen);
+
+ /* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
+ p = xdr_inline_decode(streamp, 4);
+ if (unlikely(!p))
+ goto out_free_netid;
+ rlen = be32_to_cpup(p);
+
+ p = xdr_inline_decode(streamp, rlen);
+ if (unlikely(!p))
+ goto out_free_netid;
+
+ /* port is ".ABC.DEF", 8 chars max */
+ if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
+ dprintk("%s: Invalid address, length %d\n", __func__,
+ rlen);
+ goto out_free_netid;
+ }
+ buf = kmalloc(rlen + 1, gfp_flags);
+ if (!buf) {
+ dprintk("%s: Not enough memory\n", __func__);
+ goto out_free_netid;
+ }
+ buf[rlen] = '\0';
+ memcpy(buf, p, rlen);
+
+ /* replace port '.' with '-' */
+ portstr = strrchr(buf, '.');
+ if (!portstr) {
+ dprintk("%s: Failed finding expected dot in port\n",
+ __func__);
+ goto out_free_buf;
+ }
+ *portstr = '-';
+
+ /* find '.' between address and port */
+ portstr = strrchr(buf, '.');
+ if (!portstr) {
+ dprintk("%s: Failed finding expected dot between address and "
+ "port\n", __func__);
+ goto out_free_buf;
+ }
+ *portstr = '\0';
+
+ da = kzalloc(sizeof(*da), gfp_flags);
+ if (unlikely(!da))
+ goto out_free_buf;
+
+ INIT_LIST_HEAD(&da->da_node);
+
+ if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
+ sizeof(da->da_addr))) {
+ dprintk("%s: error parsing address %s\n", __func__, buf);
+ goto out_free_da;
+ }
+
+ portstr++;
+ if (sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]) != 2) {
+ dprintk("%s: Failed parsing port\n", __func__);
+ goto out_free_da;
+ }
+ port = htons((tmp[0] << 8) | (tmp[1]));
+
+ switch (da->da_addr.ss_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)&da->da_addr)->sin_port = port;
+ da->da_addrlen = sizeof(struct sockaddr_in);
+ match_netid = "tcp";
+ match_netid_len = 3;
+ break;
+
+ case AF_INET6:
+ ((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
+ da->da_addrlen = sizeof(struct sockaddr_in6);
+ match_netid = "tcp6";
+ match_netid_len = 4;
+ startsep = "[";
+ endsep = "]";
+ break;
+
+ default:
+ dprintk("%s: unsupported address family: %u\n",
+ __func__, da->da_addr.ss_family);
+ goto out_free_da;
+ }
+
+ if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
+ dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
+ __func__, netid, match_netid);
+ goto out_free_da;
+ }
+
+ /* save human readable address */
+ len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
+ da->da_remotestr = kzalloc(len, gfp_flags);
+
+ /* NULL is ok, only used for dprintk */
+ if (da->da_remotestr)
+ snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
+ buf, endsep, ntohs(port));
+
+ dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
+ kfree(buf);
+ kfree(netid);
+ return da;
+
+out_free_da:
+ kfree(da);
+out_free_buf:
+ dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
+ kfree(buf);
+out_free_netid:
+ kfree(netid);
+out_err:
+ return NULL;
+}
+
+/* Decode opaque device data and return the result */
+static struct nfs4_file_layout_dsaddr*
+decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
+{
+ int i;
+ u32 cnt, num;
+ u8 *indexp;
+ __be32 *p;
+ u8 *stripe_indices;
+ u8 max_stripe_index;
+ struct nfs4_file_layout_dsaddr *dsaddr = NULL;
+ struct xdr_stream stream;
+ struct xdr_buf buf;
+ struct page *scratch;
+ struct list_head dsaddrs;
+ struct nfs4_pnfs_ds_addr *da;
+
+ /* set up xdr stream */
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ goto out_err;
+
+ xdr_init_decode_pages(&stream, &buf, pdev->pages, pdev->pglen);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ /* Get the stripe count (number of stripe indices) */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_scratch;
+
+ cnt = be32_to_cpup(p);
+ dprintk("%s stripe count %d\n", __func__, cnt);
+ if (cnt > NFS4_PNFS_MAX_STRIPE_CNT) {
+ printk(KERN_WARNING "NFS: %s: stripe count %d greater than "
+ "supported maximum %d\n", __func__,
+ cnt, NFS4_PNFS_MAX_STRIPE_CNT);
+ goto out_err_free_scratch;
+ }
+
+ /* read stripe indices */
+ stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags);
+ if (!stripe_indices)
+ goto out_err_free_scratch;
+
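+ /* Each stripe index is a 32-bit XDR word, hence cnt << 2 bytes. */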
+ p = xdr_inline_decode(&stream, cnt << 2);
+ if (unlikely(!p))
+ goto out_err_free_stripe_indices;
+
+ indexp = &stripe_indices[0];
+ max_stripe_index = 0;
+ for (i = 0; i < cnt; i++) {
+ *indexp = be32_to_cpup(p++);
+ max_stripe_index = max(max_stripe_index, *indexp);
+ indexp++;
+ }
+
+ /* Check the multipath list count */
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_stripe_indices;
+
+ num = be32_to_cpup(p);
+ dprintk("%s ds_num %u\n", __func__, num);
+ if (num > NFS4_PNFS_MAX_MULTI_CNT) {
+ printk(KERN_WARNING "NFS: %s: multipath count %d greater than "
+ "supported maximum %d\n", __func__,
+ num, NFS4_PNFS_MAX_MULTI_CNT);
+ goto out_err_free_stripe_indices;
+ }
+
+ /* validate stripe indices are all < num */
+ if (max_stripe_index >= num) {
+ printk(KERN_WARNING "NFS: %s: stripe index %u >= num ds %u\n",
+ __func__, max_stripe_index, num);
+ goto out_err_free_stripe_indices;
+ }
+
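+ /*
+ * struct nfs4_file_layout_dsaddr ends in a one-element ds_list[]
+ * array, so allocate room for num - 1 additional data-server
+ * pointers beyond sizeof(*dsaddr).
+ */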
+ dsaddr = kzalloc(sizeof(*dsaddr) +
+ (sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
+ gfp_flags);
+ if (!dsaddr)
+ goto out_err_free_stripe_indices;
+
+ dsaddr->stripe_count = cnt;
+ dsaddr->stripe_indices = stripe_indices;
+ stripe_indices = NULL;
+ dsaddr->ds_num = num;
+ nfs4_init_deviceid_node(&dsaddr->id_node,
+ NFS_SERVER(ino)->pnfs_curr_ld,
+ NFS_SERVER(ino)->nfs_client,
+ &pdev->dev_id);
+
+ INIT_LIST_HEAD(&dsaddrs);
+
+ for (i = 0; i < dsaddr->ds_num; i++) {
+ int j;
+ u32 mp_count;
+
+ p = xdr_inline_decode(&stream, 4);
+ if (unlikely(!p))
+ goto out_err_free_deviceid;
+
+ mp_count = be32_to_cpup(p); /* multipath count */
+ for (j = 0; j < mp_count; j++) {
+ da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->net,
+ &stream, gfp_flags);
+ if (da)
+ list_add_tail(&da->da_node, &dsaddrs);
+ }
+ if (list_empty(&dsaddrs)) {
+ dprintk("%s: no suitable DS addresses found\n",
+ __func__);
+ goto out_err_free_deviceid;
+ }
+
+ dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags);
+ if (!dsaddr->ds_list[i])
+ goto out_err_drain_dsaddrs;
+
+ /* If DS was already in cache, free ds addrs */
+ while (!list_empty(&dsaddrs)) {
+ da = list_first_entry(&dsaddrs,
+ struct nfs4_pnfs_ds_addr,
+ da_node);
+ list_del_init(&da->da_node);
+ kfree(da->da_remotestr);
+ kfree(da);
+ }
+ }
+
+ __free_page(scratch);
+ return dsaddr;
+
+out_err_drain_dsaddrs:
+ while (!list_empty(&dsaddrs)) {
+ da = list_first_entry(&dsaddrs, struct nfs4_pnfs_ds_addr,
+ da_node);
+ list_del_init(&da->da_node);
+ kfree(da->da_remotestr);
+ kfree(da);
+ }
+out_err_free_deviceid:
+ nfs4_fl_free_deviceid(dsaddr);
+ /* stripe_indices was part of dsaddr */
+ goto out_err_free_scratch;
+out_err_free_stripe_indices:
+ kfree(stripe_indices);
+out_err_free_scratch:
+ __free_page(scratch);
+out_err:
+ dprintk("%s ERROR: returning NULL\n", __func__);
+ return NULL;
+}
+
+/*
+ * Decode the opaque device specified in 'dev' and add it to the cache of
+ * available devices.
+ */
+static struct nfs4_file_layout_dsaddr *
+decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags)
+{
+ struct nfs4_deviceid_node *d;
+ struct nfs4_file_layout_dsaddr *n, *new;
+
+ new = decode_device(inode, dev, gfp_flags);
+ if (!new) {
+ printk(KERN_WARNING "NFS: %s: Could not decode or add device\n",
+ __func__);
+ return NULL;
+ }
+
+ d = nfs4_insert_deviceid_node(&new->id_node);
+ n = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
+ if (n != new) {
+ nfs4_fl_free_deviceid(new);
+ return n;
+ }
+
+ return new;
+}
+
+/*
+ * Retrieve the information for dev_id, add it to the list
+ * of available devices, and return it.
+ */
+struct nfs4_file_layout_dsaddr *
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags)
+{
+ struct pnfs_device *pdev = NULL;
+ u32 max_resp_sz;
+ int max_pages;
+ struct page **pages = NULL;
+ struct nfs4_file_layout_dsaddr *dsaddr = NULL;
+ int rc, i;
+ struct nfs_server *server = NFS_SERVER(inode);
+
+ /*
+ * Use the session max response size as the basis for setting
+ * GETDEVICEINFO's maxcount
+ */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ max_pages = nfs_page_array_len(0, max_resp_sz);
+ dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
+ __func__, inode, max_resp_sz, max_pages);
+
+ pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags);
+ if (pdev == NULL)
+ return NULL;
+
+ pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
+ if (pages == NULL) {
+ kfree(pdev);
+ return NULL;
+ }
+ for (i = 0; i < max_pages; i++) {
+ pages[i] = alloc_page(gfp_flags);
+ if (!pages[i])
+ goto out_free;
+ }
+
+ memcpy(&pdev->dev_id, dev_id, sizeof(*dev_id));
+ pdev->layout_type = LAYOUT_NFSV4_1_FILES;
+ pdev->pages = pages;
+ pdev->pgbase = 0;
+ pdev->pglen = PAGE_SIZE * max_pages;
+ pdev->mincount = 0;
+
+ rc = nfs4_proc_getdeviceinfo(server, pdev);
+ dprintk("%s getdevice info returns %d\n", __func__, rc);
+ if (rc)
+ goto out_free;
+
+ /*
+ * Found new device, need to decode it and then add it to the
+ * list of known devices for this mountpoint.
+ */
+ dsaddr = decode_and_add_device(inode, pdev, gfp_flags);
+out_free:
+ for (i = 0; i < max_pages; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+ kfree(pdev);
+ dprintk("<-- %s dsaddr %p\n", __func__, dsaddr);
+ return dsaddr;
+}
+
+void
+nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
+{
+ nfs4_put_deviceid_node(&dsaddr->id_node);
+}
+
+/*
+ * Want res = (offset - layout->pattern_offset) / layout->stripe_unit,
+ * then: ((res + fsi) % dsaddr->stripe_count)
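+ * For example (hypothetical numbers): with pattern_offset 0,
+ * stripe_unit 65536, first_stripe_index 1 and stripe_count 4,
+ * offset 196608 gives res = 3 and j = (3 + 1) % 4 = 0.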
+ */
+u32
+nfs4_fl_calc_j_index(struct pnfs_layout_segment *lseg, loff_t offset)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+ u64 tmp;
+
+ tmp = offset - flseg->pattern_offset;
+ do_div(tmp, flseg->stripe_unit);
+ tmp += flseg->first_stripe_index;
+ return do_div(tmp, flseg->dsaddr->stripe_count);
+}
+
+u32
+nfs4_fl_calc_ds_index(struct pnfs_layout_segment *lseg, u32 j)
+{
+ return FILELAYOUT_LSEG(lseg)->dsaddr->stripe_indices[j];
+}
+
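+/*
+ * For STRIPE_SPARSE layouts the filehandle array is indexed by data
+ * server, so translate the stripe index j through stripe_indices; for
+ * dense layouts there is one filehandle per stripe and j is used
+ * directly.
+ */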
+struct nfs_fh *
+nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
+{
+ struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);
+ u32 i;
+
+ if (flseg->stripe_type == STRIPE_SPARSE) {
+ if (flseg->num_fh == 1)
+ i = 0;
+ else if (flseg->num_fh == 0)
+ /* Use the MDS OPEN fh set in nfs_read_rpcsetup */
+ return NULL;
+ else
+ i = nfs4_fl_calc_ds_index(lseg, j);
+ } else
+ i = j;
+ return flseg->fh_array[i];
+}
+
+static void
+filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
+ int err, const char *ds_remotestr)
+{
+ u32 *p = (u32 *)&dsaddr->id_node.deviceid;
+
+ printk(KERN_ERR "NFS: data server %s connection error %d."
+ " Deviceid [%x%x%x%x] marked out of use.\n",
+ ds_remotestr, err, p[0], p[1], p[2], p[3]);
+
+ spin_lock(&nfs4_ds_cache_lock);
+ dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
+ spin_unlock(&nfs4_ds_cache_lock);
+}
+
+struct nfs4_pnfs_ds *
+nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
+{
+ struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
+ struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
+
+ if (ds == NULL) {
+ printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
+ __func__, ds_idx);
+ return NULL;
+ }
+
+ if (!ds->ds_clp) {
+ struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
+ int err;
+
+ if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
+ /* Already tried to connect, don't try again */
+ dprintk("%s Deviceid marked out of use\n", __func__);
+ return NULL;
+ }
+ err = nfs4_ds_connect(s, ds);
+ if (err) {
+ filelayout_mark_devid_negative(dsaddr, err,
+ ds->ds_remotestr);
+ return NULL;
+ }
+ }
+ return ds;
+}
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
new file mode 100644
index 00000000..a7f3dedc
--- /dev/null
+++ b/fs/nfs/nfs4namespace.c
@@ -0,0 +1,343 @@
+/*
+ * linux/fs/nfs/nfs4namespace.c
+ *
+ * Copyright (C) 2005 Trond Myklebust <Trond.Myklebust@netapp.com>
+ * - Modified by David Howells <dhowells@redhat.com>
+ *
+ * NFSv4 namespace
+ */
+
+#include <linux/dcache.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/nfs_fs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/vfs.h>
+#include <linux/inet.h>
+#include "internal.h"
+#include "nfs4_fs.h"
+#include "dns_resolve.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+/*
+ * Convert the NFSv4 pathname components into a standard POSIX path.
+ *
+ * Note that the resulting string will be placed at the end of the buffer.
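+ * For example, components { "export", "home" } yield "/export/home",
+ * with the terminating NUL placed at buffer + buflen - 1.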
+ */
+static inline char *nfs4_pathname_string(const struct nfs4_pathname *pathname,
+ char *buffer, ssize_t buflen)
+{
+ char *end = buffer + buflen;
+ int n;
+
+ *--end = '\0';
+ buflen--;
+
+ n = pathname->ncomponents;
+ while (--n >= 0) {
+ const struct nfs4_string *component = &pathname->components[n];
+ buflen -= component->len + 1;
+ if (buflen < 0)
+ goto Elong;
+ end -= component->len;
+ memcpy(end, component->data, component->len);
+ *--end = '/';
+ }
+ return end;
+Elong:
+ return ERR_PTR(-ENAMETOOLONG);
+}
+
+/*
+ * return the path component of "<server>:<path>"
+ * nfspath - the "<server>:<path>" string
+ * end - one past the last char that could contain "<server>:"
+ * returns NULL on failure
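+ *
+ * e.g. "server:/export" yields "/export"; "[::1]:/export" (a
+ * []-escaped IPv6 address) also yields "/export".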
+ */
+static char *nfs_path_component(const char *nfspath, const char *end)
+{
+ char *p;
+
+ if (*nfspath == '[') {
+ /* parse [] escaped IPv6 addrs */
+ p = strchr(nfspath, ']');
+ if (p != NULL && ++p < end && *p == ':')
+ return p + 1;
+ } else {
+ /* otherwise split on first colon */
+ p = strchr(nfspath, ':');
+ if (p != NULL && p < end)
+ return p + 1;
+ }
+ return NULL;
+}
+
+/*
+ * Determine the mount path as a string
+ */
+static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen)
+{
+ char *limit;
+ char *path = nfs_path(&limit, dentry, buffer, buflen);
+ if (!IS_ERR(path)) {
+ char *path_component = nfs_path_component(path, limit);
+ if (path_component)
+ return path_component;
+ }
+ return path;
+}
+
+/*
+ * Check that fs_locations::fs_root [RFC3530 6.3] is a prefix for what we
+ * believe to be the server path to this dentry
+ */
+static int nfs4_validate_fspath(struct dentry *dentry,
+ const struct nfs4_fs_locations *locations,
+ char *page, char *page2)
+{
+ const char *path, *fs_path;
+
+ path = nfs4_path(dentry, page, PAGE_SIZE);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+
+ fs_path = nfs4_pathname_string(&locations->fs_path, page2, PAGE_SIZE);
+ if (IS_ERR(fs_path))
+ return PTR_ERR(fs_path);
+
+ if (strncmp(path, fs_path, strlen(fs_path)) != 0) {
+ dprintk("%s: path %s does not begin with fsroot %s\n",
+ __func__, path, fs_path);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static size_t nfs_parse_server_name(char *string, size_t len,
+ struct sockaddr *sa, size_t salen, struct nfs_server *server)
+{
+ struct net *net = rpc_net_ns(server->client);
+ ssize_t ret;
+
+ ret = rpc_pton(net, string, len, sa, salen);
+ if (ret == 0) {
+ ret = nfs_dns_resolve_name(net, string, len, sa, salen);
+ if (ret < 0)
+ ret = 0;
+ }
+ return ret;
+}
+
+static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
+{
+ struct page *page;
+ struct nfs4_secinfo_flavors *flavors;
+ rpc_authflavor_t flavor;
+ int err;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+ flavors = page_address(page);
+
+ err = nfs4_proc_secinfo(inode, name, flavors);
+ if (err < 0) {
+ flavor = err;
+ goto out;
+ }
+
+ flavor = nfs_find_best_sec(flavors);
+
+out:
+ put_page(page);
+ return flavor;
+}
+
+/*
+ * Please call rpc_shutdown_client() when you are done with this client.
+ */
+struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *inode,
+ struct qstr *name)
+{
+ struct rpc_clnt *clone;
+ struct rpc_auth *auth;
+ rpc_authflavor_t flavor;
+
+ flavor = nfs4_negotiate_security(inode, name);
+ if (flavor < 0)
+ return ERR_PTR(flavor);
+
+ clone = rpc_clone_client(clnt);
+ if (IS_ERR(clone))
+ return clone;
+
+ auth = rpcauth_create(flavor, clone);
+ if (!auth) {
+ rpc_shutdown_client(clone);
+ clone = ERR_PTR(-EIO);
+ }
+
+ return clone;
+}
+
+static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
+ char *page, char *page2,
+ const struct nfs4_fs_location *location)
+{
+ const size_t addr_bufsize = sizeof(struct sockaddr_storage);
+ struct vfsmount *mnt = ERR_PTR(-ENOENT);
+ char *mnt_path;
+ unsigned int maxbuflen;
+ unsigned int s;
+
+ mnt_path = nfs4_pathname_string(&location->rootpath, page2, PAGE_SIZE);
+ if (IS_ERR(mnt_path))
+ return ERR_CAST(mnt_path);
+ mountdata->mnt_path = mnt_path;
+ maxbuflen = mnt_path - 1 - page2;
+
+ mountdata->addr = kmalloc(addr_bufsize, GFP_KERNEL);
+ if (mountdata->addr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ for (s = 0; s < location->nservers; s++) {
+ const struct nfs4_string *buf = &location->servers[s];
+
+ if (buf->len <= 0 || buf->len >= maxbuflen)
+ continue;
+
+ if (memchr(buf->data, IPV6_SCOPE_DELIMITER, buf->len))
+ continue;
+
+ mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
+ mountdata->addr, addr_bufsize,
+ NFS_SB(mountdata->sb));
+ if (mountdata->addrlen == 0)
+ continue;
+
+ rpc_set_port(mountdata->addr, NFS_PORT);
+
+ memcpy(page2, buf->data, buf->len);
+ page2[buf->len] = '\0';
+ mountdata->hostname = page2;
+
+ snprintf(page, PAGE_SIZE, "%s:%s",
+ mountdata->hostname,
+ mountdata->mnt_path);
+
+ mnt = vfs_kern_mount(&nfs4_referral_fs_type, 0, page, mountdata);
+ if (!IS_ERR(mnt))
+ break;
+ }
+ kfree(mountdata->addr);
+ return mnt;
+}
+
+/**
+ * nfs_follow_referral - set up the mountpoint when hitting a referral on a moved error
+ * @dentry: parent directory
+ * @locations: array of NFSv4 server location information
+ *
+ */
+static struct vfsmount *nfs_follow_referral(struct dentry *dentry,
+ const struct nfs4_fs_locations *locations)
+{
+ struct vfsmount *mnt = ERR_PTR(-ENOENT);
+ struct nfs_clone_mount mountdata = {
+ .sb = dentry->d_sb,
+ .dentry = dentry,
+ .authflavor = NFS_SB(dentry->d_sb)->client->cl_auth->au_flavor,
+ };
+ char *page = NULL, *page2 = NULL;
+ int loc, error;
+
+ if (locations == NULL || locations->nlocations <= 0)
+ goto out;
+
+ dprintk("%s: referral at %s/%s\n", __func__,
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
+ page = (char *) __get_free_page(GFP_USER);
+ if (!page)
+ goto out;
+
+ page2 = (char *) __get_free_page(GFP_USER);
+ if (!page2)
+ goto out;
+
+ /* Ensure fs path is a prefix of current dentry path */
+ error = nfs4_validate_fspath(dentry, locations, page, page2);
+ if (error < 0) {
+ mnt = ERR_PTR(error);
+ goto out;
+ }
+
+ for (loc = 0; loc < locations->nlocations; loc++) {
+ const struct nfs4_fs_location *location = &locations->locations[loc];
+
+ if (location == NULL || location->nservers <= 0 ||
+ location->rootpath.ncomponents == 0)
+ continue;
+
+ mnt = try_location(&mountdata, page, page2, location);
+ if (!IS_ERR(mnt))
+ break;
+ }
+
+out:
+ free_page((unsigned long) page);
+ free_page((unsigned long) page2);
+ dprintk("%s: done\n", __func__);
+ return mnt;
+}
+
+/*
+ * nfs_do_refmount - handle crossing a referral on the server
+ * @dentry: dentry of the referral
+ *
+ */
+struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
+{
+ struct vfsmount *mnt = ERR_PTR(-ENOMEM);
+ struct dentry *parent;
+ struct nfs4_fs_locations *fs_locations = NULL;
+ struct page *page;
+ int err;
+
+ /* BUG_ON(IS_ROOT(dentry)); */
+ dprintk("%s: enter\n", __func__);
+
+ page = alloc_page(GFP_KERNEL);
+ if (page == NULL)
+ goto out;
+
+ fs_locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+ if (fs_locations == NULL)
+ goto out_free;
+
+ /* Get locations */
+ mnt = ERR_PTR(-ENOENT);
+
+ parent = dget_parent(dentry);
+ dprintk("%s: getting locations for %s/%s\n",
+ __func__, parent->d_name.name, dentry->d_name.name);
+
+ err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page);
+ dput(parent);
+ if (err != 0 ||
+ fs_locations->nlocations <= 0 ||
+ fs_locations->fs_path.ncomponents <= 0)
+ goto out_free;
+
+ mnt = nfs_follow_referral(dentry, fs_locations);
+out_free:
+ __free_page(page);
+ kfree(fs_locations);
+out:
+ dprintk("%s: done\n", __func__);
+ return mnt;
+}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
new file mode 100644
index 00000000..d60c2d87
--- /dev/null
+++ b/fs/nfs/nfs4proc.c
@@ -0,0 +1,6625 @@
+/*
+ * fs/nfs/nfs4proc.c
+ *
+ * Client-side procedure declarations for NFSv4.
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Kendrick Smith <kmsmith@umich.edu>
+ * Andy Adamson <andros@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/gss_api.h>
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/nfs_mount.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/module.h>
+#include <linux/nfs_idmap.h>
+#include <linux/sunrpc/bc_xprt.h>
+#include <linux/xattr.h>
+#include <linux/utsname.h>
+#include <linux/freezer.h>
+
+#include "nfs4_fs.h"
+#include "delegation.h"
+#include "internal.h"
+#include "iostat.h"
+#include "callback.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PROC
+
+#define NFS4_POLL_RETRY_MIN (HZ/10)
+#define NFS4_POLL_RETRY_MAX (15*HZ)
+
+#define NFS4_MAX_LOOP_ON_RECOVER (10)
+
+static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
+
+struct nfs4_opendata;
+static int _nfs4_proc_open(struct nfs4_opendata *data);
+static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
+static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
+static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
+static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
+static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
+static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
+ struct nfs_fattr *fattr, struct iattr *sattr,
+ struct nfs4_state *state);
+#ifdef CONFIG_NFS_V4_1
+static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
+static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
+#endif
+/* Prevent leaks of NFSv4 errors into userland */
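+/*
+ * Protocol-only NFSv4 errors live in the 10000 range; anything above
+ * -1000 coincides with an ordinary errno value and is passed through
+ * unchanged.
+ */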
+static int nfs4_map_errors(int err)
+{
+ if (err >= -1000)
+ return err;
+ switch (err) {
+ case -NFS4ERR_RESOURCE:
+ return -EREMOTEIO;
+ case -NFS4ERR_WRONGSEC:
+ return -EPERM;
+ case -NFS4ERR_BADOWNER:
+ case -NFS4ERR_BADNAME:
+ return -EINVAL;
+ case -NFS4ERR_SHARE_DENIED:
+ return -EACCES;
+ default:
+ dprintk("%s could not handle NFSv4 error %d\n",
+ __func__, -err);
+ break;
+ }
+ return -EIO;
+}
+
+/*
+ * This is our standard bitmap for GETATTR requests.
+ */
+const u32 nfs4_fattr_bitmap[2] = {
+ FATTR4_WORD0_TYPE
+ | FATTR4_WORD0_CHANGE
+ | FATTR4_WORD0_SIZE
+ | FATTR4_WORD0_FSID
+ | FATTR4_WORD0_FILEID,
+ FATTR4_WORD1_MODE
+ | FATTR4_WORD1_NUMLINKS
+ | FATTR4_WORD1_OWNER
+ | FATTR4_WORD1_OWNER_GROUP
+ | FATTR4_WORD1_RAWDEV
+ | FATTR4_WORD1_SPACE_USED
+ | FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_METADATA
+ | FATTR4_WORD1_TIME_MODIFY
+};
+
+const u32 nfs4_statfs_bitmap[2] = {
+ FATTR4_WORD0_FILES_AVAIL
+ | FATTR4_WORD0_FILES_FREE
+ | FATTR4_WORD0_FILES_TOTAL,
+ FATTR4_WORD1_SPACE_AVAIL
+ | FATTR4_WORD1_SPACE_FREE
+ | FATTR4_WORD1_SPACE_TOTAL
+};
+
+const u32 nfs4_pathconf_bitmap[2] = {
+ FATTR4_WORD0_MAXLINK
+ | FATTR4_WORD0_MAXNAME,
+ 0
+};
+
+const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
+ | FATTR4_WORD0_MAXREAD
+ | FATTR4_WORD0_MAXWRITE
+ | FATTR4_WORD0_LEASE_TIME,
+ FATTR4_WORD1_TIME_DELTA
+ | FATTR4_WORD1_FS_LAYOUT_TYPES,
+ FATTR4_WORD2_LAYOUT_BLKSIZE
+};
+
+const u32 nfs4_fs_locations_bitmap[2] = {
+ FATTR4_WORD0_TYPE
+ | FATTR4_WORD0_CHANGE
+ | FATTR4_WORD0_SIZE
+ | FATTR4_WORD0_FSID
+ | FATTR4_WORD0_FILEID
+ | FATTR4_WORD0_FS_LOCATIONS,
+ FATTR4_WORD1_MODE
+ | FATTR4_WORD1_NUMLINKS
+ | FATTR4_WORD1_OWNER
+ | FATTR4_WORD1_OWNER_GROUP
+ | FATTR4_WORD1_RAWDEV
+ | FATTR4_WORD1_SPACE_USED
+ | FATTR4_WORD1_TIME_ACCESS
+ | FATTR4_WORD1_TIME_METADATA
+ | FATTR4_WORD1_TIME_MODIFY
+ | FATTR4_WORD1_MOUNTED_ON_FILEID
+};
+
+static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
+ struct nfs4_readdir_arg *readdir)
+{
+ __be32 *start, *p;
+
+ BUG_ON(readdir->count < 80);
+ if (cookie > 2) {
+ readdir->cookie = cookie;
+ memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
+ return;
+ }
+
+ readdir->cookie = 0;
+ memset(&readdir->verifier, 0, sizeof(readdir->verifier));
+ if (cookie == 2)
+ return;
+
+ /*
+ * NFSv4 servers do not return entries for '.' and '..'
+ * Therefore, we fake these entries here. We let '.'
+ * have cookie 0 and '..' have cookie 1. Note that
+ * when talking to the server, we always send cookie 0
+ * instead of 1 or 2.
+ */
+ start = p = kmap_atomic(*readdir->pages);
+
+ if (cookie == 0) {
+ *p++ = xdr_one; /* next */
+ *p++ = xdr_zero; /* cookie, first word */
+ *p++ = xdr_one; /* cookie, second word */
+ *p++ = xdr_one; /* entry len */
+ memcpy(p, ".\0\0\0", 4); /* entry */
+ p++;
+ *p++ = xdr_one; /* bitmap length */
+ *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
+ *p++ = htonl(8); /* attribute buffer length */
+ p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
+ }
+
+ *p++ = xdr_one; /* next */
+ *p++ = xdr_zero; /* cookie, first word */
+ *p++ = xdr_two; /* cookie, second word */
+ *p++ = xdr_two; /* entry len */
+ memcpy(p, "..\0\0", 4); /* entry */
+ p++;
+ *p++ = xdr_one; /* bitmap length */
+ *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
+ *p++ = htonl(8); /* attribute buffer length */
+ p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
+
+ readdir->pgbase = (char *)p - (char *)start;
+ readdir->count -= readdir->pgbase;
+ kunmap_atomic(start);
+}
+
+static int nfs4_wait_clnt_recover(struct nfs_client *clp)
+{
+ int res;
+
+ might_sleep();
+
+ res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
+ nfs_wait_bit_killable, TASK_KILLABLE);
+ return res;
+}
+
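+/*
+ * Sleep for a bounded, exponentially growing interval: the timeout is
+ * clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX] and doubled
+ * for the next retry.
+ */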
+static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
+{
+ int res = 0;
+
+ might_sleep();
+
+ if (*timeout <= 0)
+ *timeout = NFS4_POLL_RETRY_MIN;
+ if (*timeout > NFS4_POLL_RETRY_MAX)
+ *timeout = NFS4_POLL_RETRY_MAX;
+ freezable_schedule_timeout_killable(*timeout);
+ if (fatal_signal_pending(current))
+ res = -ERESTARTSYS;
+ *timeout <<= 1;
+ return res;
+}
+
+/* This is the error handling routine for processes that are allowed
+ * to sleep.
+ */
+static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state *state = exception->state;
+ struct inode *inode = exception->inode;
+ int ret = errorcode;
+
+ exception->retry = 0;
+ switch(errorcode) {
+ case 0:
+ return 0;
+ case -NFS4ERR_OPENMODE:
+ if (inode && nfs_have_delegation(inode, FMODE_READ)) {
+ nfs_inode_return_delegation(inode);
+ exception->retry = 1;
+ return 0;
+ }
+ if (state == NULL)
+ break;
+ nfs4_schedule_stateid_recovery(server, state);
+ goto wait_on_recovery;
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ if (state == NULL)
+ break;
+ nfs_remove_bad_delegation(state->inode);
+ nfs4_schedule_stateid_recovery(server, state);
+ goto wait_on_recovery;
+ case -NFS4ERR_EXPIRED:
+ if (state != NULL)
+ nfs4_schedule_stateid_recovery(server, state);
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_STALE_CLIENTID:
+ nfs4_schedule_lease_recovery(clp);
+ goto wait_on_recovery;
+#if defined(CONFIG_NFS_V4_1)
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR: %d Reset session\n", __func__,
+ errorcode);
+ nfs4_schedule_session_recovery(clp->cl_session);
+ exception->retry = 1;
+ break;
+#endif /* defined(CONFIG_NFS_V4_1) */
+ case -NFS4ERR_FILE_OPEN:
+ if (exception->timeout > HZ) {
+ /* We have retried a decent amount, time to
+ * fail
+ */
+ ret = -EBUSY;
+ break;
+ }
+ case -NFS4ERR_GRACE:
+ case -NFS4ERR_DELAY:
+ case -EKEYEXPIRED:
+ ret = nfs4_delay(server->client, &exception->timeout);
+ if (ret != 0)
+ break;
+ case -NFS4ERR_RETRY_UNCACHED_REP:
+ case -NFS4ERR_OLD_STATEID:
+ exception->retry = 1;
+ break;
+ case -NFS4ERR_BADOWNER:
+ /* The following works around a Linux server bug! */
+ case -NFS4ERR_BADNAME:
+ if (server->caps & NFS_CAP_UIDGID_NOMAP) {
+ server->caps &= ~NFS_CAP_UIDGID_NOMAP;
+ exception->retry = 1;
+ printk(KERN_WARNING "NFS: v4 server %s "
+ "does not accept raw "
+ "uid/gids. "
+ "Reenabling the idmapper.\n",
+ server->nfs_client->cl_hostname);
+ }
+ }
+ /* We failed to handle the error */
+ return nfs4_map_errors(ret);
+wait_on_recovery:
+ ret = nfs4_wait_clnt_recover(clp);
+ if (ret == 0)
+ exception->retry = 1;
+ return ret;
+}
+
+
+static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
+{
+ spin_lock(&clp->cl_lock);
+ if (time_before(clp->cl_last_renewal,timestamp))
+ clp->cl_last_renewal = timestamp;
+ spin_unlock(&clp->cl_lock);
+}
+
+static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
+{
+ do_renew_lease(server->nfs_client, timestamp);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+
+/*
+ * nfs4_free_slot - free a slot and efficiently update slot table.
+ *
+ * Freeing a slot is trivially done by clearing its respective bit
+ * in the bitmap.
+ * If the freed slotid equals highest_used_slotid, we update it so
+ * that the server can size down the slot table if needed; otherwise
+ * we know that the highest_used_slotid is still in use.
+ * When updating highest_used_slotid there may be "holes" in the bitmap
+ * so we need to scan down from highest_used_slotid to 0 looking for the now
+ * highest slotid in use.
+ * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
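+ * For example, with slots 0, 2 and 5 in use, freeing slot 5 leaves
+ * the scan settling on 2 as the new highest_used_slotid.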
+ *
+ * Must be called while holding tbl->slot_tbl_lock
+ */
+static void
+nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
+{
+ BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
+ /* clear used bit in bitmap */
+ __clear_bit(slotid, tbl->used_slots);
+
+ /* update highest_used_slotid when it is freed */
+ if (slotid == tbl->highest_used_slotid) {
+ slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
+ if (slotid < tbl->max_slots)
+ tbl->highest_used_slotid = slotid;
+ else
+ tbl->highest_used_slotid = NFS4_NO_SLOT;
+ }
+ dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
+ slotid, tbl->highest_used_slotid);
+}
+
+bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
+{
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ return true;
+}
+
+/*
+ * Signal state manager thread if session fore channel is drained
+ */
+static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
+{
+ if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
+ rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
+ nfs4_set_task_privileged, NULL);
+ return;
+ }
+
+ if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
+ return;
+
+ dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
+ complete(&ses->fc_slot_table.complete);
+}
+
+/*
+ * Signal state manager thread if session back channel is drained
+ */
+void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
+{
+ if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
+ ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
+ return;
+ dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
+ complete(&ses->bc_slot_table.complete);
+}
+
+static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+ struct nfs4_slot_table *tbl;
+
+ tbl = &res->sr_session->fc_slot_table;
+ if (!res->sr_slot) {
+ /* just wake up the next waiter, since
+ * we may not have consumed a slot after all */
+ dprintk("%s: No slot\n", __func__);
+ return;
+ }
+
+ spin_lock(&tbl->slot_tbl_lock);
+ nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
+ nfs4_check_drain_fc_complete(res->sr_session);
+ spin_unlock(&tbl->slot_tbl_lock);
+ res->sr_slot = NULL;
+}
+
+static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
+{
+ unsigned long timestamp;
+ struct nfs_client *clp;
+
+ /*
+ * sr_status remains 1 if an RPC-level error occurred. The server
+ * may or may not have processed the sequence operation.
+ * Proceed as if the server received and processed the sequence
+ * operation.
+ */
+ if (res->sr_status == 1)
+ res->sr_status = NFS_OK;
+
+ /* don't increment the sequence number if the task wasn't sent */
+ if (!RPC_WAS_SENT(task))
+ goto out;
+
+ /* Check the SEQUENCE operation status */
+ switch (res->sr_status) {
+ case 0:
+ /* Update the slot's sequence and clientid lease timer */
+ ++res->sr_slot->seq_nr;
+ timestamp = res->sr_renewal_time;
+ clp = res->sr_session->clp;
+ do_renew_lease(clp, timestamp);
+ /* Check sequence flags */
+ if (res->sr_status_flags != 0)
+ nfs4_schedule_lease_recovery(clp);
+ break;
+ case -NFS4ERR_DELAY:
+ /* The server detected a resend of the RPC call and
+ * returned NFS4ERR_DELAY as per Section 2.10.6.2
+ * of RFC5661.
+ */
+ dprintk("%s: slot=%td seq=%d: Operation in progress\n",
+ __func__,
+ res->sr_slot - res->sr_session->fc_slot_table.slots,
+ res->sr_slot->seq_nr);
+ goto out_retry;
+ default:
+ /* Just update the slot sequence no. */
+ ++res->sr_slot->seq_nr;
+ }
+out:
+ /* The session may be reset by one of the error handlers. */
+ dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
+ nfs41_sequence_free_slot(res);
+ return 1;
+out_retry:
+ if (!rpc_restart_call(task))
+ goto out;
+ rpc_delay(task, NFS4_POLL_RETRY_MAX);
+ return 0;
+}
+
+static int nfs4_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
+{
+ if (res->sr_session == NULL)
+ return 1;
+ return nfs41_sequence_done(task, res);
+}
+
+/*
+ * nfs4_find_slot - efficiently look for a free slot
+ *
+ * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
+ * If found, we mark the slot as used, update the highest_used_slotid,
+ * and set up the sequence operation args accordingly.
+ * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
+ *
+ * Note: must be called while holding the slot_tbl_lock.
+ */
+static u32
+nfs4_find_slot(struct nfs4_slot_table *tbl)
+{
+ u32 slotid;
+ u32 ret_id = NFS4_NO_SLOT;
+
+ dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid,
+ tbl->max_slots);
+ slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
+ if (slotid >= tbl->max_slots)
+ goto out;
+ __set_bit(slotid, tbl->used_slots);
+ if (slotid > tbl->highest_used_slotid ||
+ tbl->highest_used_slotid == NFS4_NO_SLOT)
+ tbl->highest_used_slotid = slotid;
+ ret_id = slotid;
+out:
+ dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
+ __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
+ return ret_id;
+}
+
+static void nfs41_init_sequence(struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res, int cache_reply)
+{
+ args->sa_session = NULL;
+ args->sa_cache_this = 0;
+ if (cache_reply)
+ args->sa_cache_this = 1;
+ res->sr_session = NULL;
+ res->sr_slot = NULL;
+}
+
+int nfs41_setup_sequence(struct nfs4_session *session,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ struct rpc_task *task)
+{
+ struct nfs4_slot *slot;
+ struct nfs4_slot_table *tbl;
+ u32 slotid;
+
+ dprintk("--> %s\n", __func__);
+ /* slot already allocated? */
+ if (res->sr_slot != NULL)
+ return 0;
+
+ tbl = &session->fc_slot_table;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
+ !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
+ /* The state manager will wait until the slot table is empty */
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s session is draining\n", __func__);
+ return -EAGAIN;
+ }
+
+ if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
+ !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("%s enforce FIFO order\n", __func__);
+ return -EAGAIN;
+ }
+
+ slotid = nfs4_find_slot(tbl);
+ if (slotid == NFS4_NO_SLOT) {
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ spin_unlock(&tbl->slot_tbl_lock);
+ dprintk("<-- %s: no free slots\n", __func__);
+ return -EAGAIN;
+ }
+ spin_unlock(&tbl->slot_tbl_lock);
+
+ rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
+ slot = tbl->slots + slotid;
+ args->sa_session = session;
+ args->sa_slotid = slotid;
+
+ dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
+
+ res->sr_session = session;
+ res->sr_slot = slot;
+ res->sr_renewal_time = jiffies;
+ res->sr_status_flags = 0;
+ /*
+ * sr_status is only set in decode_sequence, and so will remain
+ * set to 1 if an rpc level failure occurs.
+ */
+ res->sr_status = 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
+
+int nfs4_setup_sequence(const struct nfs_server *server,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ struct rpc_task *task)
+{
+ struct nfs4_session *session = nfs4_get_session(server);
+ int ret = 0;
+
+ if (session == NULL)
+ goto out;
+
+ dprintk("--> %s clp %p session %p sr_slot %td\n",
+ __func__, session->clp, session, res->sr_slot ?
+ res->sr_slot - session->fc_slot_table.slots : -1);
+
+ ret = nfs41_setup_sequence(session, args, res, task);
+out:
+ dprintk("<-- %s status=%d\n", __func__, ret);
+ return ret;
+}
+
+struct nfs41_call_sync_data {
+ const struct nfs_server *seq_server;
+ struct nfs4_sequence_args *seq_args;
+ struct nfs4_sequence_res *seq_res;
+};
+
+static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs41_call_sync_data *data = calldata;
+
+ dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
+
+ if (nfs4_setup_sequence(data->seq_server, data->seq_args,
+ data->seq_res, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
+{
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ nfs41_call_sync_prepare(task, calldata);
+}
+
+static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs41_call_sync_data *data = calldata;
+
+ nfs41_sequence_done(task, data->seq_res);
+}
+
+static const struct rpc_call_ops nfs41_call_sync_ops = {
+ .rpc_call_prepare = nfs41_call_sync_prepare,
+ .rpc_call_done = nfs41_call_sync_done,
+};
+
+static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
+ .rpc_call_prepare = nfs41_call_priv_sync_prepare,
+ .rpc_call_done = nfs41_call_sync_done,
+};
+
+static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int privileged)
+{
+ int ret;
+ struct rpc_task *task;
+ struct nfs41_call_sync_data data = {
+ .seq_server = server,
+ .seq_args = args,
+ .seq_res = res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clnt,
+ .rpc_message = msg,
+ .callback_ops = &nfs41_call_sync_ops,
+ .callback_data = &data
+ };
+
+ if (privileged)
+ task_setup.callback_ops = &nfs41_call_priv_sync_ops;
+ task = rpc_run_task(&task_setup);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else {
+ ret = task->tk_status;
+ rpc_put_task(task);
+ }
+ return ret;
+}
+
+int _nfs4_call_sync_session(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ nfs41_init_sequence(args, res, cache_reply);
+ return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
+}
+
+#else
+static inline
+void nfs41_init_sequence(struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res, int cache_reply)
+{
+}
+
+static int nfs4_sequence_done(struct rpc_task *task,
+ struct nfs4_sequence_res *res)
+{
+ return 1;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+int _nfs4_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ nfs41_init_sequence(args, res, cache_reply);
+ return rpc_call_sync(clnt, msg, 0);
+}
+
+static inline
+int nfs4_call_sync(struct rpc_clnt *clnt,
+ struct nfs_server *server,
+ struct rpc_message *msg,
+ struct nfs4_sequence_args *args,
+ struct nfs4_sequence_res *res,
+ int cache_reply)
+{
+ return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
+ args, res, cache_reply);
+}
+
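+/*
+ * Update the directory's change attribute from the operation's change
+ * info. If the server did not apply the change atomically, or our
+ * cached pre-operation value no longer matches, force a lookup
+ * revalidation of the directory.
+ */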
+static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
+{
+ struct nfs_inode *nfsi = NFS_I(dir);
+
+ spin_lock(&dir->i_lock);
+ nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA;
+ if (!cinfo->atomic || cinfo->before != dir->i_version)
+ nfs_force_lookup_revalidate(dir);
+ dir->i_version = cinfo->after;
+ spin_unlock(&dir->i_lock);
+}
+
+struct nfs4_opendata {
+ struct kref kref;
+ struct nfs_openargs o_arg;
+ struct nfs_openres o_res;
+ struct nfs_open_confirmargs c_arg;
+ struct nfs_open_confirmres c_res;
+ struct nfs4_string owner_name;
+ struct nfs4_string group_name;
+ struct nfs_fattr f_attr;
+ struct nfs_fattr dir_attr;
+ struct dentry *dir;
+ struct dentry *dentry;
+ struct nfs4_state_owner *owner;
+ struct nfs4_state *state;
+ struct iattr attrs;
+ unsigned long timestamp;
+ unsigned int rpc_done : 1;
+ int rpc_status;
+ int cancelled;
+};
+
+
+static void nfs4_init_opendata_res(struct nfs4_opendata *p)
+{
+ p->o_res.f_attr = &p->f_attr;
+ p->o_res.dir_attr = &p->dir_attr;
+ p->o_res.seqid = p->o_arg.seqid;
+ p->c_res.seqid = p->c_arg.seqid;
+ p->o_res.server = p->o_arg.server;
+ nfs_fattr_init(&p->f_attr);
+ nfs_fattr_init(&p->dir_attr);
+ nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
+}
+
+static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
+ struct nfs4_state_owner *sp, fmode_t fmode, int flags,
+ const struct iattr *attrs,
+ gfp_t gfp_mask)
+{
+ struct dentry *parent = dget_parent(dentry);
+ struct inode *dir = parent->d_inode;
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs4_opendata *p;
+
+ p = kzalloc(sizeof(*p), gfp_mask);
+ if (p == NULL)
+ goto err;
+ p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
+ if (p->o_arg.seqid == NULL)
+ goto err_free;
+ nfs_sb_active(dentry->d_sb);
+ p->dentry = dget(dentry);
+ p->dir = parent;
+ p->owner = sp;
+ atomic_inc(&sp->so_count);
+ p->o_arg.fh = NFS_FH(dir);
+ p->o_arg.open_flags = flags;
+ p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
+ p->o_arg.clientid = server->nfs_client->cl_clientid;
+ p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
+ p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
+ p->o_arg.name = &dentry->d_name;
+ p->o_arg.server = server;
+ p->o_arg.bitmask = server->attr_bitmask;
+ p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
+ p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
+ if (attrs != NULL && attrs->ia_valid != 0) {
+ __be32 verf[2];
+
+ p->o_arg.u.attrs = &p->attrs;
+ memcpy(&p->attrs, attrs, sizeof(p->attrs));
+
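+ /* The exclusive-create verifier is built from jiffies and the
+ * current pid, assumed unique enough for the server to tell
+ * distinct create attempts apart. */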
+ verf[0] = jiffies;
+ verf[1] = current->pid;
+ memcpy(p->o_arg.u.verifier.data, verf,
+ sizeof(p->o_arg.u.verifier.data));
+ }
+ p->c_arg.fh = &p->o_res.fh;
+ p->c_arg.stateid = &p->o_res.stateid;
+ p->c_arg.seqid = p->o_arg.seqid;
+ nfs4_init_opendata_res(p);
+ kref_init(&p->kref);
+ return p;
+err_free:
+ kfree(p);
+err:
+ dput(parent);
+ return NULL;
+}
+
+static void nfs4_opendata_free(struct kref *kref)
+{
+ struct nfs4_opendata *p = container_of(kref,
+ struct nfs4_opendata, kref);
+ struct super_block *sb = p->dentry->d_sb;
+
+ nfs_free_seqid(p->o_arg.seqid);
+ if (p->state != NULL)
+ nfs4_put_open_state(p->state);
+ nfs4_put_state_owner(p->owner);
+ dput(p->dir);
+ dput(p->dentry);
+ nfs_sb_deactive(sb);
+ nfs_fattr_free_names(&p->f_attr);
+ kfree(p);
+}
+
+static void nfs4_opendata_put(struct nfs4_opendata *p)
+{
+ if (p != NULL)
+ kref_put(&p->kref, nfs4_opendata_free);
+}
+
+static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
+{
+ int ret;
+
+ ret = rpc_wait_for_completion_task(task);
+ return ret;
+}
+
+static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
+{
+ int ret = 0;
+
+ if (open_mode & (O_EXCL|O_TRUNC))
+ goto out;
+ switch (mode & (FMODE_READ|FMODE_WRITE)) {
+ case FMODE_READ:
+ ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
+ && state->n_rdonly != 0;
+ break;
+ case FMODE_WRITE:
+ ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
+ && state->n_wronly != 0;
+ break;
+ case FMODE_READ|FMODE_WRITE:
+ ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
+ && state->n_rdwr != 0;
+ }
+out:
+ return ret;
+}
+
+static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
+{
+ if (delegation == NULL)
+ return 0;
+ if ((delegation->type & fmode) != fmode)
+ return 0;
+ if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
+ return 0;
+ nfs_mark_delegation_referenced(delegation);
+ return 1;
+}
+
+static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
+{
+ switch (fmode) {
+ case FMODE_WRITE:
+ state->n_wronly++;
+ break;
+ case FMODE_READ:
+ state->n_rdonly++;
+ break;
+ case FMODE_READ|FMODE_WRITE:
+ state->n_rdwr++;
+ }
+ nfs4_state_set_mode_locked(state, state->state | fmode);
+}
+
+static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
+{
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
+ nfs4_stateid_copy(&state->stateid, stateid);
+ nfs4_stateid_copy(&state->open_stateid, stateid);
+ switch (fmode) {
+ case FMODE_READ:
+ set_bit(NFS_O_RDONLY_STATE, &state->flags);
+ break;
+ case FMODE_WRITE:
+ set_bit(NFS_O_WRONLY_STATE, &state->flags);
+ break;
+ case FMODE_READ|FMODE_WRITE:
+ set_bit(NFS_O_RDWR_STATE, &state->flags);
+ }
+}
+
+static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
+{
+ write_seqlock(&state->seqlock);
+ nfs_set_open_stateid_locked(state, stateid, fmode);
+ write_sequnlock(&state->seqlock);
+}
+
+static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
+{
+ /*
+ * Protect the call to nfs4_state_set_mode_locked and
+ * serialise the stateid update
+ */
+ write_seqlock(&state->seqlock);
+ if (deleg_stateid != NULL) {
+ nfs4_stateid_copy(&state->stateid, deleg_stateid);
+ set_bit(NFS_DELEGATED_STATE, &state->flags);
+ }
+ if (open_stateid != NULL)
+ nfs_set_open_stateid_locked(state, open_stateid, fmode);
+ write_sequnlock(&state->seqlock);
+ spin_lock(&state->owner->so_lock);
+ update_open_stateflags(state, fmode);
+ spin_unlock(&state->owner->so_lock);
+}
+
+static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
+{
+ struct nfs_inode *nfsi = NFS_I(state->inode);
+ struct nfs_delegation *deleg_cur;
+ int ret = 0;
+
+ fmode &= (FMODE_READ|FMODE_WRITE);
+
+ rcu_read_lock();
+ deleg_cur = rcu_dereference(nfsi->delegation);
+ if (deleg_cur == NULL)
+ goto no_delegation;
+
+ spin_lock(&deleg_cur->lock);
+ if (nfsi->delegation != deleg_cur ||
+ (deleg_cur->type & fmode) != fmode)
+ goto no_delegation_unlock;
+
+ if (delegation == NULL)
+ delegation = &deleg_cur->stateid;
+ else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
+ goto no_delegation_unlock;
+
+ nfs_mark_delegation_referenced(deleg_cur);
+ __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
+ ret = 1;
+no_delegation_unlock:
+ spin_unlock(&deleg_cur->lock);
+no_delegation:
+ rcu_read_unlock();
+
+ if (!ret && open_stateid != NULL) {
+ __update_open_stateid(state, open_stateid, NULL, fmode);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+
+static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
+{
+ struct nfs_delegation *delegation;
+
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (delegation == NULL || (delegation->type & fmode) == fmode) {
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+ nfs_inode_return_delegation(inode);
+}
+
+static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
+{
+ struct nfs4_state *state = opendata->state;
+ struct nfs_inode *nfsi = NFS_I(state->inode);
+ struct nfs_delegation *delegation;
+ int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
+ fmode_t fmode = opendata->o_arg.fmode;
+ nfs4_stateid stateid;
+ int ret = -EAGAIN;
+
+ for (;;) {
+ if (can_open_cached(state, fmode, open_mode)) {
+ spin_lock(&state->owner->so_lock);
+ if (can_open_cached(state, fmode, open_mode)) {
+ update_open_stateflags(state, fmode);
+ spin_unlock(&state->owner->so_lock);
+ goto out_return_state;
+ }
+ spin_unlock(&state->owner->so_lock);
+ }
+ rcu_read_lock();
+ delegation = rcu_dereference(nfsi->delegation);
+ if (!can_open_delegated(delegation, fmode)) {
+ rcu_read_unlock();
+ break;
+ }
+ /* Save the delegation */
+ nfs4_stateid_copy(&stateid, &delegation->stateid);
+ rcu_read_unlock();
+ ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
+ if (ret != 0)
+ goto out;
+ ret = -EAGAIN;
+
+ /* Try to update the stateid using the delegation */
+ if (update_open_stateid(state, NULL, &stateid, fmode))
+ goto out_return_state;
+ }
+out:
+ return ERR_PTR(ret);
+out_return_state:
+ atomic_inc(&state->count);
+ return state;
+}
+
+static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
+{
+ struct inode *inode;
+ struct nfs4_state *state = NULL;
+ struct nfs_delegation *delegation;
+ int ret;
+
+ if (!data->rpc_done) {
+ state = nfs4_try_open_cached(data);
+ goto out;
+ }
+
+ ret = -EAGAIN;
+ if (!(data->f_attr.valid & NFS_ATTR_FATTR))
+ goto err;
+ inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
+ ret = PTR_ERR(inode);
+ if (IS_ERR(inode))
+ goto err;
+ ret = -ENOMEM;
+ state = nfs4_get_open_state(inode, data->owner);
+ if (state == NULL)
+ goto err_put_inode;
+ if (data->o_res.delegation_type != 0) {
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ int delegation_flags = 0;
+
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (delegation)
+ delegation_flags = delegation->flags;
+ rcu_read_unlock();
+ if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+ pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+ "returning a delegation for "
+ "OPEN(CLAIM_DELEGATE_CUR)\n",
+ clp->cl_hostname);
+ } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+ nfs_inode_set_delegation(state->inode,
+ data->owner->so_cred,
+ &data->o_res);
+ else
+ nfs_inode_reclaim_delegation(state->inode,
+ data->owner->so_cred,
+ &data->o_res);
+ }
+
+ update_open_stateid(state, &data->o_res.stateid, NULL,
+ data->o_arg.fmode);
+ iput(inode);
+out:
+ return state;
+err_put_inode:
+ iput(inode);
+err:
+ return ERR_PTR(ret);
+}
+
+static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
+{
+ struct nfs_inode *nfsi = NFS_I(state->inode);
+ struct nfs_open_context *ctx;
+
+ spin_lock(&state->inode->i_lock);
+ list_for_each_entry(ctx, &nfsi->open_files, list) {
+ if (ctx->state != state)
+ continue;
+ get_nfs_open_context(ctx);
+ spin_unlock(&state->inode->i_lock);
+ return ctx;
+ }
+ spin_unlock(&state->inode->i_lock);
+ return ERR_PTR(-ENOENT);
+}
+
+static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
+{
+ struct nfs4_opendata *opendata;
+
+ opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
+ if (opendata == NULL)
+ return ERR_PTR(-ENOMEM);
+ opendata->state = state;
+ atomic_inc(&state->count);
+ return opendata;
+}
+
+static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
+{
+ struct nfs4_state *newstate;
+ int ret;
+
+ opendata->o_arg.open_flags = 0;
+ opendata->o_arg.fmode = fmode;
+ memset(&opendata->o_res, 0, sizeof(opendata->o_res));
+ memset(&opendata->c_res, 0, sizeof(opendata->c_res));
+ nfs4_init_opendata_res(opendata);
+ ret = _nfs4_recover_proc_open(opendata);
+ if (ret != 0)
+ return ret;
+ newstate = nfs4_opendata_to_nfs4_state(opendata);
+ if (IS_ERR(newstate))
+ return PTR_ERR(newstate);
+ nfs4_close_state(newstate, fmode);
+ *res = newstate;
+ return 0;
+}
+
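+/*
+ * Re-establish each share mode (read/write, write-only, read-only)
+ * that still has openers, then bring state->stateid back in line with
+ * the open stateid if no delegation is in effect.
+ */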
+static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
+{
+ struct nfs4_state *newstate;
+ int ret;
+
+ /* memory barrier prior to reading state->n_* */
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ smp_rmb();
+ if (state->n_rdwr != 0) {
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
+ if (ret != 0)
+ return ret;
+ if (newstate != state)
+ return -ESTALE;
+ }
+ if (state->n_wronly != 0) {
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+ ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
+ if (ret != 0)
+ return ret;
+ if (newstate != state)
+ return -ESTALE;
+ }
+ if (state->n_rdonly != 0) {
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+ ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
+ if (ret != 0)
+ return ret;
+ if (newstate != state)
+ return -ESTALE;
+ }
+ /*
+ * We may have performed cached opens for all three recoveries.
+ * Check if we need to update the current stateid.
+ */
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
+ !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
+ write_seqlock(&state->seqlock);
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
+ nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+ write_sequnlock(&state->seqlock);
+ }
+ return 0;
+}
+
+/*
+ * OPEN_RECLAIM:
+ * reclaim state on the server after a reboot.
+ */
+static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
+{
+ struct nfs_delegation *delegation;
+ struct nfs4_opendata *opendata;
+ fmode_t delegation_type = 0;
+ int status;
+
+ opendata = nfs4_open_recoverdata_alloc(ctx, state);
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
+ opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
+ opendata->o_arg.fh = NFS_FH(state->inode);
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(state->inode)->delegation);
+ if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
+ delegation_type = delegation->type;
+ rcu_read_unlock();
+ opendata->o_arg.u.delegation_type = delegation_type;
+ status = nfs4_open_recover(opendata, state);
+ nfs4_opendata_put(opendata);
+ return status;
+}
+
+static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
+{
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = _nfs4_do_open_reclaim(ctx, state);
+ if (err != -NFS4ERR_DELAY)
+ break;
+ nfs4_handle_exception(server, err, &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+ struct nfs_open_context *ctx;
+ int ret;
+
+ ctx = nfs4_state_find_open_context(state);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ ret = nfs4_do_open_reclaim(ctx, state);
+ put_nfs_open_context(ctx);
+ return ret;
+}
+
+static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
+{
+ struct nfs4_opendata *opendata;
+ int ret;
+
+ opendata = nfs4_open_recoverdata_alloc(ctx, state);
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
+ opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
+ nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
+ ret = nfs4_open_recover(opendata, state);
+ nfs4_opendata_put(opendata);
+ return ret;
+}
+
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
+{
+ struct nfs4_exception exception = { };
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ int err;
+ do {
+ err = _nfs4_open_delegation_recall(ctx, state, stateid);
+ switch (err) {
+ case 0:
+ case -ENOENT:
+ case -ESTALE:
+ goto out;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+ nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+ goto out;
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ /* Don't recall a delegation if it was lost */
+ nfs4_schedule_lease_recovery(server->nfs_client);
+ goto out;
+ case -ERESTARTSYS:
+ /*
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ nfs_inode_find_state_and_recover(state->inode,
+ stateid);
+ nfs4_schedule_stateid_recovery(server, state);
+ case -EKEYEXPIRED:
+ /*
+ * User RPCSEC_GSS context has expired.
+ * We cannot recover this stateid now, so
+ * skip it and allow recovery thread to
+ * proceed.
+ */
+ case -ENOMEM:
+ err = 0;
+ goto out;
+ }
+ err = nfs4_handle_exception(server, err, &exception);
+ } while (exception.retry);
+out:
+ return err;
+}
+
+static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_opendata *data = calldata;
+
+ data->rpc_status = task->tk_status;
+ if (data->rpc_status == 0) {
+ nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
+ nfs_confirm_seqid(&data->owner->so_seqid, 0);
+ renew_lease(data->o_res.server, data->timestamp);
+ data->rpc_done = 1;
+ }
+}
+
+static void nfs4_open_confirm_release(void *calldata)
+{
+ struct nfs4_opendata *data = calldata;
+ struct nfs4_state *state = NULL;
+
+ /* If this request hasn't been cancelled, do nothing */
+ if (data->cancelled == 0)
+ goto out_free;
+ /* In case of error, no cleanup! */
+ if (!data->rpc_done)
+ goto out_free;
+ state = nfs4_opendata_to_nfs4_state(data);
+ if (!IS_ERR(state))
+ nfs4_close_state(state, data->o_arg.fmode);
+out_free:
+ nfs4_opendata_put(data);
+}
+
+static const struct rpc_call_ops nfs4_open_confirm_ops = {
+ .rpc_call_done = nfs4_open_confirm_done,
+ .rpc_release = nfs4_open_confirm_release,
+};
+
+/*
+ * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
+ */
+static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
+{
+ struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
+ .rpc_argp = &data->c_arg,
+ .rpc_resp = &data->c_res,
+ .rpc_cred = data->owner->so_cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_open_confirm_ops,
+ .callback_data = data,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status;
+
+ kref_get(&data->kref);
+ data->rpc_done = 0;
+ data->rpc_status = 0;
+ data->timestamp = jiffies;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0) {
+ data->cancelled = 1;
+ smp_wmb();
+ } else
+ status = data->rpc_status;
+ rpc_put_task(task);
+ return status;
+}
+
+static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_opendata *data = calldata;
+ struct nfs4_state_owner *sp = data->owner;
+
+ if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
+ return;
+ /*
+ * Check if we still need to send an OPEN call, or if we can use
+ * a delegation instead.
+ */
+ if (data->state != NULL) {
+ struct nfs_delegation *delegation;
+
+ if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
+ goto out_no_action;
+ rcu_read_lock();
+ delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
+ if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+ can_open_delegated(delegation, data->o_arg.fmode))
+ goto unlock_no_action;
+ rcu_read_unlock();
+ }
+ /* Update client id. */
+ data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
+ if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
+ task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
+ nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
+ }
+ data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->o_arg.server,
+ &data->o_arg.seq_args,
+ &data->o_res.seq_res, task))
+ return;
+ rpc_call_start(task);
+ return;
+unlock_no_action:
+ rcu_read_unlock();
+out_no_action:
+ task->tk_action = NULL;
+}
+
+static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
+{
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ nfs4_open_prepare(task, calldata);
+}
+
+static void nfs4_open_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_opendata *data = calldata;
+
+ data->rpc_status = task->tk_status;
+
+ if (!nfs4_sequence_done(task, &data->o_res.seq_res))
+ return;
+
+ if (task->tk_status == 0) {
+ switch (data->o_res.f_attr->mode & S_IFMT) {
+ case S_IFREG:
+ break;
+ case S_IFLNK:
+ data->rpc_status = -ELOOP;
+ break;
+ case S_IFDIR:
+ data->rpc_status = -EISDIR;
+ break;
+ default:
+ data->rpc_status = -ENOTDIR;
+ }
+ renew_lease(data->o_res.server, data->timestamp);
+ if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
+ nfs_confirm_seqid(&data->owner->so_seqid, 0);
+ }
+ data->rpc_done = 1;
+}
+
+static void nfs4_open_release(void *calldata)
+{
+ struct nfs4_opendata *data = calldata;
+ struct nfs4_state *state = NULL;
+
+ /* If this request hasn't been cancelled, do nothing */
+ if (data->cancelled == 0)
+ goto out_free;
+ /* In case of error, no cleanup! */
+ if (data->rpc_status != 0 || !data->rpc_done)
+ goto out_free;
+ /* In case we need an open_confirm, no cleanup! */
+ if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
+ goto out_free;
+ state = nfs4_opendata_to_nfs4_state(data);
+ if (!IS_ERR(state))
+ nfs4_close_state(state, data->o_arg.fmode);
+out_free:
+ nfs4_opendata_put(data);
+}
+
+static const struct rpc_call_ops nfs4_open_ops = {
+ .rpc_call_prepare = nfs4_open_prepare,
+ .rpc_call_done = nfs4_open_done,
+ .rpc_release = nfs4_open_release,
+};
+
+static const struct rpc_call_ops nfs4_recover_open_ops = {
+ .rpc_call_prepare = nfs4_recover_open_prepare,
+ .rpc_call_done = nfs4_open_done,
+ .rpc_release = nfs4_open_release,
+};
+
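+/*
+ * Run an OPEN RPC asynchronously and wait for it to complete. If the
+ * wait is interrupted, flag the request as cancelled so that
+ * nfs4_open_release() can clean up any state that results.
+ */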
+static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
+{
+ struct inode *dir = data->dir->d_inode;
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_openargs *o_arg = &data->o_arg;
+ struct nfs_openres *o_res = &data->o_res;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
+ .rpc_argp = o_arg,
+ .rpc_resp = o_res,
+ .rpc_cred = data->owner->so_cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_open_ops,
+ .callback_data = data,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status;
+
+ nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
+ kref_get(&data->kref);
+ data->rpc_done = 0;
+ data->rpc_status = 0;
+ data->cancelled = 0;
+ if (isrecover)
+ task_setup_data.callback_ops = &nfs4_recover_open_ops;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0) {
+ data->cancelled = 1;
+ smp_wmb();
+ } else
+ status = data->rpc_status;
+ rpc_put_task(task);
+
+ return status;
+}
+
+static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
+{
+ struct inode *dir = data->dir->d_inode;
+ struct nfs_openres *o_res = &data->o_res;
+ int status;
+
+ status = nfs4_run_open_task(data, 1);
+ if (status != 0 || !data->rpc_done)
+ return status;
+
+ nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
+
+ nfs_refresh_inode(dir, o_res->dir_attr);
+
+ if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
+ status = _nfs4_proc_open_confirm(data);
+ if (status != 0)
+ return status;
+ }
+
+ return status;
+}
+
+/*
+ * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
+ */
+static int _nfs4_proc_open(struct nfs4_opendata *data)
+{
+ struct inode *dir = data->dir->d_inode;
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_openargs *o_arg = &data->o_arg;
+ struct nfs_openres *o_res = &data->o_res;
+ int status;
+
+ status = nfs4_run_open_task(data, 0);
+ if (!data->rpc_done)
+ return status;
+ if (status != 0) {
+ if (status == -NFS4ERR_BADNAME &&
+ !(o_arg->open_flags & O_CREAT))
+ return -ENOENT;
+ return status;
+ }
+
+ nfs_fattr_map_and_free_names(server, &data->f_attr);
+
+ if (o_arg->open_flags & O_CREAT) {
+ update_changeattr(dir, &o_res->cinfo);
+ nfs_post_op_update_inode(dir, o_res->dir_attr);
+ } else
+ nfs_refresh_inode(dir, o_res->dir_attr);
+ if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
+ server->caps &= ~NFS_CAP_POSIX_LOCK;
+ if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
+ status = _nfs4_proc_open_confirm(data);
+ if (status != 0)
+ return status;
+ }
+ if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
+ _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
+ return 0;
+}
+
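+/*
+ * Wait for the lease to be recovered, kicking the state manager while
+ * the lease remains expired; give up with -EIO after
+ * NFS4_MAX_LOOP_ON_RECOVER attempts.
+ */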
+static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
+{
+ unsigned int loop;
+ int ret;
+
+ for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
+ ret = nfs4_wait_clnt_recover(clp);
+ if (ret != 0)
+ break;
+ if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
+ !test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
+ break;
+ nfs4_schedule_state_manager(clp);
+ ret = -EIO;
+ }
+ return ret;
+}
+
+static int nfs4_recover_expired_lease(struct nfs_server *server)
+{
+ return nfs4_client_recover_expired_lease(server->nfs_client);
+}
+
+/*
+ * OPEN_EXPIRED:
+ * reclaim state on the server after a network partition.
+ * Assumes caller holds the appropriate lock
+ */
+static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
+{
+ struct nfs4_opendata *opendata;
+ int ret;
+
+ opendata = nfs4_open_recoverdata_alloc(ctx, state);
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
+ ret = nfs4_open_recover(opendata, state);
+ if (ret == -ESTALE)
+ d_drop(ctx->dentry);
+ nfs4_opendata_put(opendata);
+ return ret;
+}
+
+static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
+{
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = _nfs4_open_expired(ctx, state);
+ switch (err) {
+ default:
+ goto out;
+ case -NFS4ERR_GRACE:
+ case -NFS4ERR_DELAY:
+ nfs4_handle_exception(server, err, &exception);
+ err = 0;
+ }
+ } while (exception.retry);
+out:
+ return err;
+}
+
+static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+ struct nfs_open_context *ctx;
+ int ret;
+
+ ctx = nfs4_state_find_open_context(state);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+ ret = nfs4_do_open_expired(ctx, state);
+ put_nfs_open_context(ctx);
+ return ret;
+}
+
+#if defined(CONFIG_NFS_V4_1)
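+/*
+ * NFSv4.1: use TEST_STATEID to check whether a stateid is still valid.
+ * If it is not, free it on the server and clear the corresponding
+ * state flags so that the open must be recovered.
+ */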
+static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
+{
+ int status = NFS_OK;
+ struct nfs_server *server = NFS_SERVER(state->inode);
+
+ if (state->flags & flags) {
+ status = nfs41_test_stateid(server, stateid);
+ if (status != NFS_OK) {
+ nfs41_free_stateid(server, stateid);
+ state->flags &= ~flags;
+ }
+ }
+ return status;
+}
+
+static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+ int deleg_status, open_status;
+ int deleg_flags = 1 << NFS_DELEGATED_STATE;
+ int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);
+
+ deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
+ open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags);
+
+ if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
+ return NFS_OK;
+ return nfs4_open_expired(sp, state);
+}
+#endif
+
+/*
+ * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
+ * fields corresponding to the attributes that were used to store the verifier.
+ * Make sure we clobber those fields in the later setattr call.
+ */
+static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
+{
+ if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
+ !(sattr->ia_valid & ATTR_ATIME_SET))
+ sattr->ia_valid |= ATTR_ATIME;
+
+ if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
+ !(sattr->ia_valid & ATTR_MTIME_SET))
+ sattr->ia_valid |= ATTR_MTIME;
+}
+
+/*
+ * Returns a referenced nfs4_state
+ */
+static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
+{
+ struct nfs4_state_owner *sp;
+ struct nfs4_state *state = NULL;
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs4_opendata *opendata;
+ int status;
+
+ /* Protect against reboot recovery conflicts */
+ status = -ENOMEM;
+ sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
+ if (sp == NULL) {
+ dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
+ goto out_err;
+ }
+ status = nfs4_recover_expired_lease(server);
+ if (status != 0)
+ goto err_put_state_owner;
+ if (dentry->d_inode != NULL)
+ nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
+ status = -ENOMEM;
+ opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
+ if (opendata == NULL)
+ goto err_put_state_owner;
+
+ if (dentry->d_inode != NULL)
+ opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
+
+ status = _nfs4_proc_open(opendata);
+ if (status != 0)
+ goto err_opendata_put;
+
+ state = nfs4_opendata_to_nfs4_state(opendata);
+ status = PTR_ERR(state);
+ if (IS_ERR(state))
+ goto err_opendata_put;
+ if (server->caps & NFS_CAP_POSIX_LOCK)
+ set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
+
+ if (opendata->o_arg.open_flags & O_EXCL) {
+ nfs4_exclusive_attrset(opendata, sattr);
+
+ nfs_fattr_init(opendata->o_res.f_attr);
+ status = nfs4_do_setattr(state->inode, cred,
+ opendata->o_res.f_attr, sattr,
+ state);
+ if (status == 0)
+ nfs_setattr_update_inode(state->inode, sattr);
+ nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
+ }
+ nfs4_opendata_put(opendata);
+ nfs4_put_state_owner(sp);
+ *res = state;
+ return 0;
+err_opendata_put:
+ nfs4_opendata_put(opendata);
+err_put_state_owner:
+ nfs4_put_state_owner(sp);
+out_err:
+ *res = NULL;
+ return status;
+}
+
+static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
+{
+ struct nfs4_exception exception = { };
+ struct nfs4_state *res;
+ int status;
+
+ fmode &= FMODE_READ|FMODE_WRITE;
+ do {
+ status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res);
+ if (status == 0)
+ break;
+ /* NOTE: BAD_SEQID means the server and client disagree about the
+ * book-keeping w.r.t. state-changing operations
+ * (OPEN/CLOSE/LOCK/LOCKU...)
+ * It is actually a sign of a bug on the client or on the server.
+ *
+ * If we receive a BAD_SEQID error in the particular case of
+ * doing an OPEN, we assume that nfs_increment_open_seqid() will
+ * have unhashed the old state_owner for us, and that we can
+ * therefore safely retry using a new one. We should still warn
+ * the user though...
+ */
+ if (status == -NFS4ERR_BAD_SEQID) {
+ pr_warn_ratelimited("NFS: v4 server %s "
+ " returned a bad sequence-id error!\n",
+ NFS_SERVER(dir)->nfs_client->cl_hostname);
+ exception.retry = 1;
+ continue;
+ }
+ /*
+ * BAD_STATEID on OPEN means that the server cancelled our
+ * state before it received the OPEN_CONFIRM.
+ * Recover by retrying the request as per the discussion
+ * on Page 181 of RFC3530.
+ */
+ if (status == -NFS4ERR_BAD_STATEID) {
+ exception.retry = 1;
+ continue;
+ }
+ if (status == -EAGAIN) {
+ /* We must have found a delegation */
+ exception.retry = 1;
+ continue;
+ }
+ res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
+ status, &exception));
+ } while (exception.retry);
+ return res;
+}
+
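+/*
+ * SETATTR needs a stateid: prefer a writeable open stateid, then a
+ * write delegation stateid, and fall back to the zero stateid.
+ */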
+static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
+ struct nfs_fattr *fattr, struct iattr *sattr,
+ struct nfs4_state *state)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_setattrargs arg = {
+ .fh = NFS_FH(inode),
+ .iap = sattr,
+ .server = server,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs_setattrres res = {
+ .fattr = fattr,
+ .server = server,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+ unsigned long timestamp = jiffies;
+ int status;
+
+ nfs_fattr_init(fattr);
+
+ if (state != NULL) {
+ nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
+ current->files, current->tgid);
+ } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
+ FMODE_WRITE)) {
+ /* Use that stateid */
+ } else
+ nfs4_stateid_copy(&arg.stateid, &zero_stateid);
+
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
+ if (status == 0 && state != NULL)
+ renew_lease(server, timestamp);
+ return status;
+}
+
+static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
+ struct nfs_fattr *fattr, struct iattr *sattr,
+ struct nfs4_state *state)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_exception exception = {
+ .state = state,
+ .inode = inode,
+ };
+ int err;
+ do {
+ err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
+ switch (err) {
+ case -NFS4ERR_OPENMODE:
+ if (state && !(state->state & FMODE_WRITE)) {
+ err = -EBADF;
+ if (sattr->ia_valid & ATTR_OPEN)
+ err = -EACCES;
+ goto out;
+ }
+ }
+ err = nfs4_handle_exception(server, err, &exception);
+ } while (exception.retry);
+out:
+ return err;
+}
+
+struct nfs4_closedata {
+ struct inode *inode;
+ struct nfs4_state *state;
+ struct nfs_closeargs arg;
+ struct nfs_closeres res;
+ struct nfs_fattr fattr;
+ unsigned long timestamp;
+ bool roc;
+ u32 roc_barrier;
+};
+
+static void nfs4_free_closedata(void *data)
+{
+ struct nfs4_closedata *calldata = data;
+ struct nfs4_state_owner *sp = calldata->state->owner;
+ struct super_block *sb = calldata->state->inode->i_sb;
+
+ if (calldata->roc)
+ pnfs_roc_release(calldata->state->inode);
+ nfs4_put_open_state(calldata->state);
+ nfs_free_seqid(calldata->arg.seqid);
+ nfs4_put_state_owner(sp);
+ nfs_sb_deactive(sb);
+ kfree(calldata);
+}
+
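+/*
+ * After a CLOSE or OPEN_DOWNGRADE, clear the open-mode state flags
+ * that the remaining open mode no longer covers.
+ */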
+static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
+ fmode_t fmode)
+{
+ spin_lock(&state->owner->so_lock);
+ if (!(fmode & FMODE_READ))
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+ if (!(fmode & FMODE_WRITE))
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ spin_unlock(&state->owner->so_lock);
+}
+
+static void nfs4_close_done(struct rpc_task *task, void *data)
+{
+ struct nfs4_closedata *calldata = data;
+ struct nfs4_state *state = calldata->state;
+ struct nfs_server *server = NFS_SERVER(calldata->inode);
+
+ dprintk("%s: begin!\n", __func__);
+ if (!nfs4_sequence_done(task, &calldata->res.seq_res))
+ return;
+ /* We are done with the inode, and in the process of freeing
+ * the state_owner. Keep this around to process errors.
+ */
+ switch (task->tk_status) {
+ case 0:
+ if (calldata->roc)
+ pnfs_roc_set_barrier(state->inode,
+ calldata->roc_barrier);
+ nfs_set_open_stateid(state, &calldata->res.stateid, 0);
+ renew_lease(server, calldata->timestamp);
+ nfs4_close_clear_stateid_flags(state,
+ calldata->arg.fmode);
+ break;
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ if (calldata->arg.fmode == 0)
+ break;
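+ /* Fall through */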
+ default:
+ if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
+ rpc_restart_call_prepare(task);
+ }
+ nfs_release_seqid(calldata->arg.seqid);
+ nfs_refresh_inode(calldata->inode, calldata->res.fattr);
+ dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
+}
+
+static void nfs4_close_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_closedata *calldata = data;
+ struct nfs4_state *state = calldata->state;
+ int call_close = 0;
+
+ dprintk("%s: begin!\n", __func__);
+ if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
+ return;
+
+ task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
+ calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
+ spin_lock(&state->owner->so_lock);
+ /* Calculate the change in open mode */
+ if (state->n_rdwr == 0) {
+ if (state->n_rdonly == 0) {
+ call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ calldata->arg.fmode &= ~FMODE_READ;
+ }
+ if (state->n_wronly == 0) {
+ call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ calldata->arg.fmode &= ~FMODE_WRITE;
+ }
+ }
+ spin_unlock(&state->owner->so_lock);
+
+ if (!call_close) {
+ /* Note: exit _without_ calling nfs4_close_done */
+ task->tk_action = NULL;
+ goto out;
+ }
+
+ if (calldata->arg.fmode == 0) {
+ task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
+ if (calldata->roc &&
+ pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
+ rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
+ task, NULL);
+ goto out;
+ }
+ }
+
+ nfs_fattr_init(calldata->res.fattr);
+ calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
+ &calldata->arg.seq_args,
+ &calldata->res.seq_res,
+ task))
+ goto out;
+ rpc_call_start(task);
+out:
+ dprintk("%s: done!\n", __func__);
+}
+
+static const struct rpc_call_ops nfs4_close_ops = {
+ .rpc_call_prepare = nfs4_close_prepare,
+ .rpc_call_done = nfs4_close_done,
+ .rpc_release = nfs4_free_closedata,
+};
+
+/*
+ * It is possible for data to be read/written from a mem-mapped file
+ * after the sys_close call (which hits the vfs layer as a flush).
+ * This means that we can't safely call NFSv4 close on a file until
+ * the inode is cleared. This in turn means that we are not good
+ * NFSv4 citizens - we do not tell the server to update the file's
+ * share state even when we are done with one of the three share
+ * stateids in the inode.
+ *
+ * NOTE: Caller must be holding the sp->so_owner semaphore!
+ */
+int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
+{
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs4_closedata *calldata;
+ struct nfs4_state_owner *sp = state->owner;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
+ .rpc_cred = state->owner->so_cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_close_ops,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status = -ENOMEM;
+
+ calldata = kzalloc(sizeof(*calldata), gfp_mask);
+ if (calldata == NULL)
+ goto out;
+ nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
+ calldata->inode = state->inode;
+ calldata->state = state;
+ calldata->arg.fh = NFS_FH(state->inode);
+ calldata->arg.stateid = &state->open_stateid;
+ /* Serialization for the sequence id */
+ calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
+ if (calldata->arg.seqid == NULL)
+ goto out_free_calldata;
+ calldata->arg.fmode = 0;
+ calldata->arg.bitmask = server->cache_consistency_bitmask;
+ calldata->res.fattr = &calldata->fattr;
+ calldata->res.seqid = calldata->arg.seqid;
+ calldata->res.server = server;
+ calldata->roc = roc;
+ nfs_sb_active(calldata->inode->i_sb);
+
+ msg.rpc_argp = &calldata->arg;
+ msg.rpc_resp = &calldata->res;
+ task_setup_data.callback_data = calldata;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = 0;
+ if (wait)
+ status = rpc_wait_for_completion_task(task);
+ rpc_put_task(task);
+ return status;
+out_free_calldata:
+ kfree(calldata);
+out:
+ if (roc)
+ pnfs_roc_release(state->inode);
+ nfs4_put_open_state(state);
+ nfs4_put_state_owner(sp);
+ return status;
+}
+
+static struct inode *
+nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
+{
+ struct nfs4_state *state;
+
+ /* Protect against concurrent sillydeletes */
+ state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred);
+ if (IS_ERR(state))
+ return ERR_CAST(state);
+ ctx->state = state;
+ return igrab(state->inode);
+}
+
+static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
+{
+ if (ctx->state == NULL)
+ return;
+ if (is_sync)
+ nfs4_close_sync(ctx->state, ctx->mode);
+ else
+ nfs4_close_state(ctx->state, ctx->mode);
+}
+
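+/*
+ * Probe the server's capabilities: issue a SERVER_CAPS compound and
+ * translate the returned attribute bitmask into NFS_CAP_* flags.
+ */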
+static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+{
+ struct nfs4_server_caps_arg args = {
+ .fhandle = fhandle,
+ };
+ struct nfs4_server_caps_res res = {};
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+ if (status == 0) {
+ memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
+ server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
+ NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
+ NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
+ NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
+ NFS_CAP_CTIME|NFS_CAP_MTIME);
+ if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
+ server->caps |= NFS_CAP_ACLS;
+ if (res.has_links != 0)
+ server->caps |= NFS_CAP_HARDLINKS;
+ if (res.has_symlinks != 0)
+ server->caps |= NFS_CAP_SYMLINKS;
+ if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
+ server->caps |= NFS_CAP_FILEID;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
+ server->caps |= NFS_CAP_MODE;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
+ server->caps |= NFS_CAP_NLINK;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
+ server->caps |= NFS_CAP_OWNER;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
+ server->caps |= NFS_CAP_OWNER_GROUP;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
+ server->caps |= NFS_CAP_ATIME;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
+ server->caps |= NFS_CAP_CTIME;
+ if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
+ server->caps |= NFS_CAP_MTIME;
+
+ memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
+ server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
+ server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
+ server->acl_bitmask = res.acl_bitmask;
+ server->fh_expire_type = res.fh_expire_type;
+ }
+
+ return status;
+}
+
+int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_server_capabilities(server, fhandle),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ struct nfs4_lookup_root_arg args = {
+ .bitmask = nfs4_fattr_bitmap,
+ };
+ struct nfs4_lookup_res res = {
+ .server = server,
+ .fattr = info->fattr,
+ .fh = fhandle,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ nfs_fattr_init(info->fattr);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+}
+
+static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = _nfs4_lookup_root(server, fhandle, info);
+ switch (err) {
+ case 0:
+ case -NFS4ERR_WRONGSEC:
+ goto out;
+ default:
+ err = nfs4_handle_exception(server, err, &exception);
+ }
+ } while (exception.retry);
+out:
+ return err;
+}
+
+static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info, rpc_authflavor_t flavor)
+{
+ struct rpc_auth *auth;
+ int ret;
+
+ auth = rpcauth_create(flavor, server->client);
+ if (IS_ERR(auth)) {
+ ret = -EIO;
+ goto out;
+ }
+ ret = nfs4_lookup_root(server, fhandle, info);
+out:
+ return ret;
+}
+
+static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ int i, len, status = 0;
+ rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
+
+ len = gss_mech_list_pseudoflavors(&flav_array[0]);
+ flav_array[len] = RPC_AUTH_NULL;
+ len += 1;
+
+ for (i = 0; i < len; i++) {
+ status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
+ if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
+ continue;
+ break;
+ }
+ /*
+ * -EACCES could mean that the user doesn't have correct permissions
+ * to access the mount. It could also mean that we tried to mount
+ * with a gss auth flavor, but rpc.gssd isn't running. Either way,
+ * existing mount programs don't handle -EACCES very well so it should
+ * be mapped to -EPERM instead.
+ */
+ if (status == -EACCES)
+ status = -EPERM;
+ return status;
+}
+
+/*
+ * get the file handle for the "/" directory on the server
+ */
+static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ int minor_version = server->nfs_client->cl_minorversion;
+ int status = nfs4_lookup_root(server, fhandle, info);
+ if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
+ /*
+ * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
+ * by nfs4_map_errors() as this function exits.
+ */
+ status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
+ if (status == 0)
+ status = nfs4_server_capabilities(server, fhandle);
+ if (status == 0)
+ status = nfs4_do_fsinfo(server, fhandle, info);
+ return nfs4_map_errors(status);
+}
+
+/*
+ * Get locations and (maybe) other attributes of a referral.
+ * Note that we'll actually follow the referral later when
+ * we detect fsid mismatch in inode revalidation
+ */
+static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
+ const struct qstr *name, struct nfs_fattr *fattr,
+ struct nfs_fh *fhandle)
+{
+ int status = -ENOMEM;
+ struct page *page = NULL;
+ struct nfs4_fs_locations *locations = NULL;
+
+ page = alloc_page(GFP_KERNEL);
+ if (page == NULL)
+ goto out;
+ locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+ if (locations == NULL)
+ goto out;
+
+ status = nfs4_proc_fs_locations(client, dir, name, locations, page);
+ if (status != 0)
+ goto out;
+ /* Make sure server returned a different fsid for the referral */
+ if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
+ dprintk("%s: server did not return a different fsid for"
+ " a referral at %s\n", __func__, name->name);
+ status = -EIO;
+ goto out;
+ }
+ /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
+ nfs_fixup_referral_attributes(&locations->fattr);
+
+ /* replace the lookup nfs_fattr with the locations nfs_fattr */
+ memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
+ memset(fhandle, 0, sizeof(struct nfs_fh));
+out:
+ if (page)
+ __free_page(page);
+ kfree(locations);
+ return status;
+}
+
+static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+ struct nfs4_getattr_arg args = {
+ .fh = fhandle,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs4_getattr_res res = {
+ .fattr = fattr,
+ .server = server,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ nfs_fattr_init(fattr);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+}
+
+static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_proc_getattr(server, fhandle, fattr),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+/*
+ * The file is not closed if it is opened due to a request to change
+ * the size of the file. The open call will not be needed once the
+ * VFS layer lookup-intents are implemented.
+ *
+ * Close is called when the inode is destroyed.
+ * If we haven't opened the file for O_WRONLY, we
+ * need to in the size_change case to obtain a stateid.
+ *
+ * Got race?
+ * Because OPEN is always done by name in nfsv4, it is
+ * possible that we opened a different file by the same
+ * name. We can recognize this race condition, but we
+ * can't do anything about it besides returning an error.
+ *
+ * This will be fixed with VFS changes (lookup-intent).
+ */
+static int
+nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+ struct iattr *sattr)
+{
+ struct inode *inode = dentry->d_inode;
+ struct rpc_cred *cred = NULL;
+ struct nfs4_state *state = NULL;
+ int status;
+
+ if (pnfs_ld_layoutret_on_setattr(inode))
+ pnfs_return_layout(inode);
+
+ nfs_fattr_init(fattr);
+
+ /* Search for an existing open(O_WRITE) file */
+ if (sattr->ia_valid & ATTR_FILE) {
+ struct nfs_open_context *ctx;
+
+ ctx = nfs_file_open_context(sattr->ia_file);
+ if (ctx) {
+ cred = ctx->cred;
+ state = ctx->state;
+ }
+ }
+
+ /* Deal with open(O_TRUNC) */
+ if (sattr->ia_valid & ATTR_OPEN)
+ sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
+
+ status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
+ if (status == 0)
+ nfs_setattr_update_inode(inode, sattr);
+ return status;
+}
+
+static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
+ const struct qstr *name, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr)
+{
+ struct nfs_server *server = NFS_SERVER(dir);
+ int status;
+ struct nfs4_lookup_arg args = {
+ .bitmask = server->attr_bitmask,
+ .dir_fh = NFS_FH(dir),
+ .name = name,
+ };
+ struct nfs4_lookup_res res = {
+ .server = server,
+ .fattr = fattr,
+ .fh = fhandle,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ nfs_fattr_init(fattr);
+
+ dprintk("NFS call lookup %s\n", name->name);
+ status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
+ dprintk("NFS reply lookup: %d\n", status);
+ return status;
+}
+
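+/*
+ * A lookup that had to negotiate a new security flavor returns no
+ * usable attributes, so synthesize generic directory attributes and
+ * mark the entry as a mountpoint.
+ */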
+static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
+{
+ fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
+ NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
+ fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ fattr->nlink = 2;
+}
+
+static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
+ struct qstr *name, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr)
+{
+ struct nfs4_exception exception = { };
+ struct rpc_clnt *client = *clnt;
+ int err;
+ do {
+ err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
+ switch (err) {
+ case -NFS4ERR_BADNAME:
+ err = -ENOENT;
+ goto out;
+ case -NFS4ERR_MOVED:
+ err = nfs4_get_referral(client, dir, name, fattr, fhandle);
+ goto out;
+ case -NFS4ERR_WRONGSEC:
+ err = -EPERM;
+ if (client != *clnt)
+ goto out;
+
+ client = nfs4_create_sec_client(client, dir, name);
+ if (IS_ERR(client))
+ return PTR_ERR(client);
+
+ exception.retry = 1;
+ break;
+ default:
+ err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
+ }
+ } while (exception.retry);
+
+out:
+ if (err == 0)
+ *clnt = client;
+ else if (client != *clnt)
+ rpc_shutdown_client(client);
+
+ return err;
+}
+
+static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+ int status;
+ struct rpc_clnt *client = NFS_CLIENT(dir);
+
+ status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
+ if (client != NFS_CLIENT(dir)) {
+ rpc_shutdown_client(client);
+ nfs_fixup_secinfo_attributes(fattr);
+ }
+ return status;
+}
+
+struct rpc_clnt *
+nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+ int status;
+ struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
+
+ status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
+ if (status < 0) {
+ rpc_shutdown_client(client);
+ return ERR_PTR(status);
+ }
+ return client;
+}
+
+static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_accessargs args = {
+ .fh = NFS_FH(inode),
+ .bitmask = server->cache_consistency_bitmask,
+ };
+ struct nfs4_accessres res = {
+ .server = server,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = entry->cred,
+ };
+ int mode = entry->mask;
+ int status;
+
+ /*
+ * Determine which access bits we want to ask for...
+ */
+ if (mode & MAY_READ)
+ args.access |= NFS4_ACCESS_READ;
+ if (S_ISDIR(inode->i_mode)) {
+ if (mode & MAY_WRITE)
+ args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
+ if (mode & MAY_EXEC)
+ args.access |= NFS4_ACCESS_LOOKUP;
+ } else {
+ if (mode & MAY_WRITE)
+ args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
+ if (mode & MAY_EXEC)
+ args.access |= NFS4_ACCESS_EXECUTE;
+ }
+
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ return -ENOMEM;
+
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+ if (!status) {
+ entry->mask = 0;
+ if (res.access & NFS4_ACCESS_READ)
+ entry->mask |= MAY_READ;
+ if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
+ entry->mask |= MAY_WRITE;
+ if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
+ entry->mask |= MAY_EXEC;
+ nfs_refresh_inode(inode, res.fattr);
+ }
+ nfs_free_fattr(res.fattr);
+ return status;
+}
+
+static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(inode),
+ _nfs4_proc_access(inode, entry),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+/*
+ * TODO: For the time being, we don't try to get any attributes
+ * along with any of the zero-copy operations READ, READDIR,
+ * READLINK, WRITE.
+ *
+ * In the case of the first three, we want to put the GETATTR
+ * after the read-type operation -- this is because it is hard
+ * to predict the length of a GETATTR response in v4, and thus
+ * align the READ data correctly. This means that the GETATTR
+ * may end up partially falling into the page cache, and we should
+ * shift it into the 'tail' of the xdr_buf before processing.
+ * To do this efficiently, we need to know the total length
+ * of data received, which doesn't seem to be available outside
+ * of the RPC layer.
+ *
+ * In the case of WRITE, we also want to put the GETATTR after
+ * the operation -- in this case because we want to make sure
+ * we get the post-operation mtime and size. This means that
+ * we can't use xdr_encode_pages() as written: we need a variant
+ * of it which would leave room in the 'tail' iovec.
+ *
+ * Both of these changes to the XDR layer would in fact be quite
+ * minor, but I decided to leave them for a subsequent patch.
+ */
+static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
+ unsigned int pgbase, unsigned int pglen)
+{
+ struct nfs4_readlink args = {
+ .fh = NFS_FH(inode),
+ .pgbase = pgbase,
+ .pglen = pglen,
+ .pages = &page,
+ };
+ struct nfs4_readlink_res res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
+}
+
+static int nfs4_proc_readlink(struct inode *inode, struct page *page,
+ unsigned int pgbase, unsigned int pglen)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(inode),
+ _nfs4_proc_readlink(inode, page, pgbase, pglen),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+/*
+ * Got race?
+ * We will need to arrange for the VFS layer to provide an atomic open.
+ * Until then, this create/open method is prone to inefficiency and race
+ * conditions due to the lookup, create, and open VFS calls from sys_open()
+ * placed on the wire.
+ *
+ * Given the above sorry state of affairs, I'm simply sending an OPEN.
+ * The file will be opened again in the subsequent VFS open call
+ * (nfs4_proc_file_open).
+ *
+ * The open for read will just hang around to be used by any process that
+ * opens the file O_RDONLY. This will all be resolved with the VFS changes.
+ */
+
+static int
+nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ int flags, struct nfs_open_context *ctx)
+{
+ struct dentry *de = dentry;
+ struct nfs4_state *state;
+ struct rpc_cred *cred = NULL;
+ fmode_t fmode = 0;
+ int status = 0;
+
+ if (ctx != NULL) {
+ cred = ctx->cred;
+ de = ctx->dentry;
+ fmode = ctx->mode;
+ }
+ sattr->ia_mode &= ~current_umask();
+ state = nfs4_do_open(dir, de, fmode, flags, sattr, cred);
+ d_drop(dentry);
+ if (IS_ERR(state)) {
+ status = PTR_ERR(state);
+ goto out;
+ }
+ d_add(dentry, igrab(state->inode));
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ if (ctx != NULL)
+ ctx->state = state;
+ else
+ nfs4_close_sync(state, fmode);
+out:
+ return status;
+}
+
+static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
+{
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_removeargs args = {
+ .fh = NFS_FH(dir),
+ .name.len = name->len,
+ .name.name = name->name,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs_removeres res = {
+ .server = server,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status = -ENOMEM;
+
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.dir_attr == NULL)
+ goto out;
+
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
+ if (status == 0) {
+ update_changeattr(dir, &res.cinfo);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ }
+ nfs_free_fattr(res.dir_attr);
+out:
+ return status;
+}
+
+static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+ _nfs4_proc_remove(dir, name),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
+{
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_removeargs *args = msg->rpc_argp;
+ struct nfs_removeres *res = msg->rpc_resp;
+
+ args->bitmask = server->cache_consistency_bitmask;
+ res->server = server;
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
+ nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
+}
+
+static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
+{
+ if (nfs4_setup_sequence(NFS_SERVER(data->dir),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task))
+ return;
+ rpc_call_start(task);
+}
+
+static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
+{
+ struct nfs_removeres *res = task->tk_msg.rpc_resp;
+
+ if (!nfs4_sequence_done(task, &res->seq_res))
+ return 0;
+ if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
+ return 0;
+ update_changeattr(dir, &res->cinfo);
+ nfs_post_op_update_inode(dir, res->dir_attr);
+ return 1;
+}
+
+static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
+{
+ struct nfs_server *server = NFS_SERVER(dir);
+ struct nfs_renameargs *arg = msg->rpc_argp;
+ struct nfs_renameres *res = msg->rpc_resp;
+
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
+ arg->bitmask = server->attr_bitmask;
+ res->server = server;
+ nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
+}
+
+static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
+{
+ if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task))
+ return;
+ rpc_call_start(task);
+}
+
+static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
+ struct inode *new_dir)
+{
+ struct nfs_renameres *res = task->tk_msg.rpc_resp;
+
+ if (!nfs4_sequence_done(task, &res->seq_res))
+ return 0;
+ if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
+ return 0;
+
+ update_changeattr(old_dir, &res->old_cinfo);
+ nfs_post_op_update_inode(old_dir, res->old_fattr);
+ update_changeattr(new_dir, &res->new_cinfo);
+ nfs_post_op_update_inode(new_dir, res->new_fattr);
+ return 1;
+}
+
+static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
+ struct inode *new_dir, struct qstr *new_name)
+{
+ struct nfs_server *server = NFS_SERVER(old_dir);
+ struct nfs_renameargs arg = {
+ .old_dir = NFS_FH(old_dir),
+ .new_dir = NFS_FH(new_dir),
+ .old_name = old_name,
+ .new_name = new_name,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs_renameres res = {
+ .server = server,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int status = -ENOMEM;
+
+ res.old_fattr = nfs_alloc_fattr();
+ res.new_fattr = nfs_alloc_fattr();
+ if (res.old_fattr == NULL || res.new_fattr == NULL)
+ goto out;
+
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
+ if (!status) {
+ update_changeattr(old_dir, &res.old_cinfo);
+ nfs_post_op_update_inode(old_dir, res.old_fattr);
+ update_changeattr(new_dir, &res.new_cinfo);
+ nfs_post_op_update_inode(new_dir, res.new_fattr);
+ }
+out:
+ nfs_free_fattr(res.new_fattr);
+ nfs_free_fattr(res.old_fattr);
+ return status;
+}
+
+static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
+ struct inode *new_dir, struct qstr *new_name)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(old_dir),
+ _nfs4_proc_rename(old_dir, old_name,
+ new_dir, new_name),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_link_arg arg = {
+ .fh = NFS_FH(inode),
+ .dir_fh = NFS_FH(dir),
+ .name = name,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs4_link_res res = {
+ .server = server,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int status = -ENOMEM;
+
+ res.fattr = nfs_alloc_fattr();
+ res.dir_attr = nfs_alloc_fattr();
+ if (res.fattr == NULL || res.dir_attr == NULL)
+ goto out;
+
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
+ if (!status) {
+ update_changeattr(dir, &res.cinfo);
+ nfs_post_op_update_inode(dir, res.dir_attr);
+ nfs_post_op_update_inode(inode, res.fattr);
+ }
+out:
+ nfs_free_fattr(res.dir_attr);
+ nfs_free_fattr(res.fattr);
+ return status;
+}
+
+static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(inode),
+ _nfs4_proc_link(inode, dir, name),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
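+/*
+ * Book-keeping for a CREATE compound: the RPC message plus its
+ * argument, result and attribute storage, allocated and freed as one
+ * unit by nfs4_alloc_createdata()/nfs4_free_createdata().
+ */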
+struct nfs4_createdata {
+ struct rpc_message msg;
+ struct nfs4_create_arg arg;
+ struct nfs4_create_res res;
+ struct nfs_fh fh;
+ struct nfs_fattr fattr;
+ struct nfs_fattr dir_fattr;
+};
+
+static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
+ struct qstr *name, struct iattr *sattr, u32 ftype)
+{
+ struct nfs4_createdata *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data != NULL) {
+ struct nfs_server *server = NFS_SERVER(dir);
+
+ data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
+ data->msg.rpc_argp = &data->arg;
+ data->msg.rpc_resp = &data->res;
+ data->arg.dir_fh = NFS_FH(dir);
+ data->arg.server = server;
+ data->arg.name = name;
+ data->arg.attrs = sattr;
+ data->arg.ftype = ftype;
+ data->arg.bitmask = server->attr_bitmask;
+ data->res.server = server;
+ data->res.fh = &data->fh;
+ data->res.fattr = &data->fattr;
+ data->res.dir_fattr = &data->dir_fattr;
+ nfs_fattr_init(data->res.fattr);
+ nfs_fattr_init(data->res.dir_fattr);
+ }
+ return data;
+}
+
+static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
+{
+ int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
+ &data->arg.seq_args, &data->res.seq_res, 1);
+ if (status == 0) {
+ update_changeattr(dir, &data->res.dir_cinfo);
+ nfs_post_op_update_inode(dir, data->res.dir_fattr);
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ }
+ return status;
+}
+
+static void nfs4_free_createdata(struct nfs4_createdata *data)
+{
+ kfree(data);
+}
+
+static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
+ struct page *page, unsigned int len, struct iattr *sattr)
+{
+ struct nfs4_createdata *data;
+ int status = -ENAMETOOLONG;
+
+ if (len > NFS4_MAXPATHLEN)
+ goto out;
+
+ status = -ENOMEM;
+ data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
+ if (data == NULL)
+ goto out;
+
+ data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
+ data->arg.u.symlink.pages = &page;
+ data->arg.u.symlink.len = len;
+
+ status = nfs4_do_create(dir, dentry, data);
+
+ nfs4_free_createdata(data);
+out:
+ return status;
+}
+
+static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
+ struct page *page, unsigned int len, struct iattr *sattr)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+ _nfs4_proc_symlink(dir, dentry, page,
+ len, sattr),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr)
+{
+ struct nfs4_createdata *data;
+ int status = -ENOMEM;
+
+ data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
+ if (data == NULL)
+ goto out;
+
+ status = nfs4_do_create(dir, dentry, data);
+
+ nfs4_free_createdata(data);
+out:
+ return status;
+}
+
+static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ sattr->ia_mode &= ~current_umask();
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+ _nfs4_proc_mkdir(dir, dentry, sattr),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+ u64 cookie, struct page **pages, unsigned int count, int plus)
+{
+ struct inode *dir = dentry->d_inode;
+ struct nfs4_readdir_arg args = {
+ .fh = NFS_FH(dir),
+ .pages = pages,
+ .pgbase = 0,
+ .count = count,
+ .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
+ .plus = plus,
+ };
+ struct nfs4_readdir_res res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+ int status;
+
+ dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
+ dentry->d_parent->d_name.name,
+ dentry->d_name.name,
+ (unsigned long long)cookie);
+ nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
+ res.pgbase = args.pgbase;
+ status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
+ if (status >= 0) {
+ memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
+ status += args.pgbase;
+ }
+
+ nfs_invalidate_atime(dir);
+
+ dprintk("%s: returns %d\n", __func__, status);
+ return status;
+}
+
+static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+ u64 cookie, struct page **pages, unsigned int count, int plus)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
+ _nfs4_proc_readdir(dentry, cred, cookie,
+ pages, count, plus),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr, dev_t rdev)
+{
+ struct nfs4_createdata *data;
+ int mode = sattr->ia_mode;
+ int status = -ENOMEM;
+
+ BUG_ON(!(sattr->ia_valid & ATTR_MODE));
+ BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
+
+ data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
+ if (data == NULL)
+ goto out;
+
+ if (S_ISFIFO(mode))
+ data->arg.ftype = NF4FIFO;
+ else if (S_ISBLK(mode)) {
+ data->arg.ftype = NF4BLK;
+ data->arg.u.device.specdata1 = MAJOR(rdev);
+ data->arg.u.device.specdata2 = MINOR(rdev);
+ } else if (S_ISCHR(mode)) {
+ data->arg.ftype = NF4CHR;
+ data->arg.u.device.specdata1 = MAJOR(rdev);
+ data->arg.u.device.specdata2 = MINOR(rdev);
+ }
+
+ status = nfs4_do_create(dir, dentry, data);
+
+ nfs4_free_createdata(data);
+out:
+ return status;
+}
+
+static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
+ struct iattr *sattr, dev_t rdev)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ sattr->ia_mode &= ~current_umask();
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+ _nfs4_proc_mknod(dir, dentry, sattr, rdev),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsstat *fsstat)
+{
+ struct nfs4_statfs_arg args = {
+ .fh = fhandle,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs4_statfs_res res = {
+ .fsstat = fsstat,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ nfs_fattr_init(fsstat->fattr);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+}
+
+static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_proc_statfs(server, fhandle, fsstat),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *fsinfo)
+{
+ struct nfs4_fsinfo_arg args = {
+ .fh = fhandle,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs4_fsinfo_res res = {
+ .fsinfo = fsinfo,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+}
+
+static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_do_fsinfo(server, fhandle, fsinfo),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
+{
+ nfs_fattr_init(fsinfo->fattr);
+ return nfs4_do_fsinfo(server, fhandle, fsinfo);
+}
+
+static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_pathconf *pathconf)
+{
+ struct nfs4_pathconf_arg args = {
+ .fh = fhandle,
+ .bitmask = server->attr_bitmask,
+ };
+ struct nfs4_pathconf_res res = {
+ .pathconf = pathconf,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ /* None of the pathconf attributes are mandatory to implement */
+ if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
+ memset(pathconf, 0, sizeof(*pathconf));
+ return 0;
+ }
+
+ nfs_fattr_init(pathconf->fattr);
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+}
+
+static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_pathconf *pathconf)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_proc_pathconf(server, fhandle, pathconf),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+void __nfs4_read_done_cb(struct nfs_read_data *data)
+{
+ nfs_invalidate_atime(data->inode);
+}
+
+static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
+{
+ struct nfs_server *server = NFS_SERVER(data->inode);
+
+ if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+
+ __nfs4_read_done_cb(data);
+ if (task->tk_status > 0)
+ renew_lease(server, data->timestamp);
+ return 0;
+}
+
+static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
+{
+
+ dprintk("--> %s\n", __func__);
+
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
+
+ return data->read_done_cb ? data->read_done_cb(task, data) :
+ nfs4_read_done_cb(task, data);
+}
+
+static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
+{
+ data->timestamp = jiffies;
+ data->read_done_cb = nfs4_read_done_cb;
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
+ nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
+}
+
+static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
+{
+ if (nfs4_setup_sequence(NFS_SERVER(data->inode),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task))
+ return;
+ rpc_call_start(task);
+}
+
+/* Reset the nfs_read_data to send the read to the MDS. */
+void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
+{
+ dprintk("%s Reset task for i/o through\n", __func__);
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ /* offsets will differ in the dense stripe case */
+ data->args.offset = data->mds_offset;
+ data->ds_clp = NULL;
+ data->args.fh = NFS_FH(data->inode);
+ data->read_done_cb = nfs4_read_done_cb;
+ task->tk_ops = data->mds_ops;
+ rpc_task_reset_client(task, NFS_CLIENT(data->inode));
+}
+EXPORT_SYMBOL_GPL(nfs4_reset_read);
+
+static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
+{
+ struct inode *inode = data->inode;
+
+ if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+ if (task->tk_status >= 0) {
+ renew_lease(NFS_SERVER(inode), data->timestamp);
+ nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
+ }
+ return 0;
+}
+
+static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
+{
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
+ return data->write_done_cb ? data->write_done_cb(task, data) :
+ nfs4_write_done_cb(task, data);
+}
+
+/* Reset the nfs_write_data to send the write to the MDS. */
+void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data)
+{
+ dprintk("%s Reset task for i/o through\n", __func__);
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ data->ds_clp = NULL;
+ data->write_done_cb = nfs4_write_done_cb;
+ data->args.fh = NFS_FH(data->inode);
+ data->args.bitmask = data->res.server->cache_consistency_bitmask;
+ data->args.offset = data->mds_offset;
+ data->res.fattr = &data->fattr;
+ task->tk_ops = data->mds_ops;
+ rpc_task_reset_client(task, NFS_CLIENT(data->inode));
+}
+EXPORT_SYMBOL_GPL(nfs4_reset_write);
+
+static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
+{
+ struct nfs_server *server = NFS_SERVER(data->inode);
+
+ if (data->lseg) {
+ data->args.bitmask = NULL;
+ data->res.fattr = NULL;
+ } else
+ data->args.bitmask = server->cache_consistency_bitmask;
+ if (!data->write_done_cb)
+ data->write_done_cb = nfs4_write_done_cb;
+ data->res.server = server;
+ data->timestamp = jiffies;
+
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
+ nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
+}
+
+static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
+{
+ if (nfs4_setup_sequence(NFS_SERVER(data->inode),
+ &data->args.seq_args,
+ &data->res.seq_res,
+ task))
+ return;
+ rpc_call_start(task);
+}
+
+static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data)
+{
+ struct inode *inode = data->inode;
+
+ if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return -EAGAIN;
+ }
+ nfs_refresh_inode(inode, data->res.fattr);
+ return 0;
+}
+
+static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
+{
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return -EAGAIN;
+ return data->write_done_cb(task, data);
+}
+
+static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+{
+ struct nfs_server *server = NFS_SERVER(data->inode);
+
+ if (data->lseg) {
+ data->args.bitmask = NULL;
+ data->res.fattr = NULL;
+ } else
+ data->args.bitmask = server->cache_consistency_bitmask;
+ if (!data->write_done_cb)
+ data->write_done_cb = nfs4_commit_done_cb;
+ data->res.server = server;
+ msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
+ nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
+}
+
+struct nfs4_renewdata {
+ struct nfs_client *client;
+ unsigned long timestamp;
+};
+
+/*
+ * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
+ * standalone procedure for queueing an asynchronous RENEW.
+ */
+static void nfs4_renew_release(void *calldata)
+{
+ struct nfs4_renewdata *data = calldata;
+ struct nfs_client *clp = data->client;
+
+ if (atomic_read(&clp->cl_count) > 1)
+ nfs4_schedule_state_renewal(clp);
+ nfs_put_client(clp);
+ kfree(data);
+}
+
+static void nfs4_renew_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_renewdata *data = calldata;
+ struct nfs_client *clp = data->client;
+ unsigned long timestamp = data->timestamp;
+
+ if (task->tk_status < 0) {
+ /* Unless we're shutting down, schedule state recovery! */
+ if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
+ return;
+ if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
+ nfs4_schedule_lease_recovery(clp);
+ return;
+ }
+ nfs4_schedule_path_down_recovery(clp);
+ }
+ do_renew_lease(clp, timestamp);
+}
+
+static const struct rpc_call_ops nfs4_renew_ops = {
+ .rpc_call_done = nfs4_renew_done,
+ .rpc_release = nfs4_renew_release,
+};
+
+static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
+ .rpc_argp = clp,
+ .rpc_cred = cred,
+ };
+ struct nfs4_renewdata *data;
+
+ if (renew_flags == 0)
+ return 0;
+ if (!atomic_inc_not_zero(&clp->cl_count))
+ return -EIO;
+ data = kmalloc(sizeof(*data), GFP_NOFS);
+ if (data == NULL)
+ return -ENOMEM;
+ data->client = clp;
+ data->timestamp = jiffies;
+ return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
+ &nfs4_renew_ops, data);
+}
+
+static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
+ .rpc_argp = clp,
+ .rpc_cred = cred,
+ };
+ unsigned long now = jiffies;
+ int status;
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
+ if (status < 0)
+ return status;
+ do_renew_lease(clp, now);
+ return 0;
+}
+
+static inline int nfs4_server_supports_acls(struct nfs_server *server)
+{
+ return (server->caps & NFS_CAP_ACLS)
+ && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
+ && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
+}
+
+/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
+ * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
+ * the stack.
+ */
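+/* With the common XATTR_SIZE_MAX of 64KiB and 4KiB pages, NFS4ACL_MAXPAGES
+ * below works out to 16 pages, i.e. 16 page pointers (128 bytes on a
+ * 64-bit build) for each on-stack page array.
+ */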
+#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
+
+static int buf_to_pages_noslab(const void *buf, size_t buflen,
+ struct page **pages, unsigned int *pgbase)
+{
+ struct page *newpage, **spages;
+ int rc = 0;
+ size_t len;
+ spages = pages;
+
+ do {
+ len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
+ newpage = alloc_page(GFP_KERNEL);
+
+ if (newpage == NULL)
+ goto unwind;
+ memcpy(page_address(newpage), buf, len);
+ buf += len;
+ buflen -= len;
+ *pages++ = newpage;
+ rc++;
+ } while (buflen != 0);
+
+ return rc;
+
+unwind:
+ for (; rc > 0; rc--)
+ __free_page(spages[rc-1]);
+ return -ENOMEM;
+}
+
+struct nfs4_cached_acl {
+ int cached;
+ size_t len;
+ char data[0];
+};
+
+static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ spin_lock(&inode->i_lock);
+ kfree(nfsi->nfs4_acl);
+ nfsi->nfs4_acl = acl;
+ spin_unlock(&inode->i_lock);
+}
+
+static void nfs4_zap_acl_attr(struct inode *inode)
+{
+ nfs4_set_cached_acl(inode, NULL);
+}
+
+static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs4_cached_acl *acl;
+ int ret = -ENOENT;
+
+ spin_lock(&inode->i_lock);
+ acl = nfsi->nfs4_acl;
+ if (acl == NULL)
+ goto out;
+ if (buf == NULL) /* user is just asking for length */
+ goto out_len;
+ if (acl->cached == 0)
+ goto out;
+ ret = -ERANGE; /* see getxattr(2) man page */
+ if (acl->len > buflen)
+ goto out;
+ memcpy(buf, acl->data, acl->len);
+out_len:
+ ret = acl->len;
+out:
+ spin_unlock(&inode->i_lock);
+ return ret;
+}
+
+static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
+{
+ struct nfs4_cached_acl *acl;
+
+ if (pages && acl_len <= PAGE_SIZE) {
+ acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
+ if (acl == NULL)
+ goto out;
+ acl->cached = 1;
+ _copy_from_pages(acl->data, pages, pgbase, acl_len);
+ } else {
+ acl = kmalloc(sizeof(*acl), GFP_KERNEL);
+ if (acl == NULL)
+ goto out;
+ acl->cached = 0;
+ }
+ acl->len = acl_len;
+out:
+ nfs4_set_cached_acl(inode, acl);
+}
+
+/*
+ * The getxattr API returns the required buffer length when called with a
+ * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
+ * the required buf. On a NULL buf, we send a page of data to the server
+ * guessing that the ACL request can be serviced by a page. If so, we cache
+ * up to the page of ACL data, and the 2nd call to getxattr is serviced by
+ * the cache. If not, we throw away the page and cache the required
+ * length. The next getxattr call will then produce another round trip to
+ * the server, this time with the input buf of the required size.
+ */
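+/*
+ * A hypothetical userspace consumer of the above (sketch only; getxattr(2)
+ * comes from <sys/xattr.h>, parse_nfs4_acl() is a made-up parser, and
+ * error handling is omitted):
+ *
+ * ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
+ * char *buf = malloc(len);
+ * if (buf && getxattr(path, "system.nfs4_acl", buf, len) == len)
+ * parse_nfs4_acl(buf, len);
+ */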
+static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+{
+ struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+ struct nfs_getaclargs args = {
+ .fh = NFS_FH(inode),
+ .acl_pages = pages,
+ .acl_len = buflen,
+ };
+ struct nfs_getaclres res = {
+ .acl_len = buflen,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int ret = -ENOMEM, npages, i, acl_len = 0;
+
+ npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ /* As long as we're doing a round trip to the server anyway,
+ * let's be prepared for a page of acl data. */
+ if (npages == 0)
+ npages = 1;
+
+ /* Add an extra page to handle the bitmap returned */
+ npages++;
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL);
+ if (!pages[i])
+ goto out_free;
+ }
+
+ /* for decoding across pages */
+ res.acl_scratch = alloc_page(GFP_KERNEL);
+ if (!res.acl_scratch)
+ goto out_free;
+
+ args.acl_len = npages * PAGE_SIZE;
+ args.acl_pgbase = 0;
+
+ /* Let decode_getfacl know not to fail if the ACL data is larger than
+ * the page we send as a guess */
+ if (buf == NULL)
+ res.acl_flags |= NFS4_ACL_LEN_REQUEST;
+
+ dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
+ __func__, buf, buflen, npages, args.acl_len);
+ ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
+ &msg, &args.seq_args, &res.seq_res, 0);
+ if (ret)
+ goto out_free;
+
+ acl_len = res.acl_len - res.acl_data_offset;
+ if (acl_len > args.acl_len)
+ nfs4_write_cached_acl(inode, NULL, 0, acl_len);
+ else
+ nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
+ acl_len);
+ if (buf) {
+ ret = -ERANGE;
+ if (acl_len > buflen)
+ goto out_free;
+ _copy_from_pages(buf, pages, res.acl_data_offset,
+ acl_len);
+ }
+ ret = acl_len;
+out_free:
+ for (i = 0; i < npages; i++)
+ if (pages[i])
+ __free_page(pages[i]);
+ if (res.acl_scratch)
+ __free_page(res.acl_scratch);
+ return ret;
+}
+
+static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
+{
+ struct nfs4_exception exception = { };
+ ssize_t ret;
+ do {
+ ret = __nfs4_get_acl_uncached(inode, buf, buflen);
+ if (ret >= 0)
+ break;
+ ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
+ } while (exception.retry);
+ return ret;
+}
+
+static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ int ret;
+
+ if (!nfs4_server_supports_acls(server))
+ return -EOPNOTSUPP;
+ ret = nfs_revalidate_inode(server, inode);
+ if (ret < 0)
+ return ret;
+ if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
+ nfs_zap_acl_cache(inode);
+ ret = nfs4_read_cached_acl(inode, buf, buflen);
+ if (ret != -ENOENT)
+ /* -ENOENT is returned if there is no ACL or if there is an ACL
+ * but no cached acl data, just the acl length */
+ return ret;
+ return nfs4_get_acl_uncached(inode, buf, buflen);
+}
+
+static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct page *pages[NFS4ACL_MAXPAGES];
+ struct nfs_setaclargs arg = {
+ .fh = NFS_FH(inode),
+ .acl_pages = pages,
+ .acl_len = buflen,
+ };
+ struct nfs_setaclres res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int ret, i;
+
+ if (!nfs4_server_supports_acls(server))
+ return -EOPNOTSUPP;
+ i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
+ if (i < 0)
+ return i;
+ nfs_inode_return_delegation(inode);
+ ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
+
+ /*
+ * Free each page after tx, so the only ref left is
+ * held by the network stack
+ */
+ for (; i > 0; i--)
+ put_page(pages[i-1]);
+
+ /*
+ * An ACL update can result in an inode attribute update,
+ * so mark the attribute cache invalid.
+ */
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
+ spin_unlock(&inode->i_lock);
+ nfs_access_zap_cache(inode);
+ nfs_zap_acl_cache(inode);
+ return ret;
+}
+
+static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(inode),
+ __nfs4_proc_set_acl(inode, buf, buflen),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int
+nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
+{
+ struct nfs_client *clp = server->nfs_client;
+
+ if (task->tk_status >= 0)
+ return 0;
+ switch (task->tk_status) {
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ if (state == NULL)
+ break;
+ nfs_remove_bad_delegation(state->inode);
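+ /* fall through */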
+ case -NFS4ERR_OPENMODE:
+ if (state == NULL)
+ break;
+ nfs4_schedule_stateid_recovery(server, state);
+ goto wait_on_recovery;
+ case -NFS4ERR_EXPIRED:
+ if (state != NULL)
+ nfs4_schedule_stateid_recovery(server, state);
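+ /* fall through */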
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_STALE_CLIENTID:
+ nfs4_schedule_lease_recovery(clp);
+ goto wait_on_recovery;
+#if defined(CONFIG_NFS_V4_1)
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ dprintk("%s ERROR %d, Reset session\n", __func__,
+ task->tk_status);
+ nfs4_schedule_session_recovery(clp->cl_session);
+ task->tk_status = 0;
+ return -EAGAIN;
+#endif /* CONFIG_NFS_V4_1 */
+ case -NFS4ERR_DELAY:
+ nfs_inc_server_stats(server, NFSIOS_DELAY);
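+ /* fall through */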
+ case -NFS4ERR_GRACE:
+ case -EKEYEXPIRED:
+ rpc_delay(task, NFS4_POLL_RETRY_MAX);
+ task->tk_status = 0;
+ return -EAGAIN;
+ case -NFS4ERR_RETRY_UNCACHED_REP:
+ case -NFS4ERR_OLD_STATEID:
+ task->tk_status = 0;
+ return -EAGAIN;
+ }
+ task->tk_status = nfs4_map_errors(task->tk_status);
+ return 0;
+wait_on_recovery:
+ rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
+ if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
+ rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
+ task->tk_status = 0;
+ return -EAGAIN;
+}
+
+static void nfs4_construct_boot_verifier(struct nfs_client *clp,
+ nfs4_verifier *bootverf)
+{
+ __be32 verf[2];
+
+ verf[0] = htonl((u32)clp->cl_boot_time.tv_sec);
+ verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec);
+ memcpy(bootverf->data, verf, sizeof(bootverf->data));
+}
+
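+/*
+ * The sc_name generated below looks something like
+ * "192.0.2.10/192.0.2.1 tcp unix 0" (addresses hypothetical): client IP,
+ * server address, transport name, auth flavour name, and a uniquifier
+ * that is bumped when the server reports NFS4ERR_CLID_INUSE.
+ */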
+int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
+ unsigned short port, struct rpc_cred *cred,
+ struct nfs4_setclientid_res *res)
+{
+ nfs4_verifier sc_verifier;
+ struct nfs4_setclientid setclientid = {
+ .sc_verifier = &sc_verifier,
+ .sc_prog = program,
+ .sc_cb_ident = clp->cl_cb_ident,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
+ .rpc_argp = &setclientid,
+ .rpc_resp = res,
+ .rpc_cred = cred,
+ };
+ int loop = 0;
+ int status;
+
+ nfs4_construct_boot_verifier(clp, &sc_verifier);
+
+ for (;;) {
+ rcu_read_lock();
+ setclientid.sc_name_len = scnprintf(setclientid.sc_name,
+ sizeof(setclientid.sc_name), "%s/%s %s %s %u",
+ clp->cl_ipaddr,
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_ADDR),
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_PROTO),
+ clp->cl_rpcclient->cl_auth->au_ops->au_name,
+ clp->cl_id_uniquifier);
+ setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
+ sizeof(setclientid.sc_netid),
+ rpc_peeraddr2str(clp->cl_rpcclient,
+ RPC_DISPLAY_NETID));
+ setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
+ sizeof(setclientid.sc_uaddr), "%s.%u.%u",
+ clp->cl_ipaddr, port >> 8, port & 255);
+ rcu_read_unlock();
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ if (status != -NFS4ERR_CLID_INUSE)
+ break;
+ if (loop != 0) {
+ ++clp->cl_id_uniquifier;
+ break;
+ }
+ ++loop;
+ ssleep(clp->cl_lease_time / HZ + 1);
+ }
+ return status;
+}
+
+int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
+ struct nfs4_setclientid_res *arg,
+ struct rpc_cred *cred)
+{
+ struct nfs_fsinfo fsinfo;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
+ .rpc_argp = arg,
+ .rpc_resp = &fsinfo,
+ .rpc_cred = cred,
+ };
+ unsigned long now;
+ int status;
+
+ now = jiffies;
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ if (status == 0) {
+ spin_lock(&clp->cl_lock);
+ clp->cl_lease_time = fsinfo.lease_time * HZ;
+ clp->cl_last_renewal = now;
+ spin_unlock(&clp->cl_lock);
+ }
+ return status;
+}
+
+struct nfs4_delegreturndata {
+ struct nfs4_delegreturnargs args;
+ struct nfs4_delegreturnres res;
+ struct nfs_fh fh;
+ nfs4_stateid stateid;
+ unsigned long timestamp;
+ struct nfs_fattr fattr;
+ int rpc_status;
+};
+
+static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_delegreturndata *data = calldata;
+
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return;
+
+ switch (task->tk_status) {
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case 0:
+ renew_lease(data->res.server, data->timestamp);
+ break;
+ default:
+ if (nfs4_async_handle_error(task, data->res.server, NULL) ==
+ -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ }
+ data->rpc_status = task->tk_status;
+}
+
+static void nfs4_delegreturn_release(void *calldata)
+{
+ kfree(calldata);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_delegreturndata *d_data;
+
+ d_data = (struct nfs4_delegreturndata *)data;
+
+ if (nfs4_setup_sequence(d_data->res.server,
+ &d_data->args.seq_args,
+ &d_data->res.seq_res, task))
+ return;
+ rpc_call_start(task);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static const struct rpc_call_ops nfs4_delegreturn_ops = {
+#if defined(CONFIG_NFS_V4_1)
+ .rpc_call_prepare = nfs4_delegreturn_prepare,
+#endif /* CONFIG_NFS_V4_1 */
+ .rpc_call_done = nfs4_delegreturn_done,
+ .rpc_release = nfs4_delegreturn_release,
+};
+
+static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
+{
+ struct nfs4_delegreturndata *data;
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
+ .rpc_cred = cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_delegreturn_ops,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status = 0;
+
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (data == NULL)
+ return -ENOMEM;
+ nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
+ data->args.fhandle = &data->fh;
+ data->args.stateid = &data->stateid;
+ data->args.bitmask = server->attr_bitmask;
+ nfs_copy_fh(&data->fh, NFS_FH(inode));
+ nfs4_stateid_copy(&data->stateid, stateid);
+ data->res.fattr = &data->fattr;
+ data->res.server = server;
+ nfs_fattr_init(data->res.fattr);
+ data->timestamp = jiffies;
+ data->rpc_status = 0;
+
+ task_setup_data.callback_data = data;
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ if (!issync)
+ goto out;
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0)
+ goto out;
+ status = data->rpc_status;
+ if (status != 0)
+ goto out;
+ nfs_refresh_inode(inode, &data->fattr);
+out:
+ rpc_put_task(task);
+ return status;
+}
+
+int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
+ switch (err) {
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case 0:
+ return 0;
+ }
+ err = nfs4_handle_exception(server, err, &exception);
+ } while (exception.retry);
+ return err;
+}
+
+#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
+#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
+
+/*
+ * sleep, with exponential backoff, and retry the LOCK operation.
+ */
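+/* For example, the successive waits are roughly 1s, 2s, 4s, 8s, 16s and
+ * then 30s (the NFS4_LOCK_MAXTIMEOUT cap) for every retry thereafter.
+ */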
+static unsigned long
+nfs4_set_lock_task_retry(unsigned long timeout)
+{
+ freezable_schedule_timeout_killable(timeout);
+ timeout <<= 1;
+ if (timeout > NFS4_LOCK_MAXTIMEOUT)
+ return NFS4_LOCK_MAXTIMEOUT;
+ return timeout;
+}
+
+static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+ struct inode *inode = state->inode;
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs_lockt_args arg = {
+ .fh = NFS_FH(inode),
+ .fl = request,
+ };
+ struct nfs_lockt_res res = {
+ .denied = request,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ .rpc_cred = state->owner->so_cred,
+ };
+ struct nfs4_lock_state *lsp;
+ int status;
+
+ arg.lock_owner.clientid = clp->cl_clientid;
+ status = nfs4_set_lock_state(state, request);
+ if (status != 0)
+ goto out;
+ lsp = request->fl_u.nfs4_fl.owner;
+ arg.lock_owner.id = lsp->ls_seqid.owner_id;
+ arg.lock_owner.s_dev = server->s_dev;
+ status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
+ switch (status) {
+ case 0:
+ request->fl_type = F_UNLCK;
+ break;
+ case -NFS4ERR_DENIED:
+ status = 0;
+ }
+ request->fl_ops->fl_release_private(request);
+out:
+ return status;
+}
+
+static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(state->inode),
+ _nfs4_proc_getlk(state, cmd, request),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int do_vfs_lock(struct file *file, struct file_lock *fl)
+{
+ int res = 0;
+ switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+ case FL_POSIX:
+ res = posix_lock_file_wait(file, fl);
+ break;
+ case FL_FLOCK:
+ res = flock_lock_file_wait(file, fl);
+ break;
+ default:
+ BUG();
+ }
+ return res;
+}
+
+struct nfs4_unlockdata {
+ struct nfs_locku_args arg;
+ struct nfs_locku_res res;
+ struct nfs4_lock_state *lsp;
+ struct nfs_open_context *ctx;
+ struct file_lock fl;
+ const struct nfs_server *server;
+ unsigned long timestamp;
+};
+
+static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
+ struct nfs_open_context *ctx,
+ struct nfs4_lock_state *lsp,
+ struct nfs_seqid *seqid)
+{
+ struct nfs4_unlockdata *p;
+ struct inode *inode = lsp->ls_state->inode;
+
+ p = kzalloc(sizeof(*p), GFP_NOFS);
+ if (p == NULL)
+ return NULL;
+ p->arg.fh = NFS_FH(inode);
+ p->arg.fl = &p->fl;
+ p->arg.seqid = seqid;
+ p->res.seqid = seqid;
+ p->arg.stateid = &lsp->ls_stateid;
+ p->lsp = lsp;
+ atomic_inc(&lsp->ls_count);
+ /* Ensure we don't close file until we're done freeing locks! */
+ p->ctx = get_nfs_open_context(ctx);
+ memcpy(&p->fl, fl, sizeof(p->fl));
+ p->server = NFS_SERVER(inode);
+ return p;
+}
+
+static void nfs4_locku_release_calldata(void *data)
+{
+ struct nfs4_unlockdata *calldata = data;
+ nfs_free_seqid(calldata->arg.seqid);
+ nfs4_put_lock_state(calldata->lsp);
+ put_nfs_open_context(calldata->ctx);
+ kfree(calldata);
+}
+
+static void nfs4_locku_done(struct rpc_task *task, void *data)
+{
+ struct nfs4_unlockdata *calldata = data;
+
+ if (!nfs4_sequence_done(task, &calldata->res.seq_res))
+ return;
+ switch (task->tk_status) {
+ case 0:
+ nfs4_stateid_copy(&calldata->lsp->ls_stateid,
+ &calldata->res.stateid);
+ renew_lease(calldata->server, calldata->timestamp);
+ break;
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_EXPIRED:
+ break;
+ default:
+ if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
+ rpc_restart_call_prepare(task);
+ }
+}
+
+static void nfs4_locku_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_unlockdata *calldata = data;
+
+ if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
+ return;
+ if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
+ /* Note: exit _without_ running nfs4_locku_done */
+ task->tk_action = NULL;
+ return;
+ }
+ calldata->timestamp = jiffies;
+ if (nfs4_setup_sequence(calldata->server,
+ &calldata->arg.seq_args,
+ &calldata->res.seq_res, task))
+ return;
+ rpc_call_start(task);
+}
+
+static const struct rpc_call_ops nfs4_locku_ops = {
+ .rpc_call_prepare = nfs4_locku_prepare,
+ .rpc_call_done = nfs4_locku_done,
+ .rpc_release = nfs4_locku_release_calldata,
+};
+
+static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
+ struct nfs_open_context *ctx,
+ struct nfs4_lock_state *lsp,
+ struct nfs_seqid *seqid)
+{
+ struct nfs4_unlockdata *data;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
+ .rpc_cred = ctx->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_locku_ops,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+
+ /* Ensure this is an unlock - when canceling a lock, the
+ * canceled lock is passed in, and it won't be an unlock.
+ */
+ fl->fl_type = F_UNLCK;
+
+ data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
+ if (data == NULL) {
+ nfs_free_seqid(seqid);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
+ task_setup_data.callback_data = data;
+ return rpc_run_task(&task_setup_data);
+}
+
+static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+ struct nfs_inode *nfsi = NFS_I(state->inode);
+ struct nfs_seqid *seqid;
+ struct nfs4_lock_state *lsp;
+ struct rpc_task *task;
+ int status = 0;
+ unsigned char fl_flags = request->fl_flags;
+
+ status = nfs4_set_lock_state(state, request);
+ /* Unlock _before_ we do the RPC call */
+ request->fl_flags |= FL_EXISTS;
+ down_read(&nfsi->rwsem);
+ if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
+ up_read(&nfsi->rwsem);
+ goto out;
+ }
+ up_read(&nfsi->rwsem);
+ if (status != 0)
+ goto out;
+ /* Is this a delegated lock? */
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags))
+ goto out;
+ lsp = request->fl_u.nfs4_fl.owner;
+ seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
+ status = -ENOMEM;
+ if (seqid == NULL)
+ goto out;
+ task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
+ status = PTR_ERR(task);
+ if (IS_ERR(task))
+ goto out;
+ status = nfs4_wait_for_completion_rpc_task(task);
+ rpc_put_task(task);
+out:
+ request->fl_flags = fl_flags;
+ return status;
+}
+
+struct nfs4_lockdata {
+ struct nfs_lock_args arg;
+ struct nfs_lock_res res;
+ struct nfs4_lock_state *lsp;
+ struct nfs_open_context *ctx;
+ struct file_lock fl;
+ unsigned long timestamp;
+ int rpc_status;
+ int cancelled;
+ struct nfs_server *server;
+};
+
+static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
+ struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
+ gfp_t gfp_mask)
+{
+ struct nfs4_lockdata *p;
+ struct inode *inode = lsp->ls_state->inode;
+ struct nfs_server *server = NFS_SERVER(inode);
+
+ p = kzalloc(sizeof(*p), gfp_mask);
+ if (p == NULL)
+ return NULL;
+
+ p->arg.fh = NFS_FH(inode);
+ p->arg.fl = &p->fl;
+ p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
+ if (p->arg.open_seqid == NULL)
+ goto out_free;
+ p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
+ if (p->arg.lock_seqid == NULL)
+ goto out_free_seqid;
+ p->arg.lock_stateid = &lsp->ls_stateid;
+ p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
+ p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
+ p->arg.lock_owner.s_dev = server->s_dev;
+ p->res.lock_seqid = p->arg.lock_seqid;
+ p->lsp = lsp;
+ p->server = server;
+ atomic_inc(&lsp->ls_count);
+ p->ctx = get_nfs_open_context(ctx);
+ memcpy(&p->fl, fl, sizeof(p->fl));
+ return p;
+out_free_seqid:
+ nfs_free_seqid(p->arg.open_seqid);
+out_free:
+ kfree(p);
+ return NULL;
+}
+
+static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_lockdata *data = calldata;
+ struct nfs4_state *state = data->lsp->ls_state;
+
+ dprintk("%s: begin!\n", __func__);
+ if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
+ return;
+ /* Do we need to do an open_to_lock_owner? */
+ if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
+ if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
+ return;
+ data->arg.open_stateid = &state->stateid;
+ data->arg.new_lock_owner = 1;
+ data->res.open_seqid = data->arg.open_seqid;
+ } else
+ data->arg.new_lock_owner = 0;
+ data->timestamp = jiffies;
+ if (nfs4_setup_sequence(data->server,
+ &data->arg.seq_args,
+ &data->res.seq_res, task))
+ return;
+ rpc_call_start(task);
+ dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
+}
+
+static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
+{
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ nfs4_lock_prepare(task, calldata);
+}
+
+static void nfs4_lock_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_lockdata *data = calldata;
+
+ dprintk("%s: begin!\n", __func__);
+
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return;
+
+ data->rpc_status = task->tk_status;
+ if (data->arg.new_lock_owner != 0) {
+ if (data->rpc_status == 0)
+ nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
+ else
+ goto out;
+ }
+ if (data->rpc_status == 0) {
+ nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
+ data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
+ renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
+ }
+out:
+ dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
+}
+
+static void nfs4_lock_release(void *calldata)
+{
+ struct nfs4_lockdata *data = calldata;
+
+ dprintk("%s: begin!\n", __func__);
+ nfs_free_seqid(data->arg.open_seqid);
+ if (data->cancelled != 0) {
+ struct rpc_task *task;
+ task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
+ data->arg.lock_seqid);
+ if (!IS_ERR(task))
+ rpc_put_task_async(task);
+ dprintk("%s: cancelling lock!\n", __func__);
+ } else
+ nfs_free_seqid(data->arg.lock_seqid);
+ nfs4_put_lock_state(data->lsp);
+ put_nfs_open_context(data->ctx);
+ kfree(data);
+ dprintk("%s: done!\n", __func__);
+}
+
+static const struct rpc_call_ops nfs4_lock_ops = {
+ .rpc_call_prepare = nfs4_lock_prepare,
+ .rpc_call_done = nfs4_lock_done,
+ .rpc_release = nfs4_lock_release,
+};
+
+static const struct rpc_call_ops nfs4_recover_lock_ops = {
+ .rpc_call_prepare = nfs4_recover_lock_prepare,
+ .rpc_call_done = nfs4_lock_done,
+ .rpc_release = nfs4_lock_release,
+};
+
+static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
+{
+ switch (error) {
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+ if (new_lock_owner != 0 ||
+ (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+ nfs4_schedule_stateid_recovery(server, lsp->ls_state);
+ break;
+ case -NFS4ERR_STALE_STATEID:
+ lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
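+ /* fall through */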
+ case -NFS4ERR_EXPIRED:
+ nfs4_schedule_lease_recovery(server->nfs_client);
+ }
+}
+
+static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
+{
+ struct nfs4_lockdata *data;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
+ .rpc_cred = state->owner->so_cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = NFS_CLIENT(state->inode),
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_lock_ops,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int ret;
+
+ dprintk("%s: begin!\n", __func__);
+ data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
+ fl->fl_u.nfs4_fl.owner,
+ recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
+ if (data == NULL)
+ return -ENOMEM;
+ if (IS_SETLKW(cmd))
+ data->arg.block = 1;
+ if (recovery_type > NFS_LOCK_NEW) {
+ if (recovery_type == NFS_LOCK_RECLAIM)
+ data->arg.reclaim = NFS_LOCK_RECLAIM;
+ task_setup_data.callback_ops = &nfs4_recover_lock_ops;
+ }
+ nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
+ task_setup_data.callback_data = data;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ ret = nfs4_wait_for_completion_rpc_task(task);
+ if (ret == 0) {
+ ret = data->rpc_status;
+ if (ret)
+ nfs4_handle_setlk_error(data->server, data->lsp,
+ data->arg.new_lock_owner, ret);
+ } else
+ data->cancelled = 1;
+ rpc_put_task(task);
+ dprintk("%s: done, ret = %d!\n", __func__, ret);
+ return ret;
+}
+
+static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
+{
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs4_exception exception = {
+ .inode = state->inode,
+ };
+ int err;
+
+ do {
+ /* Cache the lock if possible... */
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
+ return 0;
+ err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
+ if (err != -NFS4ERR_DELAY)
+ break;
+ nfs4_handle_exception(server, err, &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
+{
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs4_exception exception = {
+ .inode = state->inode,
+ };
+ int err;
+
+ err = nfs4_set_lock_state(state, request);
+ if (err != 0)
+ return err;
+ do {
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
+ return 0;
+ err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
+ switch (err) {
+ default:
+ goto out;
+ case -NFS4ERR_GRACE:
+ case -NFS4ERR_DELAY:
+ nfs4_handle_exception(server, err, &exception);
+ err = 0;
+ }
+ } while (exception.retry);
+out:
+ return err;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+static int nfs41_check_expired_locks(struct nfs4_state *state)
+{
+ int status, ret = NFS_OK;
+ struct nfs4_lock_state *lsp;
+ struct nfs_server *server = NFS_SERVER(state->inode);
+
+ list_for_each_entry(lsp, &state->lock_states, ls_locks) {
+ if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
+ status = nfs41_test_stateid(server, &lsp->ls_stateid);
+ if (status != NFS_OK) {
+ nfs41_free_stateid(server, &lsp->ls_stateid);
+ lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
+ ret = status;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
+{
+ int status = NFS_OK;
+
+ if (test_bit(LK_STATE_IN_USE, &state->flags))
+ status = nfs41_check_expired_locks(state);
+ if (status == NFS_OK)
+ return status;
+ return nfs4_lock_expired(state, request);
+}
+#endif
+
+static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+ struct nfs_inode *nfsi = NFS_I(state->inode);
+ unsigned char fl_flags = request->fl_flags;
+ int status = -ENOLCK;
+
+ if ((fl_flags & FL_POSIX) &&
+ !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
+ goto out;
+ /* Is this a delegated open? */
+ status = nfs4_set_lock_state(state, request);
+ if (status != 0)
+ goto out;
+ request->fl_flags |= FL_ACCESS;
+ status = do_vfs_lock(request->fl_file, request);
+ if (status < 0)
+ goto out;
+ down_read(&nfsi->rwsem);
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+ /* Yes: cache locks! */
+ /* ...but avoid races with delegation recall... */
+ request->fl_flags = fl_flags & ~FL_SLEEP;
+ status = do_vfs_lock(request->fl_file, request);
+ goto out_unlock;
+ }
+ status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
+ if (status != 0)
+ goto out_unlock;
+ /* Note: we always want to sleep here! */
+ request->fl_flags = fl_flags | FL_SLEEP;
+ if (do_vfs_lock(request->fl_file, request) < 0)
+ printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
+ "manager!\n", __func__);
+out_unlock:
+ up_read(&nfsi->rwsem);
+out:
+ request->fl_flags = fl_flags;
+ return status;
+}
+
+static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+ struct nfs4_exception exception = {
+ .state = state,
+ .inode = state->inode,
+ };
+ int err;
+
+ do {
+ err = _nfs4_proc_setlk(state, cmd, request);
+ if (err == -NFS4ERR_DENIED)
+ err = -EAGAIN;
+ err = nfs4_handle_exception(NFS_SERVER(state->inode),
+ err, &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int
+nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
+{
+ struct nfs_open_context *ctx;
+ struct nfs4_state *state;
+ unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
+ int status;
+
+ /* verify open state */
+ ctx = nfs_file_open_context(filp);
+ state = ctx->state;
+
+ if (request->fl_start < 0 || request->fl_end < 0)
+ return -EINVAL;
+
+ if (IS_GETLK(cmd)) {
+ if (state != NULL)
+ return nfs4_proc_getlk(state, F_GETLK, request);
+ return 0;
+ }
+
+ if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
+ return -EINVAL;
+
+ if (request->fl_type == F_UNLCK) {
+ if (state != NULL)
+ return nfs4_proc_unlck(state, cmd, request);
+ return 0;
+ }
+
+ if (state == NULL)
+ return -ENOLCK;
+ /*
+ * Don't rely on the VFS having checked the file open mode,
+ * since it won't do this for flock() locks.
+ */
+ switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
+ case F_RDLCK:
+ if (!(filp->f_mode & FMODE_READ))
+ return -EBADF;
+ break;
+ case F_WRLCK:
+ if (!(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+ }
+
+ do {
+ status = nfs4_proc_setlk(state, cmd, request);
+ if ((status != -EAGAIN) || IS_SETLK(cmd))
+ break;
+ timeout = nfs4_set_lock_task_retry(timeout);
+ status = -ERESTARTSYS;
+ if (signalled())
+ break;
+ } while (status < 0);
+ return status;
+}
+
+int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
+{
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs4_exception exception = { };
+ int err;
+
+ err = nfs4_set_lock_state(state, fl);
+ if (err != 0)
+ goto out;
+ do {
+ err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
+ switch (err) {
+ default:
+ printk(KERN_ERR "NFS: %s: unhandled error "
+ "%d.\n", __func__, err);
+ case 0:
+ case -ESTALE:
+ goto out;
+ case -NFS4ERR_EXPIRED:
+ nfs4_schedule_stateid_recovery(server, state);
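+ /* fall through */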
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+ nfs4_schedule_lease_recovery(server->nfs_client);
+ goto out;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+ nfs4_schedule_session_recovery(server->nfs_client->cl_session);
+ goto out;
+ case -ERESTARTSYS:
+ /*
+ * The show must go on: exit, but mark the
+ * stateid as needing recovery.
+ */
+ case -NFS4ERR_DELEG_REVOKED:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OPENMODE:
+ nfs4_schedule_stateid_recovery(server, state);
+ err = 0;
+ goto out;
+ case -EKEYEXPIRED:
+ /*
+ * User RPCSEC_GSS context has expired.
+ * We cannot recover this stateid now, so
+ * skip it and allow recovery thread to
+ * proceed.
+ */
+ err = 0;
+ goto out;
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
+ /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ err = 0;
+ goto out;
+ case -NFS4ERR_DELAY:
+ break;
+ }
+ err = nfs4_handle_exception(server, err, &exception);
+ } while (exception.retry);
+out:
+ return err;
+}
+
+struct nfs_release_lockowner_data {
+ struct nfs4_lock_state *lsp;
+ struct nfs_server *server;
+ struct nfs_release_lockowner_args args;
+};
+
+static void nfs4_release_lockowner_release(void *calldata)
+{
+ struct nfs_release_lockowner_data *data = calldata;
+ nfs4_free_lock_state(data->server, data->lsp);
+ kfree(calldata);
+}
+
+static const struct rpc_call_ops nfs4_release_lockowner_ops = {
+ .rpc_release = nfs4_release_lockowner_release,
+};
+
+int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
+{
+ struct nfs_server *server = lsp->ls_state->owner->so_server;
+ struct nfs_release_lockowner_data *data;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
+ };
+
+ if (server->nfs_client->cl_mvops->minor_version != 0)
+ return -EINVAL;
+ data = kmalloc(sizeof(*data), GFP_NOFS);
+ if (!data)
+ return -ENOMEM;
+ data->lsp = lsp;
+ data->server = server;
+ data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
+ data->args.lock_owner.id = lsp->ls_seqid.owner_id;
+ data->args.lock_owner.s_dev = server->s_dev;
+ msg.rpc_argp = &data->args;
+ rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
+ return 0;
+}
+
+#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
+
+static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
+ const void *buf, size_t buflen,
+ int flags, int type)
+{
+ if (strcmp(key, "") != 0)
+ return -EINVAL;
+
+ return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
+}
+
+static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
+ void *buf, size_t buflen, int type)
+{
+ if (strcmp(key, "") != 0)
+ return -EINVAL;
+
+ return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
+}
+
+static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
+ size_t list_len, const char *name,
+ size_t name_len, int type)
+{
+ size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
+
+ if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
+ return 0;
+
+ if (list && len <= list_len)
+ memcpy(list, XATTR_NAME_NFSV4_ACL, len);
+ return len;
+}
+
+/*
+ * nfs_fhget will use either the mounted_on_fileid or the fileid
+ */
+static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
+{
+ if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
+ (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
+ (fattr->valid & NFS_ATTR_FATTR_FSID) &&
+ (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
+ return;
+
+ fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
+ NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
+ fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ fattr->nlink = 2;
+}
+
+static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
+ const struct qstr *name,
+ struct nfs4_fs_locations *fs_locations,
+ struct page *page)
+{
+ struct nfs_server *server = NFS_SERVER(dir);
+ u32 bitmask[2] = {
+ [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
+ };
+ struct nfs4_fs_locations_arg args = {
+ .dir_fh = NFS_FH(dir),
+ .name = name,
+ .page = page,
+ .bitmask = bitmask,
+ };
+ struct nfs4_fs_locations_res res = {
+ .fs_locations = fs_locations,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ dprintk("%s: start\n", __func__);
+
+ /* Ask for the fileid of the absent filesystem if mounted_on_fileid
+ * is not supported */
+ if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
+ bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
+ else
+ bitmask[0] |= FATTR4_WORD0_FILEID;
+
+ nfs_fattr_init(&fs_locations->fattr);
+ fs_locations->server = server;
+ fs_locations->nlocations = 0;
+ status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
+ dprintk("%s: returned status = %d\n", __func__, status);
+ return status;
+}
+
+int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
+ const struct qstr *name,
+ struct nfs4_fs_locations *fs_locations,
+ struct page *page)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+ _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
+{
+ int status;
+ struct nfs4_secinfo_arg args = {
+ .dir_fh = NFS_FH(dir),
+ .name = name,
+ };
+ struct nfs4_secinfo_res res = {
+ .flavors = flavors,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ dprintk("NFS call secinfo %s\n", name->name);
+ status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
+ dprintk("NFS reply secinfo: %d\n", status);
+ return status;
+}
+
+int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
+ struct nfs4_secinfo_flavors *flavors)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(NFS_SERVER(dir),
+ _nfs4_proc_secinfo(dir, name, flavors),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+#ifdef CONFIG_NFS_V4_1
+/*
+ * Check the exchange flags returned by the server for invalid flags: flags
+ * outside the valid mask, both the PNFS and NON_PNFS flags set, or none of
+ * the NON_PNFS, PNFS, or DS flags set.
+ */
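+/* For instance, EXCHGID4_FLAG_USE_PNFS_MDS alone passes this check, while
+ * EXCHGID4_FLAG_USE_PNFS_MDS | EXCHGID4_FLAG_USE_NON_PNFS, or a reply with
+ * none of the role flags set, is rejected with -NFS4ERR_INVAL.
+ */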
+static int nfs4_check_cl_exchange_flags(u32 flags)
+{
+ if (flags & ~EXCHGID4_FLAG_MASK_R)
+ goto out_inval;
+ if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
+ (flags & EXCHGID4_FLAG_USE_NON_PNFS))
+ goto out_inval;
+ if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
+ goto out_inval;
+ return NFS_OK;
+out_inval:
+ return -NFS4ERR_INVAL;
+}
+
+static bool
+nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
+{
+ if (a->server_scope_sz == b->server_scope_sz &&
+ memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
+ return true;
+
+ return false;
+}
+
+/*
+ * nfs4_proc_exchange_id()
+ *
+ * Since the clientid has expired, all compounds using sessions
+ * associated with the stale clientid will be returning
+ * NFS4ERR_BADSESSION in the sequence operation, and will therefore
+ * be in some phase of session reset.
+ */
+int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ nfs4_verifier verifier;
+ struct nfs41_exchange_id_args args = {
+ .verifier = &verifier,
+ .client = clp,
+ .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
+ };
+ struct nfs41_exchange_id_res res = {
+ .client = clp,
+ };
+ int status;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ .rpc_cred = cred,
+ };
+
+ dprintk("--> %s\n", __func__);
+ BUG_ON(clp == NULL);
+
+ nfs4_construct_boot_verifier(clp, &verifier);
+
+ args.id_len = scnprintf(args.id, sizeof(args.id),
+ "%s/%s/%u",
+ clp->cl_ipaddr,
+ clp->cl_rpcclient->cl_nodename,
+ clp->cl_rpcclient->cl_auth->au_flavor);
+
+ res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL);
+ if (unlikely(!res.server_scope)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL);
+ if (unlikely(!res.impl_id)) {
+ status = -ENOMEM;
+ goto out_server_scope;
+ }
+
+ status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ if (!status)
+ status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
+
+ if (!status) {
+ /* use the most recent implementation id */
+ kfree(clp->impl_id);
+ clp->impl_id = res.impl_id;
+ } else
+ kfree(res.impl_id);
+
+ if (!status) {
+ if (clp->server_scope &&
+ !nfs41_same_server_scope(clp->server_scope,
+ res.server_scope)) {
+ dprintk("%s: server_scope mismatch detected\n",
+ __func__);
+ set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
+ kfree(clp->server_scope);
+ clp->server_scope = NULL;
+ }
+
+ if (!clp->server_scope) {
+ clp->server_scope = res.server_scope;
+ goto out;
+ }
+ }
+
+out_server_scope:
+ kfree(res.server_scope);
+out:
+ if (clp->impl_id)
+ dprintk("%s: Server Implementation ID: "
+ "domain: %s, name: %s, date: %llu,%u\n",
+ __func__, clp->impl_id->domain, clp->impl_id->name,
+ clp->impl_id->date.seconds,
+ clp->impl_id->date.nseconds);
+ dprintk("<-- %s status= %d\n", __func__, status);
+ return status;
+}
+
+struct nfs4_get_lease_time_data {
+ struct nfs4_get_lease_time_args *args;
+ struct nfs4_get_lease_time_res *res;
+ struct nfs_client *clp;
+};
+
+static void nfs4_get_lease_time_prepare(struct rpc_task *task,
+ void *calldata)
+{
+ int ret;
+ struct nfs4_get_lease_time_data *data =
+ (struct nfs4_get_lease_time_data *)calldata;
+
+ dprintk("--> %s\n", __func__);
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ /* just set up the sequence; do not trigger session recovery,
+ since we're invoked from within one */
+ ret = nfs41_setup_sequence(data->clp->cl_session,
+ &data->args->la_seq_args,
+ &data->res->lr_seq_res, task);
+
+ BUG_ON(ret == -EAGAIN);
+ rpc_call_start(task);
+ dprintk("<-- %s\n", __func__);
+}
+
+/*
+ * Called from nfs4_state_manager thread for session setup, so don't recover
+ * from sequence operation or clientid errors.
+ */
+static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_get_lease_time_data *data =
+ (struct nfs4_get_lease_time_data *)calldata;
+
+ dprintk("--> %s\n", __func__);
+ if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
+ return;
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+ dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
+ rpc_delay(task, NFS4_POLL_RETRY_MIN);
+ task->tk_status = 0;
+ /* fall through */
+ case -NFS4ERR_RETRY_UNCACHED_REP:
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ dprintk("<-- %s\n", __func__);
+}
+
+static const struct rpc_call_ops nfs4_get_lease_time_ops = {
+ .rpc_call_prepare = nfs4_get_lease_time_prepare,
+ .rpc_call_done = nfs4_get_lease_time_done,
+};
+
+int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
+{
+ struct rpc_task *task;
+ struct nfs4_get_lease_time_args args;
+ struct nfs4_get_lease_time_res res = {
+ .lr_fsinfo = fsinfo,
+ };
+ struct nfs4_get_lease_time_data data = {
+ .args = &args,
+ .res = &res,
+ .clp = clp,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ struct rpc_task_setup task_setup = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_get_lease_time_ops,
+ .callback_data = &data,
+ .flags = RPC_TASK_TIMEOUT,
+ };
+ int status;
+
+ nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
+ dprintk("--> %s\n", __func__);
+ task = rpc_run_task(&task_setup);
+
+ if (IS_ERR(task))
+ status = PTR_ERR(task);
+ else {
+ status = task->tk_status;
+ rpc_put_task(task);
+ }
+ dprintk("<-- %s return %d\n", __func__, status);
+
+ return status;
+}
+
+static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
+{
+ return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
+}
+
+static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
+ struct nfs4_slot *new,
+ u32 max_slots,
+ u32 ivalue)
+{
+ struct nfs4_slot *old = NULL;
+ u32 i;
+
+ spin_lock(&tbl->slot_tbl_lock);
+ if (new) {
+ old = tbl->slots;
+ tbl->slots = new;
+ tbl->max_slots = max_slots;
+ }
+ tbl->highest_used_slotid = -1; /* no slot is currently used */
+ for (i = 0; i < tbl->max_slots; i++)
+ tbl->slots[i].seq_nr = ivalue;
+ spin_unlock(&tbl->slot_tbl_lock);
+ kfree(old);
+}
+
+/*
+ * (re)Initialise a slot table
+ */
+static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
+ u32 ivalue)
+{
+ struct nfs4_slot *new = NULL;
+ int ret = -ENOMEM;
+
+ dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
+ max_reqs, tbl->max_slots);
+
+ /* Does the newly negotiated max_reqs match the existing slot table? */
+ if (max_reqs != tbl->max_slots) {
+ new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
+ if (!new)
+ goto out;
+ }
+ ret = 0;
+
+ nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
+ dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
+ tbl, tbl->slots, tbl->max_slots);
+out:
+ dprintk("<-- %s: return %d\n", __func__, ret);
+ return ret;
+}
+
+/* Destroy the slot table */
+static void nfs4_destroy_slot_tables(struct nfs4_session *session)
+{
+ if (session->fc_slot_table.slots != NULL) {
+ kfree(session->fc_slot_table.slots);
+ session->fc_slot_table.slots = NULL;
+ }
+ if (session->bc_slot_table.slots != NULL) {
+ kfree(session->bc_slot_table.slots);
+ session->bc_slot_table.slots = NULL;
+ }
+}
+
+/*
+ * Initialize or reset the forechannel and backchannel tables
+ */
+static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
+{
+ struct nfs4_slot_table *tbl;
+ int status;
+
+ dprintk("--> %s\n", __func__);
+ /* Fore channel */
+ tbl = &ses->fc_slot_table;
+ status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
+ if (status) /* -ENOMEM */
+ return status;
+ /* Back channel */
+ tbl = &ses->bc_slot_table;
+ status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
+ if (status && tbl->slots == NULL)
+ /* Fore and back channel share a connection so get
+ * both slot tables or neither */
+ nfs4_destroy_slot_tables(ses);
+ return status;
+}
+
+struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session;
+ struct nfs4_slot_table *tbl;
+
+ session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
+ if (!session)
+ return NULL;
+
+ tbl = &session->fc_slot_table;
+ tbl->highest_used_slotid = NFS4_NO_SLOT;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+ init_completion(&tbl->complete);
+
+ tbl = &session->bc_slot_table;
+ tbl->highest_used_slotid = NFS4_NO_SLOT;
+ spin_lock_init(&tbl->slot_tbl_lock);
+ rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+ init_completion(&tbl->complete);
+
+ session->session_state = 1 << NFS4_SESSION_INITING;
+
+ session->clp = clp;
+ return session;
+}
+
+void nfs4_destroy_session(struct nfs4_session *session)
+{
+ struct rpc_xprt *xprt;
+
+ nfs4_proc_destroy_session(session);
+
+ rcu_read_lock();
+ xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
+ rcu_read_unlock();
+ dprintk("%s Destroy backchannel for xprt %p\n",
+ __func__, xprt);
+ xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
+ nfs4_destroy_slot_tables(session);
+ kfree(session);
+}
+
+/*
+ * Initialize the values to be used by the client in CREATE_SESSION.
+ * If nfs4_init_session set the fore channel request and response sizes,
+ * use them.
+ *
+ * Set the back channel max_resp_sz_cached to zero to force the client to
+ * always set csa_cachethis to FALSE because the current implementation
+ * of the back channel DRC only supports caching the CB_SEQUENCE operation.
+ */
+static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
+{
+ struct nfs4_session *session = args->client->cl_session;
+ unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
+ mxresp_sz = session->fc_attrs.max_resp_sz;
+
+ if (mxrqst_sz == 0)
+ mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
+ if (mxresp_sz == 0)
+ mxresp_sz = NFS_MAX_FILE_IO_SIZE;
+ /* Fore channel attributes */
+ args->fc_attrs.max_rqst_sz = mxrqst_sz;
+ args->fc_attrs.max_resp_sz = mxresp_sz;
+ args->fc_attrs.max_ops = NFS4_MAX_OPS;
+ args->fc_attrs.max_reqs = max_session_slots;
+
+ dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
+ "max_ops=%u max_reqs=%u\n",
+ __func__,
+ args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
+ args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
+
+ /* Back channel attributes */
+ args->bc_attrs.max_rqst_sz = PAGE_SIZE;
+ args->bc_attrs.max_resp_sz = PAGE_SIZE;
+ args->bc_attrs.max_resp_sz_cached = 0;
+ args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
+ args->bc_attrs.max_reqs = 1;
+
+ dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
+ "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
+ __func__,
+ args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
+ args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
+ args->bc_attrs.max_reqs);
+}
+
+static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+{
+ struct nfs4_channel_attrs *sent = &args->fc_attrs;
+ struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
+
+ if (rcvd->max_resp_sz > sent->max_resp_sz)
+ return -EINVAL;
+ /*
+ * Our requested max_ops is the minimum we need; we're not
+ * prepared to break up compounds into smaller pieces than that.
+ * So, no point even trying to continue if the server won't
+ * cooperate:
+ */
+ if (rcvd->max_ops < sent->max_ops)
+ return -EINVAL;
+ if (rcvd->max_reqs == 0)
+ return -EINVAL;
+ if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
+ rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
+ return 0;
+}
+
+static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+{
+ struct nfs4_channel_attrs *sent = &args->bc_attrs;
+ struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
+
+ if (rcvd->max_rqst_sz > sent->max_rqst_sz)
+ return -EINVAL;
+ if (rcvd->max_resp_sz < sent->max_resp_sz)
+ return -EINVAL;
+ if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
+ return -EINVAL;
+ /* These would render the backchannel useless: */
+ if (rcvd->max_ops != sent->max_ops)
+ return -EINVAL;
+ if (rcvd->max_reqs != sent->max_reqs)
+ return -EINVAL;
+ return 0;
+}
+
+static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
+ struct nfs4_session *session)
+{
+ int ret;
+
+ ret = nfs4_verify_fore_channel_attrs(args, session);
+ if (ret)
+ return ret;
+ return nfs4_verify_back_channel_attrs(args, session);
+}
+
+static int _nfs4_proc_create_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session = clp->cl_session;
+ struct nfs41_create_session_args args = {
+ .client = clp,
+ .cb_program = NFS4_CALLBACK,
+ };
+ struct nfs41_create_session_res res = {
+ .client = clp,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ nfs4_init_channel_attrs(&args);
+ args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
+
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+
+ if (!status)
+ /* Verify the session's negotiated channel_attrs values */
+ status = nfs4_verify_channel_attrs(&args, session);
+ if (!status) {
+ /* Increment the clientid slot sequence id */
+ clp->cl_seqid++;
+ }
+
+ return status;
+}
+
+/*
+ * Issues a CREATE_SESSION operation to the server.
+ * It is the responsibility of the caller to verify that the session has
+ * expired before calling this routine.
+ */
+int nfs4_proc_create_session(struct nfs_client *clp)
+{
+ int status;
+ unsigned *ptr;
+ struct nfs4_session *session = clp->cl_session;
+
+ dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
+
+ status = _nfs4_proc_create_session(clp);
+ if (status)
+ goto out;
+
+ /* Init or reset the session slot tables */
+ status = nfs4_setup_session_slot_tables(session);
+ dprintk("slot table setup returned %d\n", status);
+ if (status)
+ goto out;
+
+ ptr = (unsigned *)&session->sess_id.data[0];
+ dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
+ clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
+out:
+ dprintk("<-- %s\n", __func__);
+ return status;
+}
+
+/*
+ * Issue the over-the-wire RPC DESTROY_SESSION.
+ * The caller must serialize access to this routine.
+ */
+int nfs4_proc_destroy_session(struct nfs4_session *session)
+{
+ int status = 0;
+ struct rpc_message msg;
+
+ dprintk("--> nfs4_proc_destroy_session\n");
+
+ /* session is still being setup */
+ if (session->clp->cl_cons_state != NFS_CS_READY)
+ return status;
+
+ msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
+ msg.rpc_argp = session;
+ msg.rpc_resp = NULL;
+ msg.rpc_cred = NULL;
+ status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+
+ if (status)
+ printk(KERN_WARNING
+ "NFS: Got error %d from the server on DESTROY_SESSION. "
+ "Session has been destroyed regardless...\n", status);
+
+ dprintk("<-- nfs4_proc_destroy_session\n");
+ return status;
+}
+
+int nfs4_init_session(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_session *session;
+ unsigned int rsize, wsize;
+ int ret;
+
+ if (!nfs4_has_session(clp))
+ return 0;
+
+ session = clp->cl_session;
+ if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
+ return 0;
+
+ rsize = server->rsize;
+ if (rsize == 0)
+ rsize = NFS_MAX_FILE_IO_SIZE;
+ wsize = server->wsize;
+ if (wsize == 0)
+ wsize = NFS_MAX_FILE_IO_SIZE;
+
+ session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
+ session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
+
+ ret = nfs4_recover_expired_lease(server);
+ if (!ret)
+ ret = nfs4_check_client_ready(clp);
+ return ret;
+}
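+
+/*
+ * Sizing sketch for the two assignments above (hypothetical numbers, for
+ * illustration only): with a negotiated wsize of 65536 bytes and an XDR
+ * write overhead of, say, 1024 bytes, fc_attrs.max_rqst_sz is requested
+ * as 65536 + 1024 = 66560, i.e. one full WRITE payload plus the encoding
+ * overhead of the enclosing compound; max_resp_sz is derived the same
+ * way from rsize and the READ reply overhead.
+ */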
+
+int nfs4_init_ds_session(struct nfs_client *clp)
+{
+ struct nfs4_session *session = clp->cl_session;
+ int ret;
+
+ if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state))
+ return 0;
+
+ ret = nfs4_client_recover_expired_lease(clp);
+ if (!ret)
+ /* Test for the DS role */
+ if (!is_ds_client(clp))
+ ret = -ENODEV;
+ if (!ret)
+ ret = nfs4_check_client_ready(clp);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
+
+/*
+ * Renew the cl_session lease.
+ */
+struct nfs4_sequence_data {
+ struct nfs_client *clp;
+ struct nfs4_sequence_args args;
+ struct nfs4_sequence_res res;
+};
+
+static void nfs41_sequence_release(void *data)
+{
+ struct nfs4_sequence_data *calldata = data;
+ struct nfs_client *clp = calldata->clp;
+
+ if (atomic_read(&clp->cl_count) > 1)
+ nfs4_schedule_state_renewal(clp);
+ nfs_put_client(clp);
+ kfree(calldata);
+}
+
+static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
+{
+ switch (task->tk_status) {
+ case -NFS4ERR_DELAY:
+ rpc_delay(task, NFS4_POLL_RETRY_MAX);
+ return -EAGAIN;
+ default:
+ nfs4_schedule_lease_recovery(clp);
+ }
+ return 0;
+}
+
+static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
+{
+ struct nfs4_sequence_data *calldata = data;
+ struct nfs_client *clp = calldata->clp;
+
+ if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
+ return;
+
+ if (task->tk_status < 0) {
+ dprintk("%s ERROR %d\n", __func__, task->tk_status);
+ if (atomic_read(&clp->cl_count) == 1)
+ goto out;
+
+ if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ }
+ dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
+out:
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_sequence_data *calldata = data;
+ struct nfs_client *clp = calldata->clp;
+ struct nfs4_sequence_args *args;
+ struct nfs4_sequence_res *res;
+
+ args = task->tk_msg.rpc_argp;
+ res = task->tk_msg.rpc_resp;
+
+ if (nfs41_setup_sequence(clp->cl_session, args, res, task))
+ return;
+ rpc_call_start(task);
+}
+
+static const struct rpc_call_ops nfs41_sequence_ops = {
+ .rpc_call_done = nfs41_sequence_call_done,
+ .rpc_call_prepare = nfs41_sequence_prepare,
+ .rpc_release = nfs41_sequence_release,
+};
+
+static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ struct nfs4_sequence_data *calldata;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
+ .rpc_cred = cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs41_sequence_ops,
+ .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
+ };
+
+ if (!atomic_inc_not_zero(&clp->cl_count))
+ return ERR_PTR(-EIO);
+ calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
+ if (calldata == NULL) {
+ nfs_put_client(clp);
+ return ERR_PTR(-ENOMEM);
+ }
+ nfs41_init_sequence(&calldata->args, &calldata->res, 0);
+ msg.rpc_argp = &calldata->args;
+ msg.rpc_resp = &calldata->res;
+ calldata->clp = clp;
+ task_setup_data.callback_data = calldata;
+
+ return rpc_run_task(&task_setup_data);
+}
+
+static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
+{
+ struct rpc_task *task;
+ int ret = 0;
+
+ if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
+ return 0;
+ task = _nfs41_proc_sequence(clp, cred);
+ if (IS_ERR(task))
+ ret = PTR_ERR(task);
+ else
+ rpc_put_task_async(task);
+ dprintk("<-- %s status=%d\n", __func__, ret);
+ return ret;
+}
+
+static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ struct rpc_task *task;
+ int ret;
+
+ task = _nfs41_proc_sequence(clp, cred);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto out;
+ }
+ ret = rpc_wait_for_completion_task(task);
+ if (!ret) {
+ struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
+
+ if (task->tk_status == 0)
+ nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+ ret = task->tk_status;
+ }
+ rpc_put_task(task);
+out:
+ dprintk("<-- %s status=%d\n", __func__, ret);
+ return ret;
+}
+
+struct nfs4_reclaim_complete_data {
+ struct nfs_client *clp;
+ struct nfs41_reclaim_complete_args arg;
+ struct nfs41_reclaim_complete_res res;
+};
+
+static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
+{
+ struct nfs4_reclaim_complete_data *calldata = data;
+
+ rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
+ if (nfs41_setup_sequence(calldata->clp->cl_session,
+ &calldata->arg.seq_args,
+ &calldata->res.seq_res, task))
+ return;
+
+ rpc_call_start(task);
+}
+
+static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
+{
+ switch (task->tk_status) {
+ case 0:
+ case -NFS4ERR_COMPLETE_ALREADY:
+ case -NFS4ERR_WRONG_CRED: /* What to do here? */
+ break;
+ case -NFS4ERR_DELAY:
+ rpc_delay(task, NFS4_POLL_RETRY_MAX);
+ /* fall through */
+ case -NFS4ERR_RETRY_UNCACHED_REP:
+ return -EAGAIN;
+ default:
+ nfs4_schedule_lease_recovery(clp);
+ }
+ return 0;
+}
+
+static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
+{
+ struct nfs4_reclaim_complete_data *calldata = data;
+ struct nfs_client *clp = calldata->clp;
+ struct nfs4_sequence_res *res = &calldata->res.seq_res;
+
+ dprintk("--> %s\n", __func__);
+ if (!nfs41_sequence_done(task, res))
+ return;
+
+ if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs4_free_reclaim_complete_data(void *data)
+{
+ struct nfs4_reclaim_complete_data *calldata = data;
+
+ kfree(calldata);
+}
+
+static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
+ .rpc_call_prepare = nfs4_reclaim_complete_prepare,
+ .rpc_call_done = nfs4_reclaim_complete_done,
+ .rpc_release = nfs4_free_reclaim_complete_data,
+};
+
+/*
+ * Issue a global reclaim complete.
+ */
+static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
+{
+ struct nfs4_reclaim_complete_data *calldata;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_reclaim_complete_call_ops,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status = -ENOMEM;
+
+ dprintk("--> %s\n", __func__);
+ calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
+ if (calldata == NULL)
+ goto out;
+ calldata->clp = clp;
+ calldata->arg.one_fs = 0;
+
+ nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
+ msg.rpc_argp = &calldata->arg;
+ msg.rpc_resp = &calldata->res;
+ task_setup_data.callback_data = calldata;
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task)) {
+ status = PTR_ERR(task);
+ goto out;
+ }
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status == 0)
+ status = task->tk_status;
+ rpc_put_task(task);
+ return 0;
+out:
+ dprintk("<-- %s status=%d\n", __func__, status);
+ return status;
+}
+
+static void
+nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutget *lgp = calldata;
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+
+ dprintk("--> %s\n", __func__);
+ /* Note that there is a race here, where a CB_LAYOUTRECALL can come in
+ * right now covering the LAYOUTGET we are about to send.
+ * However, that is not so catastrophic, and there seems
+ * to be no way to prevent it completely.
+ */
+ if (nfs4_setup_sequence(server, &lgp->args.seq_args,
+ &lgp->res.seq_res, task))
+ return;
+ if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
+ NFS_I(lgp->args.inode)->layout,
+ lgp->args.ctx->state)) {
+ rpc_exit(task, NFS4_OK);
+ return;
+ }
+ rpc_call_start(task);
+}
+
+static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutget *lgp = calldata;
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+
+ dprintk("--> %s\n", __func__);
+
+ if (!nfs4_sequence_done(task, &lgp->res.seq_res))
+ return;
+
+ switch (task->tk_status) {
+ case 0:
+ break;
+ case -NFS4ERR_LAYOUTTRYLATER:
+ case -NFS4ERR_RECALLCONFLICT:
+ task->tk_status = -NFS4ERR_DELAY;
+ /* Fall through */
+ default:
+ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ }
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs4_layoutget_release(void *calldata)
+{
+ struct nfs4_layoutget *lgp = calldata;
+
+ dprintk("--> %s\n", __func__);
+ put_nfs_open_context(lgp->args.ctx);
+ kfree(calldata);
+ dprintk("<-- %s\n", __func__);
+}
+
+static const struct rpc_call_ops nfs4_layoutget_call_ops = {
+ .rpc_call_prepare = nfs4_layoutget_prepare,
+ .rpc_call_done = nfs4_layoutget_done,
+ .rpc_release = nfs4_layoutget_release,
+};
+
+int nfs4_proc_layoutget(struct nfs4_layoutget *lgp)
+{
+ struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
+ .rpc_argp = &lgp->args,
+ .rpc_resp = &lgp->res,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = server->client,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_layoutget_call_ops,
+ .callback_data = lgp,
+ .flags = RPC_TASK_ASYNC,
+ };
+ int status = 0;
+
+ dprintk("--> %s\n", __func__);
+
+ lgp->res.layoutp = &lgp->args.layout;
+ lgp->res.seq_res.sr_slot = NULL;
+ nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status == 0)
+ status = task->tk_status;
+ if (status == 0)
+ status = pnfs_layout_process(lgp);
+ rpc_put_task(task);
+ dprintk("<-- %s status=%d\n", __func__, status);
+ return status;
+}
+
+static void
+nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutreturn *lrp = calldata;
+
+ dprintk("--> %s\n", __func__);
+ if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
+ &lrp->res.seq_res, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutreturn *lrp = calldata;
+ struct nfs_server *server;
+ struct pnfs_layout_hdr *lo = lrp->args.layout;
+
+ dprintk("--> %s\n", __func__);
+
+ if (!nfs4_sequence_done(task, &lrp->res.seq_res))
+ return;
+
+ server = NFS_SERVER(lrp->args.inode);
+ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ spin_lock(&lo->plh_inode->i_lock);
+ if (task->tk_status == 0) {
+ if (lrp->res.lrs_present) {
+ pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+ } else
+ BUG_ON(!list_empty(&lo->plh_segs));
+ }
+ lo->plh_block_lgets--;
+ spin_unlock(&lo->plh_inode->i_lock);
+ dprintk("<-- %s\n", __func__);
+}
+
+static void nfs4_layoutreturn_release(void *calldata)
+{
+ struct nfs4_layoutreturn *lrp = calldata;
+
+ dprintk("--> %s\n", __func__);
+ put_layout_hdr(lrp->args.layout);
+ kfree(calldata);
+ dprintk("<-- %s\n", __func__);
+}
+
+static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
+ .rpc_call_prepare = nfs4_layoutreturn_prepare,
+ .rpc_call_done = nfs4_layoutreturn_done,
+ .rpc_release = nfs4_layoutreturn_release,
+};
+
+int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
+{
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
+ .rpc_argp = &lrp->args,
+ .rpc_resp = &lrp->res,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = lrp->clp->cl_rpcclient,
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_layoutreturn_call_ops,
+ .callback_data = lrp,
+ };
+ int status;
+
+ dprintk("--> %s\n", __func__);
+ nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ status = task->tk_status;
+ dprintk("<-- %s status=%d\n", __func__, status);
+ rpc_put_task(task);
+ return status;
+}
+
+/*
+ * Retrieve the list of Data Server devices from the MDS.
+ */
+static int _nfs4_getdevicelist(struct nfs_server *server,
+ const struct nfs_fh *fh,
+ struct pnfs_devicelist *devlist)
+{
+ struct nfs4_getdevicelist_args args = {
+ .fh = fh,
+ .layoutclass = server->pnfs_curr_ld->id,
+ };
+ struct nfs4_getdevicelist_res res = {
+ .devlist = devlist,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ dprintk("--> %s\n", __func__);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
+ &res.seq_res, 0);
+ dprintk("<-- %s status=%d\n", __func__, status);
+ return status;
+}
+
+int nfs4_proc_getdevicelist(struct nfs_server *server,
+ const struct nfs_fh *fh,
+ struct pnfs_devicelist *devlist)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_getdevicelist(server, fh, devlist),
+ &exception);
+ } while (exception.retry);
+
+ dprintk("%s: err=%d, num_devs=%u\n", __func__,
+ err, devlist->num_devs);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
+
+static int
+_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
+{
+ struct nfs4_getdeviceinfo_args args = {
+ .pdev = pdev,
+ };
+ struct nfs4_getdeviceinfo_res res = {
+ .pdev = pdev,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ dprintk("--> %s\n", __func__);
+ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+ dprintk("<-- %s status=%d\n", __func__, status);
+
+ return status;
+}
+
+int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
+{
+ struct nfs4_exception exception = { };
+ int err;
+
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_proc_getdeviceinfo(server, pdev),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
+
+static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutcommit_data *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->args.inode);
+
+ if (nfs4_setup_sequence(server, &data->args.seq_args,
+ &data->res.seq_res, task))
+ return;
+ rpc_call_start(task);
+}
+
+static void
+nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs4_layoutcommit_data *data = calldata;
+ struct nfs_server *server = NFS_SERVER(data->args.inode);
+
+ if (!nfs4_sequence_done(task, &data->res.seq_res))
+ return;
+
+ switch (task->tk_status) { /* Just ignore these failures */
+ case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
+ case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
+ case -NFS4ERR_BADLAYOUT: /* no layout */
+ case -NFS4ERR_GRACE: /* loca_reclaim always false */
+ task->tk_status = 0;
+ break;
+ case 0:
+ nfs_post_op_update_inode_force_wcc(data->args.inode,
+ data->res.fattr);
+ break;
+ default:
+ if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ }
+}
+
+static void nfs4_layoutcommit_release(void *calldata)
+{
+ struct nfs4_layoutcommit_data *data = calldata;
+ struct pnfs_layout_segment *lseg, *tmp;
+ unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
+
+ pnfs_cleanup_layoutcommit(data);
+ /* Matched by references in pnfs_set_layoutcommit */
+ list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
+ list_del_init(&lseg->pls_lc_list);
+ if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
+ &lseg->pls_flags))
+ put_lseg(lseg);
+ }
+
+ clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
+ smp_mb__after_clear_bit();
+ wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
+
+ put_rpccred(data->cred);
+ kfree(data);
+}
+
+static const struct rpc_call_ops nfs4_layoutcommit_ops = {
+ .rpc_call_prepare = nfs4_layoutcommit_prepare,
+ .rpc_call_done = nfs4_layoutcommit_done,
+ .rpc_release = nfs4_layoutcommit_release,
+};
+
+int
+nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = data->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .task = &data->task,
+ .rpc_client = NFS_CLIENT(data->args.inode),
+ .rpc_message = &msg,
+ .callback_ops = &nfs4_layoutcommit_ops,
+ .callback_data = data,
+ .flags = RPC_TASK_ASYNC,
+ };
+ struct rpc_task *task;
+ int status = 0;
+
+ dprintk("NFS: %4d initiating layoutcommit call. sync %d "
+ "lbw: %llu inode %lu\n",
+ data->task.tk_pid, sync,
+ data->args.lastbytewritten,
+ data->args.inode->i_ino);
+
+ nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ if (!sync)
+ goto out;
+ status = nfs4_wait_for_completion_rpc_task(task);
+ if (status != 0)
+ goto out;
+ status = task->tk_status;
+out:
+ dprintk("%s: status %d\n", __func__, status);
+ rpc_put_task(task);
+ return status;
+}
+
+static int
+_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
+{
+ struct nfs41_secinfo_no_name_args args = {
+ .style = SECINFO_STYLE_CURRENT_FH,
+ };
+ struct nfs4_secinfo_res res = {
+ .flavors = flavors,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
+}
+
+static int
+nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
+ switch (err) {
+ case 0:
+ case -NFS4ERR_WRONGSEC:
+ case -NFS4ERR_NOTSUPP:
+ goto out;
+ default:
+ err = nfs4_handle_exception(server, err, &exception);
+ }
+ } while (exception.retry);
+out:
+ return err;
+}
+
+static int
+nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ int err;
+ struct page *page;
+ rpc_authflavor_t flavor;
+ struct nfs4_secinfo_flavors *flavors;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ flavors = page_address(page);
+ err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
+
+ /*
+ * Fall back on "guess and check" method if
+ * the server doesn't support SECINFO_NO_NAME
+ */
+ if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
+ err = nfs4_find_root_sec(server, fhandle, info);
+ goto out_freepage;
+ }
+ if (err)
+ goto out_freepage;
+
+ flavor = nfs_find_best_sec(flavors);
+ err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
+
+out_freepage:
+ put_page(page);
+ if (err == -EACCES)
+ return -EPERM;
+out:
+ return err;
+}
+
+static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
+{
+ int status;
+ struct nfs41_test_stateid_args args = {
+ .stateid = stateid,
+ };
+ struct nfs41_test_stateid_res res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
+ status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
+
+ if (status == NFS_OK)
+ return res.status;
+ return status;
+}
+
+static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs41_test_stateid(server, stateid),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
+{
+ struct nfs41_free_stateid_args args = {
+ .stateid = stateid,
+ };
+ struct nfs41_free_stateid_res res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+
+ nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
+ return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
+}
+
+static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
+{
+ struct nfs4_exception exception = { };
+ int err;
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_free_stateid(server, stateid),
+ &exception);
+ } while (exception.retry);
+ return err;
+}
+
+static bool nfs41_match_stateid(const nfs4_stateid *s1,
+ const nfs4_stateid *s2)
+{
+ if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
+ return false;
+
+ if (s1->seqid == s2->seqid)
+ return true;
+ if (s1->seqid == 0 || s2->seqid == 0)
+ return true;
+
+ return false;
+}
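+
+/*
+ * Examples of the wildcard rule above (hypothetical stateids; under the
+ * NFSv4.1 seqid semantics a seqid of 0 matches any seqid):
+ *
+ *	{ seqid = 3, other = X } vs { seqid = 3, other = X } -> match
+ *	{ seqid = 0, other = X } vs { seqid = 7, other = X } -> match
+ *	{ seqid = 3, other = X } vs { seqid = 4, other = X } -> no match
+ *	{ seqid = 3, other = X } vs { seqid = 3, other = Y } -> no match
+ */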
+
+#endif /* CONFIG_NFS_V4_1 */
+
+static bool nfs4_match_stateid(const nfs4_stateid *s1,
+ const nfs4_stateid *s2)
+{
+ return nfs4_stateid_match(s1, s2);
+}
+
+static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
+ .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
+ .recover_open = nfs4_open_reclaim,
+ .recover_lock = nfs4_lock_reclaim,
+ .establish_clid = nfs4_init_clientid,
+ .get_clid_cred = nfs4_get_setclientid_cred,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
+ .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
+ .recover_open = nfs4_open_reclaim,
+ .recover_lock = nfs4_lock_reclaim,
+ .establish_clid = nfs41_init_clientid,
+ .get_clid_cred = nfs4_get_exchange_id_cred,
+ .reclaim_complete = nfs41_proc_reclaim_complete,
+};
+#endif /* CONFIG_NFS_V4_1 */
+
+static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
+ .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
+ .recover_open = nfs4_open_expired,
+ .recover_lock = nfs4_lock_expired,
+ .establish_clid = nfs4_init_clientid,
+ .get_clid_cred = nfs4_get_setclientid_cred,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
+ .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
+ .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
+ .recover_open = nfs41_open_expired,
+ .recover_lock = nfs41_lock_expired,
+ .establish_clid = nfs41_init_clientid,
+ .get_clid_cred = nfs4_get_exchange_id_cred,
+};
+#endif /* CONFIG_NFS_V4_1 */
+
+static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
+ .sched_state_renewal = nfs4_proc_async_renew,
+ .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
+ .renew_lease = nfs4_proc_renew,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
+ .sched_state_renewal = nfs41_proc_async_sequence,
+ .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
+ .renew_lease = nfs4_proc_sequence,
+};
+#endif
+
+static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
+ .minor_version = 0,
+ .call_sync = _nfs4_call_sync,
+ .match_stateid = nfs4_match_stateid,
+ .find_root_sec = nfs4_find_root_sec,
+ .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
+ .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
+ .state_renewal_ops = &nfs40_state_renewal_ops,
+};
+
+#if defined(CONFIG_NFS_V4_1)
+static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
+ .minor_version = 1,
+ .call_sync = _nfs4_call_sync_session,
+ .match_stateid = nfs41_match_stateid,
+ .find_root_sec = nfs41_find_root_sec,
+ .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
+ .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
+ .state_renewal_ops = &nfs41_state_renewal_ops,
+};
+#endif
+
+const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+ [0] = &nfs_v4_0_minor_ops,
+#if defined(CONFIG_NFS_V4_1)
+ [1] = &nfs_v4_1_minor_ops,
+#endif
+};
+
+static const struct inode_operations nfs4_file_inode_operations = {
+ .permission = nfs_permission,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+ .getxattr = generic_getxattr,
+ .setxattr = generic_setxattr,
+ .listxattr = generic_listxattr,
+ .removexattr = generic_removexattr,
+};
+
+const struct nfs_rpc_ops nfs_v4_clientops = {
+ .version = 4, /* protocol version */
+ .dentry_ops = &nfs4_dentry_operations,
+ .dir_inode_ops = &nfs4_dir_inode_operations,
+ .file_inode_ops = &nfs4_file_inode_operations,
+ .file_ops = &nfs4_file_operations,
+ .getroot = nfs4_proc_get_root,
+ .getattr = nfs4_proc_getattr,
+ .setattr = nfs4_proc_setattr,
+ .lookup = nfs4_proc_lookup,
+ .access = nfs4_proc_access,
+ .readlink = nfs4_proc_readlink,
+ .create = nfs4_proc_create,
+ .remove = nfs4_proc_remove,
+ .unlink_setup = nfs4_proc_unlink_setup,
+ .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
+ .unlink_done = nfs4_proc_unlink_done,
+ .rename = nfs4_proc_rename,
+ .rename_setup = nfs4_proc_rename_setup,
+ .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
+ .rename_done = nfs4_proc_rename_done,
+ .link = nfs4_proc_link,
+ .symlink = nfs4_proc_symlink,
+ .mkdir = nfs4_proc_mkdir,
+ .rmdir = nfs4_proc_remove,
+ .readdir = nfs4_proc_readdir,
+ .mknod = nfs4_proc_mknod,
+ .statfs = nfs4_proc_statfs,
+ .fsinfo = nfs4_proc_fsinfo,
+ .pathconf = nfs4_proc_pathconf,
+ .set_capabilities = nfs4_server_capabilities,
+ .decode_dirent = nfs4_decode_dirent,
+ .read_setup = nfs4_proc_read_setup,
+ .read_rpc_prepare = nfs4_proc_read_rpc_prepare,
+ .read_done = nfs4_read_done,
+ .write_setup = nfs4_proc_write_setup,
+ .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
+ .write_done = nfs4_write_done,
+ .commit_setup = nfs4_proc_commit_setup,
+ .commit_done = nfs4_commit_done,
+ .lock = nfs4_proc_lock,
+ .clear_acl_cache = nfs4_zap_acl_attr,
+ .close_context = nfs4_close_context,
+ .open_context = nfs4_atomic_open,
+ .init_client = nfs4_init_client,
+ .secinfo = nfs4_proc_secinfo,
+};
+
+static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
+ .prefix = XATTR_NAME_NFSV4_ACL,
+ .list = nfs4_xattr_list_nfs4_acl,
+ .get = nfs4_xattr_get_nfs4_acl,
+ .set = nfs4_xattr_set_nfs4_acl,
+};
+
+const struct xattr_handler *nfs4_xattr_handlers[] = {
+ &nfs4_xattr_nfs4_acl_handler,
+ NULL
+};
+
+module_param(max_session_slots, ushort, 0644);
+MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
+ "requests the client will negotiate");
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
new file mode 100644
index 00000000..dc484c0e
--- /dev/null
+++ b/fs/nfs/nfs4renewd.c
@@ -0,0 +1,136 @@
+/*
+ * fs/nfs/nfs4renewd.c
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Kendrick Smith <kmsmith@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Implementation of the NFSv4 "renew daemon", which wakes up periodically to
+ * send a RENEW, to keep state alive on the server. The daemon is implemented
+ * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
+ * context. There is one renewd per nfs_client.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/clnt.h>
+
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include "nfs4_fs.h"
+#include "delegation.h"
+
+#define NFSDBG_FACILITY NFSDBG_PROC
+
+void
+nfs4_renew_state(struct work_struct *work)
+{
+ const struct nfs4_state_maintenance_ops *ops;
+ struct nfs_client *clp =
+ container_of(work, struct nfs_client, cl_renewd.work);
+ struct rpc_cred *cred;
+ long lease;
+ unsigned long last, now;
+ unsigned renew_flags = 0;
+
+ ops = clp->cl_mvops->state_renewal_ops;
+ dprintk("%s: start\n", __func__);
+
+ if (test_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state))
+ goto out;
+
+ spin_lock(&clp->cl_lock);
+ lease = clp->cl_lease_time;
+ last = clp->cl_last_renewal;
+ now = jiffies;
+ /* Are we close to a lease timeout? */
+ if (time_after(now, last + lease/3))
+ renew_flags |= NFS4_RENEW_TIMEOUT;
+ if (nfs_delegations_present(clp))
+ renew_flags |= NFS4_RENEW_DELEGATION_CB;
+
+ if (renew_flags != 0) {
+ cred = ops->get_state_renewal_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
+ if (cred == NULL) {
+ if (!(renew_flags & NFS4_RENEW_DELEGATION_CB)) {
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ goto out;
+ }
+ nfs_expire_all_delegations(clp);
+ } else {
+ /* Queue an asynchronous RENEW. */
+ ops->sched_state_renewal(clp, cred, renew_flags);
+ put_rpccred(cred);
+ goto out_exp;
+ }
+ } else {
+ dprintk("%s: failed to call renewd. Reason: lease not expired \n",
+ __func__);
+ spin_unlock(&clp->cl_lock);
+ }
+ nfs4_schedule_state_renewal(clp);
+out_exp:
+ nfs_expire_unreferenced_delegations(clp);
+out:
+ dprintk("%s: done\n", __func__);
+}
+
+void
+nfs4_schedule_state_renewal(struct nfs_client *clp)
+{
+ long timeout;
+
+ spin_lock(&clp->cl_lock);
+ timeout = (2 * clp->cl_lease_time) / 3 + (long)clp->cl_last_renewal
+ - (long)jiffies;
+ if (timeout < 5 * HZ)
+ timeout = 5 * HZ;
+ dprintk("%s: requeueing work. Lease period = %ld\n",
+ __func__, (timeout + HZ - 1) / HZ);
+ cancel_delayed_work(&clp->cl_renewd);
+ schedule_delayed_work(&clp->cl_renewd, timeout);
+ set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
+ spin_unlock(&clp->cl_lock);
+}
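+
+/*
+ * Timing example for the formula above (hypothetical lease, illustration
+ * only): with a 90 second lease that was renewed just now, the next run
+ * is queued in (2 * 90) / 3 = 60 seconds, so renewd fires once two thirds
+ * of the lease have elapsed, while nfs4_renew_state() only sends a
+ * renewal once more than one third has elapsed. Should the computed delay
+ * drop below 5 seconds, it is clamped to 5 * HZ to avoid a renewal storm.
+ */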
+
+void
+nfs4_kill_renewd(struct nfs_client *clp)
+{
+ cancel_delayed_work_sync(&clp->cl_renewd);
+}
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
new file mode 100644
index 00000000..7f0fcfc1
--- /dev/null
+++ b/fs/nfs/nfs4state.c
@@ -0,0 +1,1877 @@
+/*
+ * fs/nfs/nfs4state.c
+ *
+ * Client-side XDR for NFSv4.
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Kendrick Smith <kmsmith@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Implementation of the NFSv4 state model. For the time being,
+ * this is minimal, but will be made much more complex in a
+ * subsequent patch.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_idmap.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/jiffies.h>
+
+#include "nfs4_fs.h"
+#include "callback.h"
+#include "delegation.h"
+#include "internal.h"
+#include "pnfs.h"
+
+#define OPENOWNER_POOL_SIZE 8
+
+const nfs4_stateid zero_stateid;
+
+static LIST_HEAD(nfs4_clientid_list);
+
+int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ struct nfs4_setclientid_res clid = {
+ .clientid = clp->cl_clientid,
+ .confirm = clp->cl_confirm,
+ };
+ unsigned short port;
+ int status;
+
+ if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
+ goto do_confirm;
+ port = nfs_callback_tcpport;
+ if (clp->cl_addr.ss_family == AF_INET6)
+ port = nfs_callback_tcpport6;
+
+ status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
+ if (status != 0)
+ goto out;
+ clp->cl_clientid = clid.clientid;
+ clp->cl_confirm = clid.confirm;
+ set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+do_confirm:
+ status = nfs4_proc_setclientid_confirm(clp, &clid, cred);
+ if (status != 0)
+ goto out;
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ nfs4_schedule_state_renewal(clp);
+out:
+ return status;
+}
+
+struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
+{
+ struct rpc_cred *cred = NULL;
+
+ if (clp->cl_machine_cred != NULL)
+ cred = get_rpccred(clp->cl_machine_cred);
+ return cred;
+}
+
+static void nfs4_clear_machine_cred(struct nfs_client *clp)
+{
+ struct rpc_cred *cred;
+
+ spin_lock(&clp->cl_lock);
+ cred = clp->cl_machine_cred;
+ clp->cl_machine_cred = NULL;
+ spin_unlock(&clp->cl_lock);
+ if (cred != NULL)
+ put_rpccred(cred);
+}
+
+static struct rpc_cred *
+nfs4_get_renew_cred_server_locked(struct nfs_server *server)
+{
+ struct rpc_cred *cred = NULL;
+ struct nfs4_state_owner *sp;
+ struct rb_node *pos;
+
+ for (pos = rb_first(&server->state_owners);
+ pos != NULL;
+ pos = rb_next(pos)) {
+ sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
+ if (list_empty(&sp->so_states))
+ continue;
+ cred = get_rpccred(sp->so_cred);
+ break;
+ }
+ return cred;
+}
+
+/**
+ * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ * Caller must hold clp->cl_lock.
+ */
+struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+{
+ struct rpc_cred *cred = NULL;
+ struct nfs_server *server;
+
+ /* Use machine credentials if available */
+ cred = nfs4_get_machine_cred_locked(clp);
+ if (cred != NULL)
+ goto out;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ cred = nfs4_get_renew_cred_server_locked(server);
+ if (cred != NULL)
+ break;
+ }
+ rcu_read_unlock();
+
+out:
+ return cred;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+
+static int nfs41_setup_state_renewal(struct nfs_client *clp)
+{
+ int status;
+ struct nfs_fsinfo fsinfo;
+
+ if (!test_bit(NFS_CS_CHECK_LEASE_TIME, &clp->cl_res_state)) {
+ nfs4_schedule_state_renewal(clp);
+ return 0;
+ }
+
+ status = nfs4_proc_get_lease_time(clp, &fsinfo);
+ if (status == 0) {
+ /* Update lease time and schedule renewal */
+ spin_lock(&clp->cl_lock);
+ clp->cl_lease_time = fsinfo.lease_time * HZ;
+ clp->cl_last_renewal = jiffies;
+ spin_unlock(&clp->cl_lock);
+
+ nfs4_schedule_state_renewal(clp);
+ }
+
+ return status;
+}
+
+/*
+ * The back channel returns NFS4ERR_DELAY for new requests while
+ * NFS4_SESSION_DRAINING is set, so there is no back channel work left
+ * to do once draining has ended.
+ */
+static void nfs4_end_drain_session(struct nfs_client *clp)
+{
+ struct nfs4_session *ses = clp->cl_session;
+ struct nfs4_slot_table *tbl;
+ int max_slots;
+
+ if (ses == NULL)
+ return;
+ tbl = &ses->fc_slot_table;
+ if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
+ spin_lock(&tbl->slot_tbl_lock);
+ max_slots = tbl->max_slots;
+ while (max_slots--) {
+ if (rpc_wake_up_first(&tbl->slot_tbl_waitq,
+ nfs4_set_task_privileged,
+ NULL) == NULL)
+ break;
+ }
+ spin_unlock(&tbl->slot_tbl_lock);
+ }
+}
+
+static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
+{
+ spin_lock(&tbl->slot_tbl_lock);
+ if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
+ INIT_COMPLETION(tbl->complete);
+ spin_unlock(&tbl->slot_tbl_lock);
+ return wait_for_completion_interruptible(&tbl->complete);
+ }
+ spin_unlock(&tbl->slot_tbl_lock);
+ return 0;
+}
+
+static int nfs4_begin_drain_session(struct nfs_client *clp)
+{
+ struct nfs4_session *ses = clp->cl_session;
+ int ret = 0;
+
+ set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
+ /* back channel */
+ ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+ if (ret)
+ return ret;
+ /* fore channel */
+ return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+}
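+
+/*
+ * The drain handshake, in outline (a restatement of the logic above, not
+ * new behaviour): setting NFS4_SESSION_DRAINING makes new requests back
+ * off, then each slot table is waited on until highest_used_slotid falls
+ * back to NFS4_NO_SLOT; the task releasing the last slot is expected to
+ * complete tbl->complete (done elsewhere in this file), which wakes the
+ * waiter in nfs4_wait_on_slot_tbl(). nfs4_end_drain_session() later wakes
+ * up to max_slots queued tasks with privileged status so that state
+ * recovery can make progress first.
+ */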
+
+int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ int status;
+
+ if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state))
+ goto do_confirm;
+ nfs4_begin_drain_session(clp);
+ status = nfs4_proc_exchange_id(clp, cred);
+ if (status != 0)
+ goto out;
+ set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+do_confirm:
+ status = nfs4_proc_create_session(clp);
+ if (status != 0)
+ goto out;
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ nfs41_setup_state_renewal(clp);
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+out:
+ return status;
+}
+
+struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
+{
+ struct rpc_cred *cred;
+
+ spin_lock(&clp->cl_lock);
+ cred = nfs4_get_machine_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
+ return cred;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+static struct rpc_cred *
+nfs4_get_setclientid_cred_server(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct rpc_cred *cred = NULL;
+ struct nfs4_state_owner *sp;
+ struct rb_node *pos;
+
+ spin_lock(&clp->cl_lock);
+ pos = rb_first(&server->state_owners);
+ if (pos != NULL) {
+ sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
+ cred = get_rpccred(sp->so_cred);
+ }
+ spin_unlock(&clp->cl_lock);
+ return cred;
+}
+
+/**
+ * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ struct rpc_cred *cred;
+
+ spin_lock(&clp->cl_lock);
+ cred = nfs4_get_machine_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
+ if (cred != NULL)
+ goto out;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ cred = nfs4_get_setclientid_cred_server(server);
+ if (cred != NULL)
+ break;
+ }
+ rcu_read_unlock();
+
+out:
+ return cred;
+}
+
+static struct nfs4_state_owner *
+nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
+{
+ struct rb_node **p = &server->state_owners.rb_node,
+ *parent = NULL;
+ struct nfs4_state_owner *sp;
+
+ while (*p != NULL) {
+ parent = *p;
+ sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
+
+ if (cred < sp->so_cred)
+ p = &parent->rb_left;
+ else if (cred > sp->so_cred)
+ p = &parent->rb_right;
+ else {
+ if (!list_empty(&sp->so_lru))
+ list_del_init(&sp->so_lru);
+ atomic_inc(&sp->so_count);
+ return sp;
+ }
+ }
+ return NULL;
+}
+
+static struct nfs4_state_owner *
+nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
+{
+ struct nfs_server *server = new->so_server;
+ struct rb_node **p = &server->state_owners.rb_node,
+ *parent = NULL;
+ struct nfs4_state_owner *sp;
+ int err;
+
+ while (*p != NULL) {
+ parent = *p;
+ sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
+
+ if (new->so_cred < sp->so_cred)
+ p = &parent->rb_left;
+ else if (new->so_cred > sp->so_cred)
+ p = &parent->rb_right;
+ else {
+ if (!list_empty(&sp->so_lru))
+ list_del_init(&sp->so_lru);
+ atomic_inc(&sp->so_count);
+ return sp;
+ }
+ }
+ err = ida_get_new(&server->openowner_id, &new->so_seqid.owner_id);
+ if (err)
+ return ERR_PTR(err);
+ rb_link_node(&new->so_server_node, parent, p);
+ rb_insert_color(&new->so_server_node, &server->state_owners);
+ return new;
+}
+
+static void
+nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
+{
+ struct nfs_server *server = sp->so_server;
+
+ if (!RB_EMPTY_NODE(&sp->so_server_node))
+ rb_erase(&sp->so_server_node, &server->state_owners);
+ ida_remove(&server->openowner_id, sp->so_seqid.owner_id);
+}
+
+static void
+nfs4_init_seqid_counter(struct nfs_seqid_counter *sc)
+{
+ sc->create_time = ktime_get();
+ sc->flags = 0;
+ sc->counter = 0;
+ spin_lock_init(&sc->lock);
+ INIT_LIST_HEAD(&sc->list);
+ rpc_init_wait_queue(&sc->wait, "Seqid_waitqueue");
+}
+
+static void
+nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc)
+{
+ rpc_destroy_wait_queue(&sc->wait);
+}
+
+/*
+ * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
+ * create a new state_owner.
+ */
+static struct nfs4_state_owner *
+nfs4_alloc_state_owner(struct nfs_server *server,
+ struct rpc_cred *cred,
+ gfp_t gfp_flags)
+{
+ struct nfs4_state_owner *sp;
+
+ sp = kzalloc(sizeof(*sp), gfp_flags);
+ if (!sp)
+ return NULL;
+ sp->so_server = server;
+ sp->so_cred = get_rpccred(cred);
+ spin_lock_init(&sp->so_lock);
+ INIT_LIST_HEAD(&sp->so_states);
+ nfs4_init_seqid_counter(&sp->so_seqid);
+ atomic_set(&sp->so_count, 1);
+ INIT_LIST_HEAD(&sp->so_lru);
+ return sp;
+}
+
+static void
+nfs4_drop_state_owner(struct nfs4_state_owner *sp)
+{
+ struct rb_node *rb_node = &sp->so_server_node;
+
+ if (!RB_EMPTY_NODE(rb_node)) {
+ struct nfs_server *server = sp->so_server;
+ struct nfs_client *clp = server->nfs_client;
+
+ spin_lock(&clp->cl_lock);
+ if (!RB_EMPTY_NODE(rb_node)) {
+ rb_erase(rb_node, &server->state_owners);
+ RB_CLEAR_NODE(rb_node);
+ }
+ spin_unlock(&clp->cl_lock);
+ }
+}
+
+static void nfs4_free_state_owner(struct nfs4_state_owner *sp)
+{
+ nfs4_destroy_seqid_counter(&sp->so_seqid);
+ put_rpccred(sp->so_cred);
+ kfree(sp);
+}
+
+static void nfs4_gc_state_owners(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state_owner *sp, *tmp;
+ unsigned long time_min, time_max;
+ LIST_HEAD(doomed);
+
+ spin_lock(&clp->cl_lock);
+ time_max = jiffies;
+ time_min = (long)time_max - (long)clp->cl_lease_time;
+ list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
+ /* NB: LRU is sorted so that oldest is at the head */
+ if (time_in_range(sp->so_expires, time_min, time_max))
+ break;
+ list_move(&sp->so_lru, &doomed);
+ nfs4_remove_state_owner_locked(sp);
+ }
+ spin_unlock(&clp->cl_lock);
+
+ list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+ list_del(&sp->so_lru);
+ nfs4_free_state_owner(sp);
+ }
+}
+
+/**
+ * nfs4_get_state_owner - Look up a state owner given a credential
+ * @server: nfs_server to search
+ * @cred: RPC credential to match
+ *
+ * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
+ */
+struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
+ struct rpc_cred *cred,
+ gfp_t gfp_flags)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state_owner *sp, *new;
+
+ spin_lock(&clp->cl_lock);
+ sp = nfs4_find_state_owner_locked(server, cred);
+ spin_unlock(&clp->cl_lock);
+ if (sp != NULL)
+ goto out;
+ new = nfs4_alloc_state_owner(server, cred, gfp_flags);
+ if (new == NULL)
+ goto out;
+ do {
+ if (ida_pre_get(&server->openowner_id, gfp_flags) == 0)
+ break;
+ spin_lock(&clp->cl_lock);
+ sp = nfs4_insert_state_owner_locked(new);
+ spin_unlock(&clp->cl_lock);
+ } while (sp == ERR_PTR(-EAGAIN));
+ if (sp != new)
+ nfs4_free_state_owner(new);
+out:
+ nfs4_gc_state_owners(server);
+ return sp;
+}
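+
+/*
+ * A note on the loop above: ida_pre_get() plus
+ * nfs4_insert_state_owner_locked() follow the classic two-phase IDA
+ * pattern: preallocate outside the lock, then retry the locked insert
+ * whenever ida_get_new() returns -EAGAIN because another caller consumed
+ * the preallocation. If an owner with the same cred already sits in the
+ * tree, it is returned instead and the spare allocation is freed.
+ */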
+
+/**
+ * nfs4_put_state_owner - Release a nfs4_state_owner
+ * @sp: state owner data to release
+ *
+ * Note that we keep released state owners on an LRU
+ * list.
+ * This caches valid state owners so that they can be
+ * reused, to avoid the OPEN_CONFIRM on minor version 0.
+ * It also pins the uniquifier of dropped state owners for
+ * a while, to ensure that those state owner names are
+ * never reused.
+ */
+void nfs4_put_state_owner(struct nfs4_state_owner *sp)
+{
+ struct nfs_server *server = sp->so_server;
+ struct nfs_client *clp = server->nfs_client;
+
+ if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
+ return;
+
+ sp->so_expires = jiffies;
+ list_add_tail(&sp->so_lru, &server->state_owners_lru);
+ spin_unlock(&clp->cl_lock);
+}
+
+/**
+ * nfs4_purge_state_owners - Release all cached state owners
+ * @server: nfs_server with cached state owners to release
+ *
+ * Called at umount time. Remaining state owners will be on
+ * the LRU with ref count of zero.
+ */
+void nfs4_purge_state_owners(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state_owner *sp, *tmp;
+ LIST_HEAD(doomed);
+
+ spin_lock(&clp->cl_lock);
+ list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
+ list_move(&sp->so_lru, &doomed);
+ nfs4_remove_state_owner_locked(sp);
+ }
+ spin_unlock(&clp->cl_lock);
+
+ list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+ list_del(&sp->so_lru);
+ nfs4_free_state_owner(sp);
+ }
+}
+
+static struct nfs4_state *
+nfs4_alloc_open_state(void)
+{
+ struct nfs4_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_NOFS);
+ if (!state)
+ return NULL;
+ atomic_set(&state->count, 1);
+ INIT_LIST_HEAD(&state->lock_states);
+ spin_lock_init(&state->state_lock);
+ seqlock_init(&state->seqlock);
+ return state;
+}
+
+void
+nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
+{
+ if (state->state == fmode)
+ return;
+ /* NB! List reordering - see the reclaim code for why. */
+ if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
+ if (fmode & FMODE_WRITE)
+ list_move(&state->open_states, &state->owner->so_states);
+ else
+ list_move_tail(&state->open_states, &state->owner->so_states);
+ }
+ state->state = fmode;
+}
+
+static struct nfs4_state *
+__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs4_state *state;
+
+ list_for_each_entry(state, &nfsi->open_states, inode_states) {
+ if (state->owner != owner)
+ continue;
+ if (atomic_inc_not_zero(&state->count))
+ return state;
+ }
+ return NULL;
+}
+
+static void
+nfs4_free_open_state(struct nfs4_state *state)
+{
+ kfree(state);
+}
+
+struct nfs4_state *
+nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
+{
+ struct nfs4_state *state, *new;
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ spin_lock(&inode->i_lock);
+ state = __nfs4_find_state_byowner(inode, owner);
+ spin_unlock(&inode->i_lock);
+ if (state)
+ goto out;
+ new = nfs4_alloc_open_state();
+ spin_lock(&owner->so_lock);
+ spin_lock(&inode->i_lock);
+ state = __nfs4_find_state_byowner(inode, owner);
+ if (state == NULL && new != NULL) {
+ state = new;
+ state->owner = owner;
+ atomic_inc(&owner->so_count);
+ list_add(&state->inode_states, &nfsi->open_states);
+ ihold(inode);
+ state->inode = inode;
+ spin_unlock(&inode->i_lock);
+ /* Note: The reclaim code dictates that we add stateless
+ * and read-only stateids to the end of the list */
+ list_add_tail(&state->open_states, &owner->so_states);
+ spin_unlock(&owner->so_lock);
+ } else {
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&owner->so_lock);
+ if (new)
+ nfs4_free_open_state(new);
+ }
+out:
+ return state;
+}
+
+void nfs4_put_open_state(struct nfs4_state *state)
+{
+ struct inode *inode = state->inode;
+ struct nfs4_state_owner *owner = state->owner;
+
+ if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
+ return;
+ spin_lock(&inode->i_lock);
+ list_del(&state->inode_states);
+ list_del(&state->open_states);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&owner->so_lock);
+ iput(inode);
+ nfs4_free_open_state(state);
+ nfs4_put_state_owner(owner);
+}
+
+/*
+ * Close the current file.
+ */
+static void __nfs4_close(struct nfs4_state *state,
+ fmode_t fmode, gfp_t gfp_mask, int wait)
+{
+ struct nfs4_state_owner *owner = state->owner;
+ int call_close = 0;
+ fmode_t newstate;
+
+ atomic_inc(&owner->so_count);
+ /* Protect against nfs4_find_state() */
+ spin_lock(&owner->so_lock);
+ switch (fmode & (FMODE_READ | FMODE_WRITE)) {
+ case FMODE_READ:
+ state->n_rdonly--;
+ break;
+ case FMODE_WRITE:
+ state->n_wronly--;
+ break;
+ case FMODE_READ|FMODE_WRITE:
+ state->n_rdwr--;
+ }
+ newstate = FMODE_READ|FMODE_WRITE;
+ if (state->n_rdwr == 0) {
+ if (state->n_rdonly == 0) {
+ newstate &= ~FMODE_READ;
+ call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ }
+ if (state->n_wronly == 0) {
+ newstate &= ~FMODE_WRITE;
+ call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
+ call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
+ }
+ if (newstate == 0)
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ }
+ nfs4_state_set_mode_locked(state, newstate);
+ spin_unlock(&owner->so_lock);
+
+ if (!call_close) {
+ nfs4_put_open_state(state);
+ nfs4_put_state_owner(owner);
+ } else {
+ bool roc = pnfs_roc(state->inode);
+
+ nfs4_do_close(state, gfp_mask, wait, roc);
+ }
+}
+
+void nfs4_close_state(struct nfs4_state *state, fmode_t fmode)
+{
+ __nfs4_close(state, fmode, GFP_NOFS, 0);
+}
+
+void nfs4_close_sync(struct nfs4_state *state, fmode_t fmode)
+{
+ __nfs4_close(state, fmode, GFP_KERNEL, 1);
+}
+
+/*
+ * Search the state->lock_states for an existing lock_owner
+ * that is compatible with current->files
+ */
+static struct nfs4_lock_state *
+__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
+{
+ struct nfs4_lock_state *pos;
+ list_for_each_entry(pos, &state->lock_states, ls_locks) {
+ if (type != NFS4_ANY_LOCK_TYPE && pos->ls_owner.lo_type != type)
+ continue;
+ switch (pos->ls_owner.lo_type) {
+ case NFS4_POSIX_LOCK_TYPE:
+ if (pos->ls_owner.lo_u.posix_owner != fl_owner)
+ continue;
+ break;
+ case NFS4_FLOCK_LOCK_TYPE:
+ if (pos->ls_owner.lo_u.flock_owner != fl_pid)
+ continue;
+ }
+ atomic_inc(&pos->ls_count);
+ return pos;
+ }
+ return NULL;
+}
+
+/*
+ * Allocate a new lock_state structure for the given lock owner. The
+ * on-the-wire lock stateid is only established later, by a successful
+ * LOCK call.
+ */
+static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
+{
+ struct nfs4_lock_state *lsp;
+ struct nfs_server *server = state->owner->so_server;
+
+ lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
+ if (lsp == NULL)
+ return NULL;
+ nfs4_init_seqid_counter(&lsp->ls_seqid);
+ atomic_set(&lsp->ls_count, 1);
+ lsp->ls_state = state;
+ lsp->ls_owner.lo_type = type;
+ switch (lsp->ls_owner.lo_type) {
+ case NFS4_FLOCK_LOCK_TYPE:
+ lsp->ls_owner.lo_u.flock_owner = fl_pid;
+ break;
+ case NFS4_POSIX_LOCK_TYPE:
+ lsp->ls_owner.lo_u.posix_owner = fl_owner;
+ break;
+ default:
+ goto out_free;
+ }
+ lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
+ if (lsp->ls_seqid.owner_id < 0)
+ goto out_free;
+ INIT_LIST_HEAD(&lsp->ls_locks);
+ return lsp;
+out_free:
+ kfree(lsp);
+ return NULL;
+}
+
+void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
+{
+ ida_simple_remove(&server->lockowner_id, lsp->ls_seqid.owner_id);
+ nfs4_destroy_seqid_counter(&lsp->ls_seqid);
+ kfree(lsp);
+}
+
+/*
+ * Return a compatible lock_state. If no initialized lock_state structure
+ * exists, return an uninitialized one.
+ *
+ */
+static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner, pid_t pid, unsigned int type)
+{
+ struct nfs4_lock_state *lsp, *new = NULL;
+
+ for(;;) {
+ spin_lock(&state->state_lock);
+ lsp = __nfs4_find_lock_state(state, owner, pid, type);
+ if (lsp != NULL)
+ break;
+ if (new != NULL) {
+ list_add(&new->ls_locks, &state->lock_states);
+ set_bit(LK_STATE_IN_USE, &state->flags);
+ lsp = new;
+ new = NULL;
+ break;
+ }
+ spin_unlock(&state->state_lock);
+ new = nfs4_alloc_lock_state(state, owner, pid, type);
+ if (new == NULL)
+ return NULL;
+ }
+ spin_unlock(&state->state_lock);
+ if (new != NULL)
+ nfs4_free_lock_state(state->owner->so_server, new);
+ return lsp;
+}
+
+/*
+ * Release reference to lock_state, and free it if we see that
+ * it is no longer in use
+ */
+void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
+{
+ struct nfs4_state *state;
+
+ if (lsp == NULL)
+ return;
+ state = lsp->ls_state;
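+	/* atomic_dec_and_lock() takes state_lock only when the count drops to zero */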
+ if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
+ return;
+ list_del(&lsp->ls_locks);
+ if (list_empty(&state->lock_states))
+ clear_bit(LK_STATE_IN_USE, &state->flags);
+ spin_unlock(&state->state_lock);
+ if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
+ if (nfs4_release_lockowner(lsp) == 0)
+ return;
+ }
+ nfs4_free_lock_state(lsp->ls_state->owner->so_server, lsp);
+}
+
+static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
+{
+ struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
+
+ dst->fl_u.nfs4_fl.owner = lsp;
+ atomic_inc(&lsp->ls_count);
+}
+
+static void nfs4_fl_release_lock(struct file_lock *fl)
+{
+ nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
+}
+
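+/*
+ * file_lock operations that keep the nfs4_lock_state reference count
+ * in step with file_lock copies made by the VFS.
+ */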
+static const struct file_lock_operations nfs4_fl_lock_ops = {
+ .fl_copy_lock = nfs4_fl_copy_lock,
+ .fl_release_private = nfs4_fl_release_lock,
+};
+
+int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
+{
+ struct nfs4_lock_state *lsp;
+
+ if (fl->fl_ops != NULL)
+ return 0;
+ if (fl->fl_flags & FL_POSIX)
+ lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, NFS4_POSIX_LOCK_TYPE);
+ else if (fl->fl_flags & FL_FLOCK)
+ lsp = nfs4_get_lock_state(state, NULL, fl->fl_pid,
+ NFS4_FLOCK_LOCK_TYPE);
+ else
+ return -EINVAL;
+ if (lsp == NULL)
+ return -ENOMEM;
+ fl->fl_u.nfs4_fl.owner = lsp;
+ fl->fl_ops = &nfs4_fl_lock_ops;
+ return 0;
+}
+
+static bool nfs4_copy_lock_stateid(nfs4_stateid *dst, struct nfs4_state *state,
+ fl_owner_t fl_owner, pid_t fl_pid)
+{
+ struct nfs4_lock_state *lsp;
+ bool ret = false;
+
+ if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
+ goto out;
+
+ spin_lock(&state->state_lock);
+ lsp = __nfs4_find_lock_state(state, fl_owner, fl_pid, NFS4_ANY_LOCK_TYPE);
+ if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) {
+ nfs4_stateid_copy(dst, &lsp->ls_stateid);
+ ret = true;
+ }
+ spin_unlock(&state->state_lock);
+ nfs4_put_lock_state(lsp);
+out:
+ return ret;
+}
+
+static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
+{
+ int seq;
+
+ do {
+ seq = read_seqbegin(&state->seqlock);
+ nfs4_stateid_copy(dst, &state->stateid);
+ } while (read_seqretry(&state->seqlock, seq));
+}
+
+/*
+ * Byte-range lock aware utility to initialize the stateid of read/write
+ * requests.
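+ * Precedence: delegation stateid, then byte-range lock stateid,
+ * then the open stateid.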
+ */
+void nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
+ fmode_t fmode, fl_owner_t fl_owner, pid_t fl_pid)
+{
+ if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
+ return;
+ if (nfs4_copy_lock_stateid(dst, state, fl_owner, fl_pid))
+ return;
+ nfs4_copy_open_stateid(dst, state);
+}
+
+struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask)
+{
+ struct nfs_seqid *new;
+
+ new = kmalloc(sizeof(*new), gfp_mask);
+ if (new != NULL) {
+ new->sequence = counter;
+ INIT_LIST_HEAD(&new->list);
+ new->task = NULL;
+ }
+ return new;
+}
+
+void nfs_release_seqid(struct nfs_seqid *seqid)
+{
+ struct nfs_seqid_counter *sequence;
+
+ if (list_empty(&seqid->list))
+ return;
+ sequence = seqid->sequence;
+ spin_lock(&sequence->lock);
+ list_del_init(&seqid->list);
+ if (!list_empty(&sequence->list)) {
+ struct nfs_seqid *next;
+
+ next = list_first_entry(&sequence->list,
+ struct nfs_seqid, list);
+ rpc_wake_up_queued_task(&sequence->wait, next->task);
+ }
+ spin_unlock(&sequence->lock);
+}
+
+void nfs_free_seqid(struct nfs_seqid *seqid)
+{
+ nfs_release_seqid(seqid);
+ kfree(seqid);
+}
+
+/*
+ * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
+ * failed with a seqid-incrementing error -
+ * see the comments in nfs_fs.h:seqid_mutating_error()
+ */
+static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
+{
+ BUG_ON(list_first_entry(&seqid->sequence->list, struct nfs_seqid, list) != seqid);
+ switch (status) {
+ case 0:
+ break;
+ case -NFS4ERR_BAD_SEQID:
+ if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
+ return;
+ pr_warn_ratelimited("NFS: v4 server returned a bad"
+ " sequence-id error on an"
+ " unconfirmed sequence %p!\n",
+ seqid->sequence);
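+		/* fall through */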
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_BADXDR:
+ case -NFS4ERR_RESOURCE:
+ case -NFS4ERR_NOFILEHANDLE:
+ /* Non-seqid mutating errors */
+ return;
+	}
+ /*
+ * Note: no locking needed as we are guaranteed to be first
+ * on the sequence list
+ */
+ seqid->sequence->counter++;
+}
+
+void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
+{
+ struct nfs4_state_owner *sp = container_of(seqid->sequence,
+ struct nfs4_state_owner, so_seqid);
+ struct nfs_server *server = sp->so_server;
+
+ if (status == -NFS4ERR_BAD_SEQID)
+ nfs4_drop_state_owner(sp);
+ if (!nfs4_has_session(server->nfs_client))
+ nfs_increment_seqid(status, seqid);
+}
+
+/*
+ * Increment the seqid if the LOCK/LOCKU succeeded, or
+ * failed with a seqid-incrementing error -
+ * see the comments in nfs_fs.h:seqid_mutating_error()
+ */
+void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
+{
+ nfs_increment_seqid(status, seqid);
+}
+
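+/*
+ * Serialise seqid-mutating operations: queue the request on the
+ * counter's list and let it proceed only once it reaches the head.
+ * The next waiter is woken from nfs_release_seqid().
+ */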
+int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
+{
+ struct nfs_seqid_counter *sequence = seqid->sequence;
+ int status = 0;
+
+ spin_lock(&sequence->lock);
+ seqid->task = task;
+ if (list_empty(&seqid->list))
+ list_add_tail(&seqid->list, &sequence->list);
+ if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
+ goto unlock;
+ rpc_sleep_on(&sequence->wait, task, NULL);
+ status = -EAGAIN;
+unlock:
+ spin_unlock(&sequence->lock);
+ return status;
+}
+
+static int nfs4_run_state_manager(void *);
+
+static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
+ rpc_wake_up(&clp->cl_rpcwaitq);
+}
+
+/*
+ * Schedule the nfs_client asynchronous state management routine
+ */
+void nfs4_schedule_state_manager(struct nfs_client *clp)
+{
+ struct task_struct *task;
+ char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
+
+ if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
+ return;
+ __module_get(THIS_MODULE);
+ atomic_inc(&clp->cl_count);
+
+ /* The rcu_read_lock() is not strictly necessary, as the state
+ * manager is the only thread that ever changes the rpc_xprt
+ * after it's initialized. At this point, we're single threaded. */
+ rcu_read_lock();
+ snprintf(buf, sizeof(buf), "%s-manager",
+ rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+ rcu_read_unlock();
+ task = kthread_run(nfs4_run_state_manager, clp, buf);
+ if (IS_ERR(task)) {
+ printk(KERN_ERR "%s: kthread_run: %ld\n",
+ __func__, PTR_ERR(task));
+ nfs4_clear_state_manager_bit(clp);
+ nfs_put_client(clp);
+ module_put(THIS_MODULE);
+ }
+}
+
+/*
+ * Schedule a lease recovery attempt
+ */
+void nfs4_schedule_lease_recovery(struct nfs_client *clp)
+{
+ if (!clp)
+ return;
+ if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+ nfs4_schedule_state_manager(clp);
+}
+EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
+
+/*
+ * nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
+ * @clp: client to process
+ *
+ * Set the NFS4CLNT_LEASE_EXPIRED state in order to force a
+ * resend of the SETCLIENTID and hence re-establish the
+ * callback channel. Then return all existing delegations.
+ */
+static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
+{
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs_expire_all_delegations(clp);
+}
+
+void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
+{
+ nfs40_handle_cb_pathdown(clp);
+ nfs4_schedule_state_manager(clp);
+}
+
+static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
+{
+ set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
+ /* Don't recover state that expired before the reboot */
+ if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
+ clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
+ return 0;
+ }
+ set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
+ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
+ return 1;
+}
+
+static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
+{
+ set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
+ clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
+ set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
+ set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
+ return 1;
+}
+
+void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
+{
+ struct nfs_client *clp = server->nfs_client;
+
+ nfs4_state_mark_reclaim_nograce(clp, state);
+ nfs4_schedule_state_manager(clp);
+}
+EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
+
+void nfs_inode_find_state_and_recover(struct inode *inode,
+ const nfs4_stateid *stateid)
+{
+ struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_open_context *ctx;
+ struct nfs4_state *state;
+ bool found = false;
+
+ spin_lock(&inode->i_lock);
+ list_for_each_entry(ctx, &nfsi->open_files, list) {
+ state = ctx->state;
+ if (state == NULL)
+ continue;
+ if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
+ continue;
+ if (!nfs4_stateid_match(&state->stateid, stateid))
+ continue;
+ nfs4_state_mark_reclaim_nograce(clp, state);
+ found = true;
+ }
+ spin_unlock(&inode->i_lock);
+ if (found)
+ nfs4_schedule_state_manager(clp);
+}
+
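+/*
+ * Re-establish every byte-range lock held under this open state, e.g.
+ * after a server reboot or a lost lease.
+ */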
+static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
+{
+ struct inode *inode = state->inode;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct file_lock *fl;
+ int status = 0;
+
+ if (inode->i_flock == NULL)
+ return 0;
+
+ /* Guard against delegation returns and new lock/unlock calls */
+ down_write(&nfsi->rwsem);
+	/* Protect inode->i_flock using lock_flocks() */
+ lock_flocks();
+ for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
+ if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
+ continue;
+ if (nfs_file_open_context(fl->fl_file)->state != state)
+ continue;
+ unlock_flocks();
+ status = ops->recover_lock(state, fl);
+ switch (status) {
+ case 0:
+ break;
+ case -ESTALE:
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ goto out;
+ default:
+ printk(KERN_ERR "NFS: %s: unhandled error %d. "
+ "Zeroing state\n", __func__, status);
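+			/* fall through */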
+ case -ENOMEM:
+ case -NFS4ERR_DENIED:
+ case -NFS4ERR_RECLAIM_BAD:
+ case -NFS4ERR_RECLAIM_CONFLICT:
+ /* kill_proc(fl->fl_pid, SIGLOST, 1); */
+ status = 0;
+ }
+ lock_flocks();
+ }
+ unlock_flocks();
+out:
+ up_write(&nfsi->rwsem);
+ return status;
+}
+
+static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
+{
+ struct nfs4_state *state;
+ struct nfs4_lock_state *lock;
+ int status = 0;
+
+ /* Note: we rely on the sp->so_states list being ordered
+ * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
+ * states first.
+ * This is needed to ensure that the server won't give us any
+ * read delegations that we have to return if, say, we are
+ * recovering after a network partition or a reboot from a
+ * server that doesn't support a grace period.
+ */
+restart:
+ spin_lock(&sp->so_lock);
+ list_for_each_entry(state, &sp->so_states, open_states) {
+ if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
+ continue;
+ if (state->state == 0)
+ continue;
+ atomic_inc(&state->count);
+ spin_unlock(&sp->so_lock);
+ status = ops->recover_open(sp, state);
+ if (status >= 0) {
+ status = nfs4_reclaim_locks(state, ops);
+ if (status >= 0) {
+ spin_lock(&state->state_lock);
+ list_for_each_entry(lock, &state->lock_states, ls_locks) {
+ if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
+ pr_warn_ratelimited("NFS: "
+ "%s: Lock reclaim "
+ "failed!\n", __func__);
+ }
+ spin_unlock(&state->state_lock);
+ nfs4_put_open_state(state);
+ goto restart;
+ }
+ }
+ switch (status) {
+ default:
+ printk(KERN_ERR "NFS: %s: unhandled error %d. "
+ "Zeroing state\n", __func__, status);
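+			/* fall through */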
+ case -ENOENT:
+ case -ENOMEM:
+ case -ESTALE:
+ /*
+ * Open state on this file cannot be recovered
+ * All we can do is revert to using the zero stateid.
+ */
+ memset(&state->stateid, 0,
+ sizeof(state->stateid));
+ /* Mark the file as being 'closed' */
+ state->state = 0;
+ break;
+ case -EKEYEXPIRED:
+ /*
+ * User RPCSEC_GSS context has expired.
+ * We cannot recover this stateid now, so
+ * skip it and allow recovery thread to
+ * proceed.
+ */
+ break;
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_RECLAIM_BAD:
+ case -NFS4ERR_RECLAIM_CONFLICT:
+ nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
+ break;
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
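+			/* fall through */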
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ goto out_err;
+ }
+ nfs4_put_open_state(state);
+ goto restart;
+ }
+ spin_unlock(&sp->so_lock);
+ return 0;
+out_err:
+ nfs4_put_open_state(state);
+ return status;
+}
+
+static void nfs4_clear_open_state(struct nfs4_state *state)
+{
+ struct nfs4_lock_state *lock;
+
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ spin_lock(&state->state_lock);
+ list_for_each_entry(lock, &state->lock_states, ls_locks) {
+ lock->ls_seqid.flags = 0;
+ lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
+ }
+ spin_unlock(&state->state_lock);
+}
+
+static void nfs4_reset_seqids(struct nfs_server *server,
+ int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state_owner *sp;
+ struct rb_node *pos;
+ struct nfs4_state *state;
+
+ spin_lock(&clp->cl_lock);
+ for (pos = rb_first(&server->state_owners);
+ pos != NULL;
+ pos = rb_next(pos)) {
+ sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
+ sp->so_seqid.flags = 0;
+ spin_lock(&sp->so_lock);
+ list_for_each_entry(state, &sp->so_states, open_states) {
+ if (mark_reclaim(clp, state))
+ nfs4_clear_open_state(state);
+ }
+ spin_unlock(&sp->so_lock);
+ }
+ spin_unlock(&clp->cl_lock);
+}
+
+static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
+ int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+{
+ struct nfs_server *server;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs4_reset_seqids(server, mark_reclaim);
+ rcu_read_unlock();
+}
+
+static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
+{
+ /* Mark all delegations for reclaim */
+ nfs_delegation_mark_reclaim(clp);
+ nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
+}
+
+static void nfs4_reclaim_complete(struct nfs_client *clp,
+ const struct nfs4_state_recovery_ops *ops)
+{
+ /* Notify the server we're done reclaiming our state */
+ if (ops->reclaim_complete)
+ (void)ops->reclaim_complete(clp);
+}
+
+static void nfs4_clear_reclaim_server(struct nfs_server *server)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state_owner *sp;
+ struct rb_node *pos;
+ struct nfs4_state *state;
+
+ spin_lock(&clp->cl_lock);
+ for (pos = rb_first(&server->state_owners);
+ pos != NULL;
+ pos = rb_next(pos)) {
+ sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
+ spin_lock(&sp->so_lock);
+ list_for_each_entry(state, &sp->so_states, open_states) {
+ if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
+ &state->flags))
+ continue;
+ nfs4_state_mark_reclaim_nograce(clp, state);
+ }
+ spin_unlock(&sp->so_lock);
+ }
+ spin_unlock(&clp->cl_lock);
+}
+
+static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+
+ if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+ return 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+ nfs4_clear_reclaim_server(server);
+ rcu_read_unlock();
+
+ nfs_delegation_reap_unclaimed(clp);
+ return 1;
+}
+
+static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
+{
+ if (!nfs4_state_clear_reclaim_reboot(clp))
+ return;
+ nfs4_reclaim_complete(clp, clp->cl_mvops->reboot_recovery_ops);
+}
+
+static void nfs_delegation_clear_all(struct nfs_client *clp)
+{
+ nfs_delegation_mark_reclaim(clp);
+ nfs_delegation_reap_unclaimed(clp);
+}
+
+static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
+{
+ nfs_delegation_clear_all(clp);
+ nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
+}
+
+static void nfs4_warn_keyexpired(const char *s)
+{
+	printk_ratelimited(KERN_WARNING "Error: state manager"
+			" encountered an expired RPCSEC_GSS session"
+			" against NFSv4 server %s.\n",
+			s);
+}
+
+static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
+{
+ switch (error) {
+ case 0:
+ break;
+ case -NFS4ERR_CB_PATH_DOWN:
+ nfs40_handle_cb_pathdown(clp);
+ break;
+ case -NFS4ERR_NO_GRACE:
+ nfs4_state_end_reclaim_reboot(clp);
+ break;
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_LEASE_MOVED:
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_state_clear_reclaim_reboot(clp);
+ nfs4_state_start_reclaim_reboot(clp);
+ break;
+ case -NFS4ERR_EXPIRED:
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_state_start_reclaim_nograce(clp);
+ break;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ /* Zero session reset errors */
+ break;
+ case -EKEYEXPIRED:
+ /* Nothing we can do */
+ nfs4_warn_keyexpired(clp->cl_hostname);
+ break;
+ default:
+ return error;
+ }
+ return 0;
+}
+
+static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
+{
+ struct nfs4_state_owner *sp;
+ struct nfs_server *server;
+ struct rb_node *pos;
+ int status = 0;
+
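+	/* A successful reclaim drops all locks, so restart the scan of
+	 * the state-owner rbtree rather than continuing it unlocked. */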
+restart:
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ nfs4_purge_state_owners(server);
+ spin_lock(&clp->cl_lock);
+ for (pos = rb_first(&server->state_owners);
+ pos != NULL;
+ pos = rb_next(pos)) {
+ sp = rb_entry(pos,
+ struct nfs4_state_owner, so_server_node);
+ if (!test_and_clear_bit(ops->owner_flag_bit,
+ &sp->so_flags))
+ continue;
+ atomic_inc(&sp->so_count);
+ spin_unlock(&clp->cl_lock);
+ rcu_read_unlock();
+
+ status = nfs4_reclaim_open_state(sp, ops);
+ if (status < 0) {
+ set_bit(ops->owner_flag_bit, &sp->so_flags);
+ nfs4_put_state_owner(sp);
+ return nfs4_recovery_handle_error(clp, status);
+ }
+
+ nfs4_put_state_owner(sp);
+ goto restart;
+ }
+ spin_unlock(&clp->cl_lock);
+ }
+ rcu_read_unlock();
+ return status;
+}
+
+static int nfs4_check_lease(struct nfs_client *clp)
+{
+ struct rpc_cred *cred;
+ const struct nfs4_state_maintenance_ops *ops =
+ clp->cl_mvops->state_renewal_ops;
+ int status;
+
+ /* Is the client already known to have an expired lease? */
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ return 0;
+ spin_lock(&clp->cl_lock);
+ cred = ops->get_state_renewal_cred_locked(clp);
+ spin_unlock(&clp->cl_lock);
+ if (cred == NULL) {
+ cred = nfs4_get_setclientid_cred(clp);
+ status = -ENOKEY;
+ if (cred == NULL)
+ goto out;
+ }
+ status = ops->renew_lease(clp, cred);
+ put_rpccred(cred);
+out:
+ return nfs4_recovery_handle_error(clp, status);
+}
+
+static int nfs4_reclaim_lease(struct nfs_client *clp)
+{
+ struct rpc_cred *cred;
+ const struct nfs4_state_recovery_ops *ops =
+ clp->cl_mvops->reboot_recovery_ops;
+ int status = -ENOENT;
+
+ cred = ops->get_clid_cred(clp);
+ if (cred != NULL) {
+ status = ops->establish_clid(clp, cred);
+ put_rpccred(cred);
+ /* Handle case where the user hasn't set up machine creds */
+ if (status == -EACCES && cred == clp->cl_machine_cred) {
+ nfs4_clear_machine_cred(clp);
+ status = -EAGAIN;
+ }
+ if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
+ status = -EPROTONOSUPPORT;
+ }
+ return status;
+}
+
+#ifdef CONFIG_NFS_V4_1
+void nfs4_schedule_session_recovery(struct nfs4_session *session)
+{
+ struct nfs_client *clp = session->clp;
+
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ nfs4_schedule_lease_recovery(clp);
+}
+EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
+
+void nfs41_handle_recall_slot(struct nfs_client *clp)
+{
+ set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+ nfs4_schedule_state_manager(clp);
+}
+
+static void nfs4_reset_all_state(struct nfs_client *clp)
+{
+ if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
+ clp->cl_boot_time = CURRENT_TIME;
+ nfs4_state_start_reclaim_nograce(clp);
+ nfs4_schedule_state_manager(clp);
+ }
+}
+
+static void nfs41_handle_server_reboot(struct nfs_client *clp)
+{
+ if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
+ nfs4_state_start_reclaim_reboot(clp);
+ nfs4_schedule_state_manager(clp);
+ }
+}
+
+static void nfs41_handle_state_revoked(struct nfs_client *clp)
+{
+ /* Temporary */
+ nfs4_reset_all_state(clp);
+}
+
+static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
+{
+ /* This will need to handle layouts too */
+ nfs_expire_all_delegations(clp);
+}
+
+static void nfs41_handle_cb_path_down(struct nfs_client *clp)
+{
+ nfs_expire_all_delegations(clp);
+ if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
+ nfs4_schedule_state_manager(clp);
+}
+
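+/* Dispatch SEQUENCE status flags from the server to the matching recovery handlers */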
+void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
+{
+ if (!flags)
+ return;
+ if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+ nfs41_handle_server_reboot(clp);
+ if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+ SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
+ SEQ4_STATUS_ADMIN_STATE_REVOKED |
+ SEQ4_STATUS_LEASE_MOVED))
+ nfs41_handle_state_revoked(clp);
+ if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+ nfs41_handle_recallable_state_revoked(clp);
+ if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+ SEQ4_STATUS_BACKCHANNEL_FAULT |
+ SEQ4_STATUS_CB_PATH_DOWN_SESSION))
+ nfs41_handle_cb_path_down(clp);
+}
+
+static int nfs4_reset_session(struct nfs_client *clp)
+{
+ int status;
+
+ nfs4_begin_drain_session(clp);
+ status = nfs4_proc_destroy_session(clp->cl_session);
+ if (status && status != -NFS4ERR_BADSESSION &&
+ status != -NFS4ERR_DEADSESSION) {
+ status = nfs4_recovery_handle_error(clp, status);
+ goto out;
+ }
+
+ memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
+ status = nfs4_proc_create_session(clp);
+ if (status) {
+ status = nfs4_recovery_handle_error(clp, status);
+ goto out;
+ }
+ clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ /* create_session negotiated new slot table */
+ clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
+
+ /* Let the state manager reestablish state */
+ if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ nfs41_setup_state_renewal(clp);
+out:
+ return status;
+}
+
+static int nfs4_recall_slot(struct nfs_client *clp)
+{
+ struct nfs4_slot_table *fc_tbl = &clp->cl_session->fc_slot_table;
+ struct nfs4_channel_attrs *fc_attrs = &clp->cl_session->fc_attrs;
+ struct nfs4_slot *new, *old;
+ int i;
+
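+	/* Resize the forechannel slot table to the server's requested
+	 * size, preserving each slot's sequence number across the swap. */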
+ nfs4_begin_drain_session(clp);
+ new = kmalloc(fc_tbl->target_max_slots * sizeof(struct nfs4_slot),
+ GFP_NOFS);
+ if (!new)
+ return -ENOMEM;
+
+ spin_lock(&fc_tbl->slot_tbl_lock);
+ for (i = 0; i < fc_tbl->target_max_slots; i++)
+ new[i].seq_nr = fc_tbl->slots[i].seq_nr;
+ old = fc_tbl->slots;
+ fc_tbl->slots = new;
+ fc_tbl->max_slots = fc_tbl->target_max_slots;
+ fc_tbl->target_max_slots = 0;
+ fc_attrs->max_reqs = fc_tbl->max_slots;
+ spin_unlock(&fc_tbl->slot_tbl_lock);
+
+ kfree(old);
+ nfs4_end_drain_session(clp);
+ return 0;
+}
+
+#else /* CONFIG_NFS_V4_1 */
+static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
+static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
+static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
+#endif /* CONFIG_NFS_V4_1 */
+
+/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
+ * on EXCHANGE_ID for v4.1
+ */
+static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
+{
+ switch (status) {
+ case -NFS4ERR_CLID_INUSE:
+ case -NFS4ERR_STALE_CLIENTID:
+ clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
+ break;
+ case -NFS4ERR_DELAY:
+ case -ETIMEDOUT:
+ case -EAGAIN:
+ ssleep(1);
+ break;
+
+ case -EKEYEXPIRED:
+ nfs4_warn_keyexpired(clp->cl_hostname);
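+		/* fall through */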
+ case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
+ * in nfs4_exchange_id */
+ default:
+ return;
+ }
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+}
+
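+/*
+ * Main loop of the state manager thread: run each recovery stage in
+ * order (lease, session reset, reboot reclaim, nograce reclaim,
+ * delegation return, slot recall), restarting from the top whenever
+ * an earlier stage is re-triggered.
+ */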
+static void nfs4_state_manager(struct nfs_client *clp)
+{
+ int status = 0;
+
+ /* Ensure exclusive access to NFSv4 state */
+ do {
+ if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
+ /* We're going to have to re-establish a clientid */
+ status = nfs4_reclaim_lease(clp);
+ if (status) {
+ nfs4_set_lease_expired(clp, status);
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED,
+ &clp->cl_state))
+ continue;
+ if (clp->cl_cons_state ==
+ NFS_CS_SESSION_INITING)
+ nfs_mark_client_ready(clp, status);
+ goto out_error;
+ }
+ clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
+
+ if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH,
+ &clp->cl_state))
+ nfs4_state_start_reclaim_nograce(clp);
+ else
+ set_bit(NFS4CLNT_RECLAIM_REBOOT,
+ &clp->cl_state);
+
+ pnfs_destroy_all_layouts(clp);
+ }
+
+ if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
+ status = nfs4_check_lease(clp);
+ if (status < 0)
+ goto out_error;
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ continue;
+ }
+
+ /* Initialize or reset the session */
+ if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
+ && nfs4_has_session(clp)) {
+ status = nfs4_reset_session(clp);
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
+ continue;
+ if (status < 0)
+ goto out_error;
+ }
+
+ /* First recover reboot state... */
+ if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
+ status = nfs4_do_reclaim(clp,
+ clp->cl_mvops->reboot_recovery_ops);
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+ test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
+ continue;
+ nfs4_state_end_reclaim_reboot(clp);
+ if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state))
+ continue;
+ if (status < 0)
+ goto out_error;
+ }
+
+ /* Now recover expired state... */
+ if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
+ status = nfs4_do_reclaim(clp,
+ clp->cl_mvops->nograce_recovery_ops);
+ if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) ||
+ test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) ||
+ test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+ continue;
+ if (status < 0)
+ goto out_error;
+ }
+
+ nfs4_end_drain_session(clp);
+ if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
+ nfs_client_return_marked_delegations(clp);
+ continue;
+ }
+ /* Recall session slots */
+ if (test_and_clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state)
+ && nfs4_has_session(clp)) {
+ status = nfs4_recall_slot(clp);
+ if (status < 0)
+ goto out_error;
+ continue;
+ }
+
+ nfs4_clear_state_manager_bit(clp);
+ /* Did we race with an attempt to give us more work? */
+ if (clp->cl_state == 0)
+ break;
+ if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
+ break;
+ } while (atomic_read(&clp->cl_count) > 1);
+ return;
+out_error:
+ pr_warn_ratelimited("NFS: state manager failed on NFSv4 server %s"
+ " with error %d\n", clp->cl_hostname, -status);
+ nfs4_end_drain_session(clp);
+ nfs4_clear_state_manager_bit(clp);
+}
+
+static int nfs4_run_state_manager(void *ptr)
+{
+ struct nfs_client *clp = ptr;
+
+ allow_signal(SIGKILL);
+ nfs4_state_manager(clp);
+ nfs_put_client(clp);
+ module_put_and_exit(0);
+ return 0;
+}
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
new file mode 100644
index 00000000..c54aae36
--- /dev/null
+++ b/fs/nfs/nfs4xdr.c
@@ -0,0 +1,7101 @@
+/*
+ * fs/nfs/nfs4xdr.c
+ *
+ * Client-side XDR for NFSv4.
+ *
+ * Copyright (c) 2002 The Regents of the University of Michigan.
+ * All rights reserved.
+ *
+ * Kendrick Smith <kmsmith@umich.edu>
+ * Andy Adamson <andros@umich.edu>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
+#include <linux/kdev_t.h>
+#include <linux/module.h>
+#include <linux/utsname.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/msg_prot.h>
+#include <linux/sunrpc/gss_api.h>
+#include <linux/nfs.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_idmap.h>
+#include "nfs4_fs.h"
+#include "internal.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_XDR
+
+/* Mapping from NFS error code to "errno" error code. */
+#define errno_NFSERR_IO EIO
+
+static int nfs4_stat_to_errno(int);
+
+/* NFSv4 COMPOUND tags are only wanted for debugging purposes */
+#ifdef DEBUG
+#define NFS4_MAXTAGLEN 20
+#else
+#define NFS4_MAXTAGLEN 0
+#endif
+
+/* lock,open owner id:
+ * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
+ */
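+/* Note: the size estimates below are in units of 32-bit XDR words;
+ * XDR_QUADLEN(n) rounds n bytes up to a whole number of words.
+ */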
+#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2)
+#define lock_owner_id_maxsz (1 + 1 + 4)
+#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
+#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
+#define op_encode_hdr_maxsz (1)
+#define op_decode_hdr_maxsz (2)
+#define encode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_stateid_maxsz (XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define encode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
+#define decode_verifier_maxsz (XDR_QUADLEN(NFS4_VERIFIER_SIZE))
+#define encode_putfh_maxsz (op_encode_hdr_maxsz + 1 + \
+ (NFS4_FHSIZE >> 2))
+#define decode_putfh_maxsz (op_decode_hdr_maxsz)
+#define encode_putrootfh_maxsz (op_encode_hdr_maxsz)
+#define decode_putrootfh_maxsz (op_decode_hdr_maxsz)
+#define encode_getfh_maxsz (op_encode_hdr_maxsz)
+#define decode_getfh_maxsz (op_decode_hdr_maxsz + 1 + \
+ ((3+NFS4_FHSIZE) >> 2))
+#define nfs4_fattr_bitmap_maxsz 4
+#define encode_getattr_maxsz (op_encode_hdr_maxsz + nfs4_fattr_bitmap_maxsz)
+#define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
+#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
+#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+#define nfs4_group_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+/* This is based on getfattr, which uses the most attributes: */
+#define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
+ 3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz))
+#define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \
+ nfs4_fattr_value_maxsz)
+#define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
+#define encode_attrs_maxsz (nfs4_fattr_bitmap_maxsz + \
+ 1 + 2 + 1 + \
+ nfs4_owner_maxsz + \
+ nfs4_group_maxsz + \
+ 4 + 4)
+#define encode_savefh_maxsz (op_encode_hdr_maxsz)
+#define decode_savefh_maxsz (op_decode_hdr_maxsz)
+#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
+#define decode_restorefh_maxsz (op_decode_hdr_maxsz)
+#define encode_fsinfo_maxsz (encode_getattr_maxsz)
+/* The 5 accounts for the PNFS attributes, and assumes that at most three
+ * layout types will be returned.
+ */
+#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + \
+ nfs4_fattr_bitmap_maxsz + 4 + 8 + 5)
+#define encode_renew_maxsz (op_encode_hdr_maxsz + 3)
+#define decode_renew_maxsz (op_decode_hdr_maxsz)
+#define encode_setclientid_maxsz \
+ (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_VERIFIER_SIZE) + \
+ XDR_QUADLEN(NFS4_SETCLIENTID_NAMELEN) + \
+ 1 /* sc_prog */ + \
+ XDR_QUADLEN(RPCBIND_MAXNETIDLEN) + \
+ XDR_QUADLEN(RPCBIND_MAXUADDRLEN) + \
+ 1) /* sc_cb_ident */
+#define decode_setclientid_maxsz \
+ (op_decode_hdr_maxsz + \
+ 2 + \
+ 1024) /* large value for CLID_INUSE */
+#define encode_setclientid_confirm_maxsz \
+ (op_encode_hdr_maxsz + \
+ 3 + (NFS4_VERIFIER_SIZE >> 2))
+#define decode_setclientid_confirm_maxsz \
+ (op_decode_hdr_maxsz)
+#define encode_lookup_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
+#define decode_lookup_maxsz (op_decode_hdr_maxsz)
+#define encode_share_access_maxsz \
+ (2)
+#define encode_createmode_maxsz (1 + encode_attrs_maxsz + encode_verifier_maxsz)
+#define encode_opentype_maxsz (1 + encode_createmode_maxsz)
+#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
+#define encode_open_maxsz (op_encode_hdr_maxsz + \
+ 2 + encode_share_access_maxsz + 2 + \
+ open_owner_id_maxsz + \
+ encode_opentype_maxsz + \
+ encode_claim_null_maxsz)
+#define decode_ace_maxsz (3 + nfs4_owner_maxsz)
+#define decode_delegation_maxsz (1 + decode_stateid_maxsz + 1 + \
+ decode_ace_maxsz)
+#define decode_change_info_maxsz (5)
+#define decode_open_maxsz (op_decode_hdr_maxsz + \
+ decode_stateid_maxsz + \
+ decode_change_info_maxsz + 1 + \
+ nfs4_fattr_bitmap_maxsz + \
+ decode_delegation_maxsz)
+#define encode_open_confirm_maxsz \
+ (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 1)
+#define decode_open_confirm_maxsz \
+ (op_decode_hdr_maxsz + \
+ decode_stateid_maxsz)
+#define encode_open_downgrade_maxsz \
+ (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 1 + \
+ encode_share_access_maxsz)
+#define decode_open_downgrade_maxsz \
+ (op_decode_hdr_maxsz + \
+ decode_stateid_maxsz)
+#define encode_close_maxsz (op_encode_hdr_maxsz + \
+ 1 + encode_stateid_maxsz)
+#define decode_close_maxsz (op_decode_hdr_maxsz + \
+ decode_stateid_maxsz)
+#define encode_setattr_maxsz (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + \
+ encode_attrs_maxsz)
+#define decode_setattr_maxsz (op_decode_hdr_maxsz + \
+ nfs4_fattr_bitmap_maxsz)
+#define encode_read_maxsz (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 3)
+#define decode_read_maxsz (op_decode_hdr_maxsz + 2)
+#define encode_readdir_maxsz (op_encode_hdr_maxsz + \
+ 2 + encode_verifier_maxsz + 5)
+#define decode_readdir_maxsz (op_decode_hdr_maxsz + \
+ decode_verifier_maxsz)
+#define encode_readlink_maxsz (op_encode_hdr_maxsz)
+#define decode_readlink_maxsz (op_decode_hdr_maxsz + 1)
+#define encode_write_maxsz (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 4)
+#define decode_write_maxsz (op_decode_hdr_maxsz + \
+ 2 + decode_verifier_maxsz)
+#define encode_commit_maxsz (op_encode_hdr_maxsz + 3)
+#define decode_commit_maxsz (op_decode_hdr_maxsz + \
+ decode_verifier_maxsz)
+#define encode_remove_maxsz (op_encode_hdr_maxsz + \
+ nfs4_name_maxsz)
+#define decode_remove_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz)
+#define encode_rename_maxsz (op_encode_hdr_maxsz + \
+ 2 * nfs4_name_maxsz)
+#define decode_rename_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz + \
+ decode_change_info_maxsz)
+#define encode_link_maxsz (op_encode_hdr_maxsz + \
+ nfs4_name_maxsz)
+#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
+#define encode_lockowner_maxsz (7)
+#define encode_lock_maxsz (op_encode_hdr_maxsz + \
+ 7 + \
+ 1 + encode_stateid_maxsz + 1 + \
+ encode_lockowner_maxsz)
+#define decode_lock_denied_maxsz \
+ (8 + decode_lockowner_maxsz)
+#define decode_lock_maxsz (op_decode_hdr_maxsz + \
+ decode_lock_denied_maxsz)
+#define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \
+ encode_lockowner_maxsz)
+#define decode_lockt_maxsz (op_decode_hdr_maxsz + \
+ decode_lock_denied_maxsz)
+#define encode_locku_maxsz (op_encode_hdr_maxsz + 3 + \
+ encode_stateid_maxsz + \
+ 4)
+#define decode_locku_maxsz (op_decode_hdr_maxsz + \
+ decode_stateid_maxsz)
+#define encode_release_lockowner_maxsz \
+ (op_encode_hdr_maxsz + \
+ encode_lockowner_maxsz)
+#define decode_release_lockowner_maxsz \
+ (op_decode_hdr_maxsz)
+#define encode_access_maxsz (op_encode_hdr_maxsz + 1)
+#define decode_access_maxsz (op_decode_hdr_maxsz + 2)
+#define encode_symlink_maxsz (op_encode_hdr_maxsz + \
+ 1 + nfs4_name_maxsz + \
+ 1 + \
+ nfs4_fattr_maxsz)
+#define decode_symlink_maxsz (op_decode_hdr_maxsz + 8)
+#define encode_create_maxsz (op_encode_hdr_maxsz + \
+ 1 + 2 + nfs4_name_maxsz + \
+ encode_attrs_maxsz)
+#define decode_create_maxsz (op_decode_hdr_maxsz + \
+ decode_change_info_maxsz + \
+ nfs4_fattr_bitmap_maxsz)
+#define encode_statfs_maxsz (encode_getattr_maxsz)
+#define decode_statfs_maxsz (decode_getattr_maxsz)
+#define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4)
+#define decode_delegreturn_maxsz (op_decode_hdr_maxsz)
+#define encode_getacl_maxsz (encode_getattr_maxsz)
+#define decode_getacl_maxsz (op_decode_hdr_maxsz + \
+ nfs4_fattr_bitmap_maxsz + 1)
+#define encode_setacl_maxsz (op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + 3)
+#define decode_setacl_maxsz (decode_setattr_maxsz)
+#define encode_fs_locations_maxsz \
+ (encode_getattr_maxsz)
+#define decode_fs_locations_maxsz \
+ (0)
+#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
+#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4))
+
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_MAX_MACHINE_NAME_LEN (64)
+
+#define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \
+ encode_verifier_maxsz + \
+ 1 /* co_ownerid.len */ + \
+ XDR_QUADLEN(NFS4_EXCHANGE_ID_LEN) + \
+ 1 /* flags */ + \
+ 1 /* spa_how */ + \
+ 0 /* SP4_NONE (for now) */ + \
+ 1 /* implementation id array of size 1 */ + \
+ 1 /* nii_domain */ + \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \
+ 1 /* nii_name */ + \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \
+ 3 /* nii_date */)
+#define decode_exchange_id_maxsz (op_decode_hdr_maxsz + \
+ 2 /* eir_clientid */ + \
+ 1 /* eir_sequenceid */ + \
+ 1 /* eir_flags */ + \
+ 1 /* spr_how */ + \
+ 0 /* SP4_NONE (for now) */ + \
+ 2 /* eir_server_owner.so_minor_id */ + \
+ /* eir_server_owner.so_major_id<> */ \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
+ /* eir_server_scope<> */ \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 + \
+ 1 /* eir_server_impl_id array length */ + \
+ 1 /* nii_domain */ + \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \
+ 1 /* nii_name */ + \
+ XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \
+ 3 /* nii_date */)
+#define encode_channel_attrs_maxsz (6 + 1 /* ca_rdma_ird.len (0) */)
+#define decode_channel_attrs_maxsz (6 + \
+ 1 /* ca_rdma_ird.len */ + \
+ 1 /* ca_rdma_ird */)
+#define encode_create_session_maxsz (op_encode_hdr_maxsz + \
+ 2 /* csa_clientid */ + \
+ 1 /* csa_sequence */ + \
+ 1 /* csa_flags */ + \
+ encode_channel_attrs_maxsz + \
+ encode_channel_attrs_maxsz + \
+ 1 /* csa_cb_program */ + \
+ 1 /* csa_sec_parms.len (1) */ + \
+ 1 /* cb_secflavor (AUTH_SYS) */ + \
+ 1 /* stamp */ + \
+ 1 /* machinename.len */ + \
+ XDR_QUADLEN(NFS4_MAX_MACHINE_NAME_LEN) + \
+ 1 /* uid */ + \
+ 1 /* gid */ + \
+ 1 /* gids.len (0) */)
+#define decode_create_session_maxsz (op_decode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
+ 1 /* csr_sequence */ + \
+ 1 /* csr_flags */ + \
+ decode_channel_attrs_maxsz + \
+ decode_channel_attrs_maxsz)
+#define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4)
+#define decode_destroy_session_maxsz (op_decode_hdr_maxsz)
+#define encode_sequence_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
+#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5)
+#define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4)
+#define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4)
+#define encode_getdevicelist_maxsz (op_encode_hdr_maxsz + 4 + \
+ encode_verifier_maxsz)
+#define decode_getdevicelist_maxsz (op_decode_hdr_maxsz + \
+ 2 /* nfs_cookie4 gdlr_cookie */ + \
+ decode_verifier_maxsz \
+ /* verifier4 gdlr_verifier */ + \
+ 1 /* gdlr_deviceid_list count */ + \
+ XDR_QUADLEN(NFS4_PNFS_GETDEVLIST_MAXNUM * \
+ NFS4_DEVICEID4_SIZE) \
+ /* gdlr_deviceid_list */ + \
+ 1 /* bool gdlr_eof */)
+#define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + 4 + \
+ XDR_QUADLEN(NFS4_DEVICEID4_SIZE))
+#define decode_getdeviceinfo_maxsz (op_decode_hdr_maxsz + \
+ 1 /* layout type */ + \
+ 1 /* opaque devaddr4 length */ + \
+ /* devaddr4 payload is read into page */ \
+ 1 /* notification bitmap length */ + \
+ 1 /* notification bitmap */)
+#define encode_layoutget_maxsz (op_encode_hdr_maxsz + 10 + \
+ encode_stateid_maxsz)
+#define decode_layoutget_maxsz (op_decode_hdr_maxsz + 8 + \
+ decode_stateid_maxsz + \
+ XDR_QUADLEN(PNFS_LAYOUT_MAXSIZE))
+#define encode_layoutcommit_maxsz (op_encode_hdr_maxsz + \
+ 2 /* offset */ + \
+ 2 /* length */ + \
+ 1 /* reclaim */ + \
+ encode_stateid_maxsz + \
+ 1 /* new offset (true) */ + \
+ 2 /* last byte written */ + \
+ 1 /* nt_timechanged (false) */ + \
+ 1 /* layoutupdate4 layout type */ + \
+ 1 /* NULL filelayout layoutupdate4 payload */)
+#define decode_layoutcommit_maxsz (op_decode_hdr_maxsz + 3)
+#define encode_layoutreturn_maxsz (8 + op_encode_hdr_maxsz + \
+ encode_stateid_maxsz + \
+ 1 /* FIXME: opaque lrf_body always empty at the moment */)
+#define decode_layoutreturn_maxsz (op_decode_hdr_maxsz + \
+ 1 + decode_stateid_maxsz)
+#define encode_secinfo_no_name_maxsz (op_encode_hdr_maxsz + 1)
+#define decode_secinfo_no_name_maxsz decode_secinfo_maxsz
+#define encode_test_stateid_maxsz (op_encode_hdr_maxsz + 2 + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_test_stateid_maxsz (op_decode_hdr_maxsz + 2 + 1)
+#define encode_free_stateid_maxsz (op_encode_hdr_maxsz + 1 + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_free_stateid_maxsz (op_decode_hdr_maxsz + 1)
+#else /* CONFIG_NFS_V4_1 */
+#define encode_sequence_maxsz 0
+#define decode_sequence_maxsz 0
+#endif /* CONFIG_NFS_V4_1 */
+
+#define NFS4_enc_compound_sz (1024) /* XXX: large enough? */
+#define NFS4_dec_compound_sz (1024) /* XXX: large enough? */
+#define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_read_maxsz)
+#define NFS4_dec_read_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_read_maxsz)
+#define NFS4_enc_readlink_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_readlink_maxsz)
+#define NFS4_dec_readlink_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_readlink_maxsz)
+#define NFS4_enc_readdir_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_readdir_maxsz)
+#define NFS4_dec_readdir_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_readdir_maxsz)
+#define NFS4_enc_write_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_write_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_write_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_write_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_commit_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_commit_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_savefh_maxsz + \
+ encode_open_maxsz + \
+ encode_getfh_maxsz + \
+ encode_getattr_maxsz + \
+ encode_restorefh_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_savefh_maxsz + \
+ decode_open_maxsz + \
+ decode_getfh_maxsz + \
+ decode_getattr_maxsz + \
+ decode_restorefh_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_open_confirm_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_putfh_maxsz + \
+ encode_open_confirm_maxsz)
+#define NFS4_dec_open_confirm_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_putfh_maxsz + \
+ decode_open_confirm_maxsz)
+#define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_open_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_open_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_open_downgrade_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_open_downgrade_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_open_downgrade_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_open_downgrade_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_close_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_close_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_close_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_close_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_setattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_setattr_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_setattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_setattr_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_fsinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_fsinfo_maxsz)
+#define NFS4_dec_fsinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_fsinfo_maxsz)
+#define NFS4_enc_renew_sz (compound_encode_hdr_maxsz + \
+ encode_renew_maxsz)
+#define NFS4_dec_renew_sz (compound_decode_hdr_maxsz + \
+ decode_renew_maxsz)
+#define NFS4_enc_setclientid_sz (compound_encode_hdr_maxsz + \
+ encode_setclientid_maxsz)
+#define NFS4_dec_setclientid_sz (compound_decode_hdr_maxsz + \
+ decode_setclientid_maxsz)
+#define NFS4_enc_setclientid_confirm_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_setclientid_confirm_maxsz + \
+ encode_putrootfh_maxsz + \
+ encode_fsinfo_maxsz)
+#define NFS4_dec_setclientid_confirm_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_setclientid_confirm_maxsz + \
+ decode_putrootfh_maxsz + \
+ decode_fsinfo_maxsz)
+#define NFS4_enc_lock_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_lock_maxsz)
+#define NFS4_dec_lock_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_lock_maxsz)
+#define NFS4_enc_lockt_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_lockt_maxsz)
+#define NFS4_dec_lockt_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_lockt_maxsz)
+#define NFS4_enc_locku_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_locku_maxsz)
+#define NFS4_dec_locku_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_locku_maxsz)
+#define NFS4_enc_release_lockowner_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_lockowner_maxsz)
+#define NFS4_dec_release_lockowner_sz \
+ (compound_decode_hdr_maxsz + \
+				 decode_release_lockowner_maxsz)
+#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_access_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_access_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_access_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_lookup_maxsz + \
+ encode_getattr_maxsz + \
+ encode_getfh_maxsz)
+#define NFS4_dec_lookup_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_lookup_maxsz + \
+ decode_getattr_maxsz + \
+ decode_getfh_maxsz)
+#define NFS4_enc_lookup_root_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putrootfh_maxsz + \
+ encode_getattr_maxsz + \
+ encode_getfh_maxsz)
+#define NFS4_dec_lookup_root_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putrootfh_maxsz + \
+ decode_getattr_maxsz + \
+ decode_getfh_maxsz)
+#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_remove_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_remove_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_savefh_maxsz + \
+ encode_putfh_maxsz + \
+ encode_rename_maxsz + \
+ encode_getattr_maxsz + \
+ encode_restorefh_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_savefh_maxsz + \
+ decode_putfh_maxsz + \
+ decode_rename_maxsz + \
+ decode_getattr_maxsz + \
+ decode_restorefh_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_savefh_maxsz + \
+ encode_putfh_maxsz + \
+ encode_link_maxsz + \
+				encode_getattr_maxsz + \
+ encode_restorefh_maxsz + \
+				encode_getattr_maxsz)
+#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_savefh_maxsz + \
+ decode_putfh_maxsz + \
+ decode_link_maxsz + \
+ decode_getattr_maxsz + \
+ decode_restorefh_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_symlink_maxsz + \
+ encode_getattr_maxsz + \
+ encode_getfh_maxsz)
+#define NFS4_dec_symlink_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_symlink_maxsz + \
+ decode_getattr_maxsz + \
+ decode_getfh_maxsz)
+#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_savefh_maxsz + \
+ encode_create_maxsz + \
+ encode_getfh_maxsz + \
+ encode_getattr_maxsz + \
+ encode_restorefh_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_savefh_maxsz + \
+ decode_create_maxsz + \
+ decode_getfh_maxsz + \
+ decode_getattr_maxsz + \
+ decode_restorefh_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_pathconf_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_statfs_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_statfs_maxsz)
+#define NFS4_dec_statfs_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_statfs_maxsz)
+#define NFS4_enc_server_caps_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_delegreturn_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
+				decode_sequence_maxsz + \
+				decode_putfh_maxsz + \
+ decode_delegreturn_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_getacl_maxsz)
+#define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_getacl_maxsz)
+#define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_setacl_maxsz)
+#define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_setacl_maxsz)
+#define NFS4_enc_fs_locations_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_lookup_maxsz + \
+ encode_fs_locations_maxsz)
+#define NFS4_dec_fs_locations_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_lookup_maxsz + \
+ decode_fs_locations_maxsz)
+#define NFS4_enc_secinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_secinfo_maxsz)
+#define NFS4_dec_secinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_secinfo_maxsz)
+#if defined(CONFIG_NFS_V4_1)
+#define NFS4_enc_exchange_id_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_exchange_id_maxsz)
+#define NFS4_dec_exchange_id_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_exchange_id_maxsz)
+#define NFS4_enc_create_session_sz \
+ (compound_encode_hdr_maxsz + \
+ encode_create_session_maxsz)
+#define NFS4_dec_create_session_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_create_session_maxsz)
+#define NFS4_enc_destroy_session_sz (compound_encode_hdr_maxsz + \
+ encode_destroy_session_maxsz)
+#define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \
+ decode_destroy_session_maxsz)
+#define NFS4_enc_sequence_sz \
+				(compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz)
+#define NFS4_dec_sequence_sz \
+ (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz)
+#define NFS4_enc_get_lease_time_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putrootfh_maxsz + \
+ encode_fsinfo_maxsz)
+#define NFS4_dec_get_lease_time_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putrootfh_maxsz + \
+ decode_fsinfo_maxsz)
+#define NFS4_enc_reclaim_complete_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_reclaim_complete_maxsz)
+#define NFS4_dec_reclaim_complete_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_reclaim_complete_maxsz)
+#define NFS4_enc_getdevicelist_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_getdevicelist_maxsz)
+#define NFS4_dec_getdevicelist_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_getdevicelist_maxsz)
+#define NFS4_enc_getdeviceinfo_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz +\
+ encode_getdeviceinfo_maxsz)
+#define NFS4_dec_getdeviceinfo_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_getdeviceinfo_maxsz)
+#define NFS4_enc_layoutget_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_layoutget_maxsz)
+#define NFS4_dec_layoutget_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_layoutget_maxsz)
+#define NFS4_enc_layoutcommit_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz +\
+ encode_putfh_maxsz + \
+ encode_layoutcommit_maxsz + \
+ encode_getattr_maxsz)
+#define NFS4_dec_layoutcommit_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_layoutcommit_maxsz + \
+ decode_getattr_maxsz)
+#define NFS4_enc_layoutreturn_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_layoutreturn_maxsz)
+#define NFS4_dec_layoutreturn_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_layoutreturn_maxsz)
+#define NFS4_enc_secinfo_no_name_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putrootfh_maxsz +\
+ encode_secinfo_no_name_maxsz)
+#define NFS4_dec_secinfo_no_name_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putrootfh_maxsz + \
+ decode_secinfo_no_name_maxsz)
+#define NFS4_enc_test_stateid_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_test_stateid_maxsz)
+#define NFS4_dec_test_stateid_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_test_stateid_maxsz)
+#define NFS4_enc_free_stateid_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_free_stateid_maxsz)
+#define NFS4_dec_free_stateid_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_free_stateid_maxsz)
+
+const u32 nfs41_maxwrite_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+ compound_encode_hdr_maxsz +
+ encode_sequence_maxsz +
+ encode_putfh_maxsz +
+ encode_getattr_maxsz) *
+ XDR_UNIT);
+
+const u32 nfs41_maxread_overhead = ((RPC_MAX_HEADER_WITH_AUTH +
+ compound_decode_hdr_maxsz +
+ decode_sequence_maxsz +
+ decode_putfh_maxsz) *
+ XDR_UNIT);
+#endif /* CONFIG_NFS_V4_1 */
+
+static unsigned short send_implementation_id = 1;
+
+module_param(send_implementation_id, ushort, 0644);
+MODULE_PARM_DESC(send_implementation_id,
+ "Send implementation ID with NFSv4.1 exchange_id");
+
+static const umode_t nfs_type2fmt[] = {
+ [NF4BAD] = 0,
+ [NF4REG] = S_IFREG,
+ [NF4DIR] = S_IFDIR,
+ [NF4BLK] = S_IFBLK,
+ [NF4CHR] = S_IFCHR,
+ [NF4LNK] = S_IFLNK,
+ [NF4SOCK] = S_IFSOCK,
+ [NF4FIFO] = S_IFIFO,
+ [NF4ATTRDIR] = 0,
+ [NF4NAMEDATTR] = 0,
+};
+
+struct compound_hdr {
+ int32_t status;
+ uint32_t nops;
+ __be32 * nops_p;
+ uint32_t taglen;
+ char * tag;
+ uint32_t replen; /* expected reply words */
+ u32 minorversion;
+};
+
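+/*
+ * xdr_reserve_space() should not fail here: the request buffer is
+ * sized up front from the *_maxsz worst-case estimates above, so the
+ * BUG_ON() below fires only if one of those estimates undercounts an
+ * operation's encoded size, not on an ordinary allocation failure.
+ */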
+static __be32 *reserve_space(struct xdr_stream *xdr, size_t nbytes)
+{
+ __be32 *p = xdr_reserve_space(xdr, nbytes);
+ BUG_ON(!p);
+ return p;
+}
+
+static void encode_opaque_fixed(struct xdr_stream *xdr, const void *buf, size_t len)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, len);
+ xdr_encode_opaque_fixed(p, buf, len);
+}
+
+static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 4 + len);
+ xdr_encode_opaque(p, str, len);
+}
+
+static void encode_uint32(struct xdr_stream *xdr, u32 n)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(n);
+}
+
+static void encode_uint64(struct xdr_stream *xdr, u64 n)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 8);
+ xdr_encode_hyper(p, n);
+}
+
+static void encode_nfs4_seqid(struct xdr_stream *xdr,
+ const struct nfs_seqid *seqid)
+{
+ encode_uint32(xdr, seqid->sequence->counter);
+}
+
+static void encode_compound_hdr(struct xdr_stream *xdr,
+ struct rpc_rqst *req,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ struct rpc_auth *auth = req->rq_cred->cr_auth;
+
+ /* initialize running count of expected bytes in reply.
+ * NOTE: the replied tag SHOULD be the same as the one sent,
+ * but this is not required as a MUST for the server to do so. */
+ hdr->replen = RPC_REPHDRSIZE + auth->au_rslack + 3 + hdr->taglen;
+
+ BUG_ON(hdr->taglen > NFS4_MAXTAGLEN);
+ encode_string(xdr, hdr->taglen, hdr->tag);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(hdr->minorversion);
+ hdr->nops_p = p;
+ *p = cpu_to_be32(hdr->nops);
+}
+
+static void encode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 op,
+ uint32_t replen,
+ struct compound_hdr *hdr)
+{
+ encode_uint32(xdr, op);
+ hdr->nops++;
+ hdr->replen += replen;
+}
+
+static void encode_nops(struct compound_hdr *hdr)
+{
+ BUG_ON(hdr->nops > NFS4_MAX_OPS);
+ *hdr->nops_p = htonl(hdr->nops);
+}
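+
+/*
+ * Two-pass op counting: encode_compound_hdr() records the address of
+ * the numops word in hdr->nops_p, each encode_op_hdr() call bumps
+ * hdr->nops and adds the op's worst-case reply size to hdr->replen,
+ * and encode_nops() backfills the final count once the compound has
+ * been fully encoded.
+ */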
+
+static void encode_nfs4_stateid(struct xdr_stream *xdr, const nfs4_stateid *stateid)
+{
+ encode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
+}
+
+static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf)
+{
+ encode_opaque_fixed(xdr, verf->data, NFS4_VERIFIER_SIZE);
+}
+
+static void encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server)
+{
+ char owner_name[IDMAP_NAMESZ];
+ char owner_group[IDMAP_NAMESZ];
+ int owner_namelen = 0;
+ int owner_grouplen = 0;
+ __be32 *p;
+ __be32 *q;
+ int len;
+ uint32_t bmval0 = 0;
+ uint32_t bmval1 = 0;
+
+ /*
+ * We reserve enough space to write the entire attribute buffer at once.
+ * In the worst-case, this would be
+ * 12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime)
+ * = 36 bytes, plus any contribution from variable-length fields
+ * such as owner/group.
+ */
+ len = 16;
+
+ /* Sigh */
+ if (iap->ia_valid & ATTR_SIZE)
+ len += 8;
+ if (iap->ia_valid & ATTR_MODE)
+ len += 4;
+ if (iap->ia_valid & ATTR_UID) {
+ owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, owner_name, IDMAP_NAMESZ);
+ if (owner_namelen < 0) {
+ dprintk("nfs: couldn't resolve uid %d to string\n",
+ iap->ia_uid);
+ /* XXX */
+ strcpy(owner_name, "nobody");
+ owner_namelen = sizeof("nobody") - 1;
+ /* goto out; */
+ }
+ len += 4 + (XDR_QUADLEN(owner_namelen) << 2);
+ }
+ if (iap->ia_valid & ATTR_GID) {
+ owner_grouplen = nfs_map_gid_to_group(server, iap->ia_gid, owner_group, IDMAP_NAMESZ);
+ if (owner_grouplen < 0) {
+ dprintk("nfs: couldn't resolve gid %d to string\n",
+ iap->ia_gid);
+ strcpy(owner_group, "nobody");
+ owner_grouplen = sizeof("nobody") - 1;
+ /* goto out; */
+ }
+ len += 4 + (XDR_QUADLEN(owner_grouplen) << 2);
+ }
+ if (iap->ia_valid & ATTR_ATIME_SET)
+ len += 16;
+ else if (iap->ia_valid & ATTR_ATIME)
+ len += 4;
+ if (iap->ia_valid & ATTR_MTIME_SET)
+ len += 16;
+ else if (iap->ia_valid & ATTR_MTIME)
+ len += 4;
+ p = reserve_space(xdr, len);
+
+ /*
+ * We write the bitmap length now, but leave the bitmap and the attribute
+ * buffer length to be backfilled at the end of this routine.
+ */
+ *p++ = cpu_to_be32(2);
+ q = p;
+ p += 3;
+
+ if (iap->ia_valid & ATTR_SIZE) {
+ bmval0 |= FATTR4_WORD0_SIZE;
+ p = xdr_encode_hyper(p, iap->ia_size);
+ }
+ if (iap->ia_valid & ATTR_MODE) {
+ bmval1 |= FATTR4_WORD1_MODE;
+ *p++ = cpu_to_be32(iap->ia_mode & S_IALLUGO);
+ }
+ if (iap->ia_valid & ATTR_UID) {
+ bmval1 |= FATTR4_WORD1_OWNER;
+ p = xdr_encode_opaque(p, owner_name, owner_namelen);
+ }
+ if (iap->ia_valid & ATTR_GID) {
+ bmval1 |= FATTR4_WORD1_OWNER_GROUP;
+ p = xdr_encode_opaque(p, owner_group, owner_grouplen);
+ }
+ if (iap->ia_valid & ATTR_ATIME_SET) {
+ bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
+ *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
+ *p++ = cpu_to_be32(0);
+ *p++ = cpu_to_be32(iap->ia_atime.tv_sec);
+ *p++ = cpu_to_be32(iap->ia_atime.tv_nsec);
+ } else if (iap->ia_valid & ATTR_ATIME) {
+ bmval1 |= FATTR4_WORD1_TIME_ACCESS_SET;
+ *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
+ }
+ if (iap->ia_valid & ATTR_MTIME_SET) {
+ bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
+ *p++ = cpu_to_be32(NFS4_SET_TO_CLIENT_TIME);
+ *p++ = cpu_to_be32(0);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_sec);
+ *p++ = cpu_to_be32(iap->ia_mtime.tv_nsec);
+ } else if (iap->ia_valid & ATTR_MTIME) {
+ bmval1 |= FATTR4_WORD1_TIME_MODIFY_SET;
+ *p++ = cpu_to_be32(NFS4_SET_TO_SERVER_TIME);
+ }
+
+ /*
+ * Now we backfill the bitmap and the attribute buffer length.
+ */
+ if (len != ((char *)p - (char *)q) + 4) {
+ printk(KERN_ERR "NFS: Attr length error, %u != %Zu\n",
+ len, ((char *)p - (char *)q) + 4);
+ BUG();
+ }
+ len = (char *)p - (char *)q - 12;
+ *q++ = htonl(bmval0);
+ *q++ = htonl(bmval1);
+ *q = htonl(len);
+
+/* out: */
+}
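+
+/*
+ * Worked example of the length bookkeeping above: a mode-only setattr
+ * (ATTR_MODE) computes len = 16 + 4 = 20 bytes, i.e. bitmap-count
+ * word (4) + two bitmap words (8) + attrlen word (4) + mode (4).
+ * After encoding, (char *)p - (char *)q == 16, the consistency check
+ * 16 + 4 == len passes, and the backfilled attrlen is 16 - 12 = 4.
+ */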
+
+static void encode_access(struct xdr_stream *xdr, u32 access, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_ACCESS, decode_access_maxsz, hdr);
+ encode_uint32(xdr, access);
+}
+
+static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_CLOSE, decode_close_maxsz, hdr);
+ encode_nfs4_seqid(xdr, arg->seqid);
+ encode_nfs4_stateid(xdr, arg->stateid);
+}
+
+static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_COMMIT, decode_commit_maxsz, hdr);
+ p = reserve_space(xdr, 12);
+ p = xdr_encode_hyper(p, args->offset);
+ *p = cpu_to_be32(args->count);
+}
+
+static void encode_create(struct xdr_stream *xdr, const struct nfs4_create_arg *create, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_CREATE, decode_create_maxsz, hdr);
+ encode_uint32(xdr, create->ftype);
+
+ switch (create->ftype) {
+ case NF4LNK:
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(create->u.symlink.len);
+ xdr_write_pages(xdr, create->u.symlink.pages, 0, create->u.symlink.len);
+ break;
+
+ case NF4BLK: case NF4CHR:
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(create->u.device.specdata1);
+ *p = cpu_to_be32(create->u.device.specdata2);
+ break;
+
+ default:
+ break;
+ }
+
+ encode_string(xdr, create->name->len, create->name->name);
+ encode_attrs(xdr, create->attrs, create->server);
+}
+
+static void encode_getattr_one(struct xdr_stream *xdr, uint32_t bitmap, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_GETATTR, decode_getattr_maxsz, hdr);
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(bitmap);
+}
+
+static void encode_getattr_two(struct xdr_stream *xdr, uint32_t bm0, uint32_t bm1, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_GETATTR, decode_getattr_maxsz, hdr);
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(2);
+ *p++ = cpu_to_be32(bm0);
+ *p = cpu_to_be32(bm1);
+}
+
+static void
+encode_getattr_three(struct xdr_stream *xdr,
+ uint32_t bm0, uint32_t bm1, uint32_t bm2,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_GETATTR, decode_getattr_maxsz, hdr);
+ if (bm2) {
+ p = reserve_space(xdr, 16);
+ *p++ = cpu_to_be32(3);
+ *p++ = cpu_to_be32(bm0);
+ *p++ = cpu_to_be32(bm1);
+ *p = cpu_to_be32(bm2);
+ } else if (bm1) {
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(2);
+ *p++ = cpu_to_be32(bm0);
+ *p = cpu_to_be32(bm1);
+ } else {
+ p = reserve_space(xdr, 8);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(bm0);
+ }
+}
+
+static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
+{
+ encode_getattr_two(xdr, bitmask[0] & nfs4_fattr_bitmap[0],
+ bitmask[1] & nfs4_fattr_bitmap[1], hdr);
+}
+
+static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
+{
+ encode_getattr_three(xdr,
+ bitmask[0] & nfs4_fsinfo_bitmap[0],
+ bitmask[1] & nfs4_fsinfo_bitmap[1],
+ bitmask[2] & nfs4_fsinfo_bitmap[2],
+ hdr);
+}
+
+static void encode_fs_locations(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
+{
+ encode_getattr_two(xdr, bitmask[0] & nfs4_fs_locations_bitmap[0],
+ bitmask[1] & nfs4_fs_locations_bitmap[1], hdr);
+}
+
+static void encode_getfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_GETFH, decode_getfh_maxsz, hdr);
+}
+
+static void encode_link(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_LINK, decode_link_maxsz, hdr);
+ encode_string(xdr, name->len, name->name);
+}
+
+static inline int nfs4_lock_type(struct file_lock *fl, int block)
+{
+ if ((fl->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) == F_RDLCK)
+ return block ? NFS4_READW_LT : NFS4_READ_LT;
+ return block ? NFS4_WRITEW_LT : NFS4_WRITE_LT;
+}
+
+static inline uint64_t nfs4_lock_length(struct file_lock *fl)
+{
+ if (fl->fl_end == OFFSET_MAX)
+ return ~(uint64_t)0;
+ return fl->fl_end - fl->fl_start + 1;
+}
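+
+/*
+ * Example: a whole-file lock (fl_start = 0, fl_end = OFFSET_MAX)
+ * encodes a length of all ones, which NFSv4 (RFC 3530) interprets as
+ * extending to the end of file; a byte-range lock covering offsets
+ * 100..199 encodes offset 100 and length 199 - 100 + 1 = 100.
+ */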
+
+static void encode_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 32);
+ p = xdr_encode_hyper(p, lowner->clientid);
+ *p++ = cpu_to_be32(20);
+ p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+ *p++ = cpu_to_be32(lowner->s_dev);
+ xdr_encode_hyper(p, lowner->id);
+}
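+
+/*
+ * The 32 bytes reserved in encode_lockowner() break down as:
+ * clientid (8) + owner length word (4, always 20) + the fixed-format
+ * owner itself: the literal "lock id:" (8), s_dev (4) and the 64-bit
+ * owner id (8).
+ */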
+
+/*
+ * opcode,type,reclaim,offset,length,new_lock_owner = 32
+ * open_seqid,open_stateid,lock_seqid,lock_owner.clientid, lock_owner.id = 40
+ */
+static void encode_lock(struct xdr_stream *xdr, const struct nfs_lock_args *args, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_LOCK, decode_lock_maxsz, hdr);
+ p = reserve_space(xdr, 28);
+ *p++ = cpu_to_be32(nfs4_lock_type(args->fl, args->block));
+ *p++ = cpu_to_be32(args->reclaim);
+ p = xdr_encode_hyper(p, args->fl->fl_start);
+ p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
+ *p = cpu_to_be32(args->new_lock_owner);
+ if (args->new_lock_owner) {
+ encode_nfs4_seqid(xdr, args->open_seqid);
+ encode_nfs4_stateid(xdr, args->open_stateid);
+ encode_nfs4_seqid(xdr, args->lock_seqid);
+ encode_lockowner(xdr, &args->lock_owner);
+ } else {
+ encode_nfs4_stateid(xdr, args->lock_stateid);
+ encode_nfs4_seqid(xdr, args->lock_seqid);
+ }
+}
+
+static void encode_lockt(struct xdr_stream *xdr, const struct nfs_lockt_args *args, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_LOCKT, decode_lockt_maxsz, hdr);
+ p = reserve_space(xdr, 20);
+ *p++ = cpu_to_be32(nfs4_lock_type(args->fl, 0));
+ p = xdr_encode_hyper(p, args->fl->fl_start);
+ p = xdr_encode_hyper(p, nfs4_lock_length(args->fl));
+ encode_lockowner(xdr, &args->lock_owner);
+}
+
+static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *args, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_LOCKU, decode_locku_maxsz, hdr);
+ encode_uint32(xdr, nfs4_lock_type(args->fl, 0));
+ encode_nfs4_seqid(xdr, args->seqid);
+ encode_nfs4_stateid(xdr, args->stateid);
+ p = reserve_space(xdr, 16);
+ p = xdr_encode_hyper(p, args->fl->fl_start);
+ xdr_encode_hyper(p, nfs4_lock_length(args->fl));
+}
+
+static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_RELEASE_LOCKOWNER, decode_release_lockowner_maxsz, hdr);
+ encode_lockowner(xdr, lowner);
+}
+
+static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_LOOKUP, decode_lookup_maxsz, hdr);
+ encode_string(xdr, name->len, name->name);
+}
+
+static void encode_share_access(struct xdr_stream *xdr, fmode_t fmode)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 8);
+ switch (fmode & (FMODE_READ|FMODE_WRITE)) {
+ case FMODE_READ:
+ *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_READ);
+ break;
+ case FMODE_WRITE:
+ *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_WRITE);
+ break;
+ case FMODE_READ|FMODE_WRITE:
+ *p++ = cpu_to_be32(NFS4_SHARE_ACCESS_BOTH);
+ break;
+ default:
+ *p++ = cpu_to_be32(0);
+ }
+ *p = cpu_to_be32(0); /* for linux, share_deny = 0 always */
+}
+
+static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg)
+{
+ __be32 *p;
+ /*
+ * seqid 4 and share_access 4 + share_deny 4 are written by the two
+ * helpers below; reserve_space(36) then covers clientid 8 +
+ * ownerlen 4 + the 24-byte fixed-format owner = 36
+ */
+ encode_nfs4_seqid(xdr, arg->seqid);
+ encode_share_access(xdr, arg->fmode);
+ p = reserve_space(xdr, 36);
+ p = xdr_encode_hyper(p, arg->clientid);
+ *p++ = cpu_to_be32(24);
+ p = xdr_encode_opaque_fixed(p, "open id:", 8);
+ *p++ = cpu_to_be32(arg->server->s_dev);
+ *p++ = cpu_to_be32(arg->id.uniquifier);
+ xdr_encode_hyper(p, arg->id.create_time);
+}
+
+static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
+{
+ __be32 *p;
+ struct nfs_client *clp;
+
+ p = reserve_space(xdr, 4);
+ switch (arg->open_flags & O_EXCL) {
+ case 0:
+ *p = cpu_to_be32(NFS4_CREATE_UNCHECKED);
+ encode_attrs(xdr, arg->u.attrs, arg->server);
+ break;
+ default:
+ clp = arg->server->nfs_client;
+ if (clp->cl_mvops->minor_version > 0) {
+ if (nfs4_has_persistent_session(clp)) {
+ *p = cpu_to_be32(NFS4_CREATE_GUARDED);
+ encode_attrs(xdr, arg->u.attrs, arg->server);
+ } else {
+ struct iattr dummy;
+
+ *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE4_1);
+ encode_nfs4_verifier(xdr, &arg->u.verifier);
+ dummy.ia_valid = 0;
+ encode_attrs(xdr, &dummy, arg->server);
+ }
+ } else {
+ *p = cpu_to_be32(NFS4_CREATE_EXCLUSIVE);
+ encode_nfs4_verifier(xdr, &arg->u.verifier);
+ }
+ }
+}
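+
+/*
+ * Createmode selection above: plain O_CREAT maps to UNCHECKED with
+ * explicit attributes. With O_EXCL, a v4.1 client on a persistent
+ * session uses GUARDED (the replay cache already makes the create
+ * safe to retry), otherwise EXCLUSIVE4_1, which carries a verifier
+ * plus an (here empty) attribute list; plain v4.0 falls back to
+ * EXCLUSIVE with just the verifier.
+ */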
+
+static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *arg)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 4);
+ switch (arg->open_flags & O_CREAT) {
+ case 0:
+ *p = cpu_to_be32(NFS4_OPEN_NOCREATE);
+ break;
+ default:
+ BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL);
+ *p = cpu_to_be32(NFS4_OPEN_CREATE);
+ encode_createmode(xdr, arg);
+ }
+}
+
+static inline void encode_delegation_type(struct xdr_stream *xdr, fmode_t delegation_type)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 4);
+ switch (delegation_type) {
+ case 0:
+ *p = cpu_to_be32(NFS4_OPEN_DELEGATE_NONE);
+ break;
+ case FMODE_READ:
+ *p = cpu_to_be32(NFS4_OPEN_DELEGATE_READ);
+ break;
+ case FMODE_WRITE|FMODE_READ:
+ *p = cpu_to_be32(NFS4_OPEN_DELEGATE_WRITE);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *name)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(NFS4_OPEN_CLAIM_NULL);
+ encode_string(xdr, name->len, name->name);
+}
+
+static inline void encode_claim_previous(struct xdr_stream *xdr, fmode_t type)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(NFS4_OPEN_CLAIM_PREVIOUS);
+ encode_delegation_type(xdr, type);
+}
+
+static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struct qstr *name, const nfs4_stateid *stateid)
+{
+ __be32 *p;
+
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
+ encode_nfs4_stateid(xdr, stateid);
+ encode_string(xdr, name->len, name->name);
+}
+
+static void encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_OPEN, decode_open_maxsz, hdr);
+ encode_openhdr(xdr, arg);
+ encode_opentype(xdr, arg);
+ switch (arg->claim) {
+ case NFS4_OPEN_CLAIM_NULL:
+ encode_claim_null(xdr, arg->name);
+ break;
+ case NFS4_OPEN_CLAIM_PREVIOUS:
+ encode_claim_previous(xdr, arg->u.delegation_type);
+ break;
+ case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+ encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_OPEN_CONFIRM, decode_open_confirm_maxsz, hdr);
+ encode_nfs4_stateid(xdr, arg->stateid);
+ encode_nfs4_seqid(xdr, arg->seqid);
+}
+
+static void encode_open_downgrade(struct xdr_stream *xdr, const struct nfs_closeargs *arg, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_OPEN_DOWNGRADE, decode_open_downgrade_maxsz, hdr);
+ encode_nfs4_stateid(xdr, arg->stateid);
+ encode_nfs4_seqid(xdr, arg->seqid);
+ encode_share_access(xdr, arg->fmode);
+}
+
+static void
+encode_putfh(struct xdr_stream *xdr, const struct nfs_fh *fh, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_PUTFH, decode_putfh_maxsz, hdr);
+ encode_string(xdr, fh->size, fh->data);
+}
+
+static void encode_putrootfh(struct xdr_stream *xdr, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_PUTROOTFH, decode_putrootfh_maxsz, hdr);
+}
+
+static void encode_open_stateid(struct xdr_stream *xdr,
+ const struct nfs_open_context *ctx,
+ const struct nfs_lock_context *l_ctx,
+ fmode_t fmode,
+ int zero_seqid)
+{
+ nfs4_stateid stateid;
+
+ if (ctx->state != NULL) {
+ nfs4_select_rw_stateid(&stateid, ctx->state,
+ fmode, l_ctx->lockowner, l_ctx->pid);
+ if (zero_seqid)
+ stateid.seqid = 0;
+ encode_nfs4_stateid(xdr, &stateid);
+ } else
+ encode_nfs4_stateid(xdr, &zero_stateid);
+}
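+
+/*
+ * Callers pass hdr->minorversion as zero_seqid: on NFSv4.1 the
+ * stateid's seqid field is zeroed, which RFC 5661 defines as matching
+ * the most recent version of the stateid, so READ and WRITE do not
+ * race against concurrent OPEN/LOCK seqid bumps.
+ */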
+
+static void encode_read(struct xdr_stream *xdr, const struct nfs_readargs *args, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_READ, decode_read_maxsz, hdr);
+ encode_open_stateid(xdr, args->context, args->lock_context,
+ FMODE_READ, hdr->minorversion);
+
+ p = reserve_space(xdr, 12);
+ p = xdr_encode_hyper(p, args->offset);
+ *p = cpu_to_be32(args->count);
+}
+
+static void encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg *readdir, struct rpc_rqst *req, struct compound_hdr *hdr)
+{
+ uint32_t attrs[2] = {
+ FATTR4_WORD0_RDATTR_ERROR,
+ FATTR4_WORD1_MOUNTED_ON_FILEID,
+ };
+ uint32_t dircount = readdir->count >> 1;
+ __be32 *p, verf[2];
+
+ if (readdir->plus) {
+ attrs[0] |= FATTR4_WORD0_TYPE|FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE|
+ FATTR4_WORD0_FSID|FATTR4_WORD0_FILEHANDLE|FATTR4_WORD0_FILEID;
+ attrs[1] |= FATTR4_WORD1_MODE|FATTR4_WORD1_NUMLINKS|FATTR4_WORD1_OWNER|
+ FATTR4_WORD1_OWNER_GROUP|FATTR4_WORD1_RAWDEV|
+ FATTR4_WORD1_SPACE_USED|FATTR4_WORD1_TIME_ACCESS|
+ FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
+ dircount >>= 1;
+ }
+ /* Use mounted_on_fileid only if the server supports it */
+ if (!(readdir->bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID))
+ attrs[0] |= FATTR4_WORD0_FILEID;
+
+ encode_op_hdr(xdr, OP_READDIR, decode_readdir_maxsz, hdr);
+ encode_uint64(xdr, readdir->cookie);
+ encode_nfs4_verifier(xdr, &readdir->verifier);
+ p = reserve_space(xdr, 20);
+ *p++ = cpu_to_be32(dircount);
+ *p++ = cpu_to_be32(readdir->count);
+ *p++ = cpu_to_be32(2);
+
+ *p++ = cpu_to_be32(attrs[0] & readdir->bitmask[0]);
+ *p = cpu_to_be32(attrs[1] & readdir->bitmask[1]);
+ memcpy(verf, readdir->verifier.data, sizeof(verf));
+ dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
+ __func__,
+ (unsigned long long)readdir->cookie,
+ verf[0], verf[1],
+ attrs[0] & readdir->bitmask[0],
+ attrs[1] & readdir->bitmask[1]);
+}
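+
+/*
+ * dircount is a server hint (RFC 3530) bounding only the name+cookie
+ * portion of the reply, while readdir->count bounds the entire reply;
+ * starting at count/2 and halving again for readdirplus roughly
+ * accounts for the extra per-entry attribute data.
+ */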
+
+static void encode_readlink(struct xdr_stream *xdr, const struct nfs4_readlink *readlink, struct rpc_rqst *req, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_READLINK, decode_readlink_maxsz, hdr);
+}
+
+static void encode_remove(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_REMOVE, decode_remove_maxsz, hdr);
+ encode_string(xdr, name->len, name->name);
+}
+
+static void encode_rename(struct xdr_stream *xdr, const struct qstr *oldname, const struct qstr *newname, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_RENAME, decode_rename_maxsz, hdr);
+ encode_string(xdr, oldname->len, oldname->name);
+ encode_string(xdr, newname->len, newname->name);
+}
+
+static void encode_renew(struct xdr_stream *xdr, clientid4 clid,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_RENEW, decode_renew_maxsz, hdr);
+ encode_uint64(xdr, clid);
+}
+
+static void
+encode_restorefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_RESTOREFH, decode_restorefh_maxsz, hdr);
+}
+
+static void
+encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_SETATTR, decode_setacl_maxsz, hdr);
+ encode_nfs4_stateid(xdr, &zero_stateid);
+ p = reserve_space(xdr, 2*4);
+ *p++ = cpu_to_be32(1);
+ *p = cpu_to_be32(FATTR4_WORD0_ACL);
+ BUG_ON(arg->acl_len % 4);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(arg->acl_len);
+ xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
+}
+
+static void
+encode_savefh(struct xdr_stream *xdr, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_SAVEFH, decode_savefh_maxsz, hdr);
+}
+
+static void encode_setattr(struct xdr_stream *xdr, const struct nfs_setattrargs *arg, const struct nfs_server *server, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_SETATTR, decode_setattr_maxsz, hdr);
+ encode_nfs4_stateid(xdr, &arg->stateid);
+ encode_attrs(xdr, arg->iap, server);
+}
+
+static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_SETCLIENTID, decode_setclientid_maxsz, hdr);
+ encode_nfs4_verifier(xdr, setclientid->sc_verifier);
+
+ encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(setclientid->sc_prog);
+ encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid);
+ encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr);
+ p = reserve_space(xdr, 4);
+ *p = cpu_to_be32(setclientid->sc_cb_ident);
+}
+
+static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_SETCLIENTID_CONFIRM,
+ decode_setclientid_confirm_maxsz, hdr);
+ encode_uint64(xdr, arg->clientid);
+ encode_nfs4_verifier(xdr, &arg->confirm);
+}
+
+static void encode_write(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_WRITE, decode_write_maxsz, hdr);
+ encode_open_stateid(xdr, args->context, args->lock_context,
+ FMODE_WRITE, hdr->minorversion);
+
+ p = reserve_space(xdr, 16);
+ p = xdr_encode_hyper(p, args->offset);
+ *p++ = cpu_to_be32(args->stable);
+ *p = cpu_to_be32(args->count);
+
+ xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
+}
+
+static void encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_DELEGRETURN, decode_delegreturn_maxsz, hdr);
+ encode_nfs4_stateid(xdr, stateid);
+}
+
+static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_SECINFO, decode_secinfo_maxsz, hdr);
+ encode_string(xdr, name->len, name->name);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/* NFSv4.1 operations */
+static void encode_exchange_id(struct xdr_stream *xdr,
+ struct nfs41_exchange_id_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ char impl_name[NFS4_OPAQUE_LIMIT];
+ int len = 0;
+
+ encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
+ encode_nfs4_verifier(xdr, args->verifier);
+
+ encode_string(xdr, args->id_len, args->id);
+
+ p = reserve_space(xdr, 12);
+ *p++ = cpu_to_be32(args->flags);
+ *p++ = cpu_to_be32(0); /* zero length state_protect4_a */
+
+ if (send_implementation_id &&
+ sizeof(CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN) > 1 &&
+ sizeof(CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN)
+ <= NFS4_OPAQUE_LIMIT + 1)
+ len = snprintf(impl_name, sizeof(impl_name), "%s %s %s %s",
+ utsname()->sysname, utsname()->release,
+ utsname()->version, utsname()->machine);
+
+ if (len > 0) {
+ *p = cpu_to_be32(1); /* implementation id array length=1 */
+
+ encode_string(xdr,
+ sizeof(CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN) - 1,
+ CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN);
+ encode_string(xdr, len, impl_name);
+ /* just send zeros for nii_date - the date is in nii_name */
+ p = reserve_space(xdr, 12);
+ p = xdr_encode_hyper(p, 0);
+ *p = cpu_to_be32(0);
+ } else
+ *p = cpu_to_be32(0); /* implementation id array length=0 */
+}
+
+static void encode_create_session(struct xdr_stream *xdr,
+ struct nfs41_create_session_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
+ uint32_t len;
+ struct nfs_client *clp = args->client;
+ u32 max_resp_sz_cached;
+
+ /*
+ * Assumes OPEN is the biggest non-idempotent compound.
+ * 2 is the verifier.
+ */
+ max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE +
+ RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT;
+
+ len = scnprintf(machine_name, sizeof(machine_name), "%s",
+ clp->cl_ipaddr);
+
+ encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
+ p = reserve_space(xdr, 16 + 2*28 + 20 + len + 12);
+ p = xdr_encode_hyper(p, clp->cl_clientid);
+ *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
+ *p++ = cpu_to_be32(args->flags); /*flags */
+
+ /* Fore Channel */
+ *p++ = cpu_to_be32(0); /* header padding size */
+ *p++ = cpu_to_be32(args->fc_attrs.max_rqst_sz); /* max req size */
+ *p++ = cpu_to_be32(args->fc_attrs.max_resp_sz); /* max resp size */
+ *p++ = cpu_to_be32(max_resp_sz_cached); /* Max resp sz cached */
+ *p++ = cpu_to_be32(args->fc_attrs.max_ops); /* max operations */
+ *p++ = cpu_to_be32(args->fc_attrs.max_reqs); /* max requests */
+ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
+
+ /* Back Channel */
+ *p++ = cpu_to_be32(0); /* header padding size */
+ *p++ = cpu_to_be32(args->bc_attrs.max_rqst_sz); /* max req size */
+ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz); /* max resp size */
+ *p++ = cpu_to_be32(args->bc_attrs.max_resp_sz_cached); /* Max resp sz cached */
+ *p++ = cpu_to_be32(args->bc_attrs.max_ops); /* max operations */
+ *p++ = cpu_to_be32(args->bc_attrs.max_reqs); /* max requests */
+ *p++ = cpu_to_be32(0); /* rdmachannel_attrs */
+
+ *p++ = cpu_to_be32(args->cb_program); /* cb_program */
+ *p++ = cpu_to_be32(1); /* one callback security flavor follows */
+ *p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */
+
+ /* authsys_parms rfc1831 */
+ *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */
+ p = xdr_encode_opaque(p, machine_name, len);
+ *p++ = cpu_to_be32(0); /* UID */
+ *p++ = cpu_to_be32(0); /* GID */
+ *p = cpu_to_be32(0); /* No more gids */
+}
+
+static void encode_destroy_session(struct xdr_stream *xdr,
+ struct nfs4_session *session,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_DESTROY_SESSION, decode_destroy_session_maxsz, hdr);
+ encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+}
+
+static void encode_reclaim_complete(struct xdr_stream *xdr,
+ struct nfs41_reclaim_complete_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_RECLAIM_COMPLETE, decode_reclaim_complete_maxsz, hdr);
+ encode_uint32(xdr, args->one_fs);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static void encode_sequence(struct xdr_stream *xdr,
+ const struct nfs4_sequence_args *args,
+ struct compound_hdr *hdr)
+{
+#if defined(CONFIG_NFS_V4_1)
+ struct nfs4_session *session = args->sa_session;
+ struct nfs4_slot_table *tp;
+ struct nfs4_slot *slot;
+ __be32 *p;
+
+ if (!session)
+ return;
+
+ tp = &session->fc_slot_table;
+
+ WARN_ON(args->sa_slotid == NFS4_MAX_SLOT_TABLE);
+ slot = tp->slots + args->sa_slotid;
+
+ encode_op_hdr(xdr, OP_SEQUENCE, decode_sequence_maxsz, hdr);
+
+ /*
+ * Sessionid + seqid + slotid + max slotid + cache_this
+ */
+ dprintk("%s: sessionid=%u:%u:%u:%u seqid=%d slotid=%d "
+ "max_slotid=%d cache_this=%d\n",
+ __func__,
+ ((u32 *)session->sess_id.data)[0],
+ ((u32 *)session->sess_id.data)[1],
+ ((u32 *)session->sess_id.data)[2],
+ ((u32 *)session->sess_id.data)[3],
+ slot->seq_nr, args->sa_slotid,
+ tp->highest_used_slotid, args->sa_cache_this);
+ p = reserve_space(xdr, NFS4_MAX_SESSIONID_LEN + 16);
+ p = xdr_encode_opaque_fixed(p, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+ *p++ = cpu_to_be32(slot->seq_nr);
+ *p++ = cpu_to_be32(args->sa_slotid);
+ *p++ = cpu_to_be32(tp->highest_used_slotid);
+ *p = cpu_to_be32(args->sa_cache_this);
+#endif /* CONFIG_NFS_V4_1 */
+}
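+
+/*
+ * On NFSv4.1, every compound sent on a session must begin with a
+ * SEQUENCE op naming the session, the slot and the slot's sequence
+ * number (the basis for exactly-once semantics); sa_cache_this tells
+ * the server whether to cache the reply for replay. Session-less
+ * requests such as EXCHANGE_ID and CREATE_SESSION are the exception,
+ * and on v4.0 this routine simply returns without encoding anything.
+ */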
+
+#ifdef CONFIG_NFS_V4_1
+static void
+encode_getdevicelist(struct xdr_stream *xdr,
+ const struct nfs4_getdevicelist_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+ nfs4_verifier dummy = {
+ .data = "dummmmmy",
+ };
+
+ encode_op_hdr(xdr, OP_GETDEVICELIST, decode_getdevicelist_maxsz, hdr);
+ p = reserve_space(xdr, 16);
+ *p++ = cpu_to_be32(args->layoutclass);
+ *p++ = cpu_to_be32(NFS4_PNFS_GETDEVLIST_MAXNUM);
+ xdr_encode_hyper(p, 0ULL); /* cookie */
+ encode_nfs4_verifier(xdr, &dummy);
+}
+
+static void
+encode_getdeviceinfo(struct xdr_stream *xdr,
+ const struct nfs4_getdeviceinfo_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_GETDEVICEINFO, decode_getdeviceinfo_maxsz, hdr);
+ p = reserve_space(xdr, 12 + NFS4_DEVICEID4_SIZE);
+ p = xdr_encode_opaque_fixed(p, args->pdev->dev_id.data,
+ NFS4_DEVICEID4_SIZE);
+ *p++ = cpu_to_be32(args->pdev->layout_type);
+ *p++ = cpu_to_be32(args->pdev->pglen); /* gdia_maxcount */
+ *p++ = cpu_to_be32(0); /* bitmap length 0 */
+}
+
+static void
+encode_layoutget(struct xdr_stream *xdr,
+ const struct nfs4_layoutget_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_LAYOUTGET, decode_layoutget_maxsz, hdr);
+ p = reserve_space(xdr, 36);
+ *p++ = cpu_to_be32(0); /* Signal layout available */
+ *p++ = cpu_to_be32(args->type);
+ *p++ = cpu_to_be32(args->range.iomode);
+ p = xdr_encode_hyper(p, args->range.offset);
+ p = xdr_encode_hyper(p, args->range.length);
+ p = xdr_encode_hyper(p, args->minlength);
+ encode_nfs4_stateid(xdr, &args->stateid);
+ encode_uint32(xdr, args->maxcount);
+
+ dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n",
+ __func__,
+ args->type,
+ args->range.iomode,
+ (unsigned long)args->range.offset,
+ (unsigned long)args->range.length,
+ args->maxcount);
+}
+
+static int
+encode_layoutcommit(struct xdr_stream *xdr,
+ struct inode *inode,
+ const struct nfs4_layoutcommit_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ dprintk("%s: lbw: %llu type: %d\n", __func__, args->lastbytewritten,
+ NFS_SERVER(args->inode)->pnfs_curr_ld->id);
+
+ encode_op_hdr(xdr, OP_LAYOUTCOMMIT, decode_layoutcommit_maxsz, hdr);
+ p = reserve_space(xdr, 20);
+ /* Only whole file layouts */
+ p = xdr_encode_hyper(p, 0); /* offset */
+ p = xdr_encode_hyper(p, args->lastbytewritten + 1); /* length */
+ *p = cpu_to_be32(0); /* reclaim */
+ encode_nfs4_stateid(xdr, &args->stateid);
+ p = reserve_space(xdr, 20);
+ *p++ = cpu_to_be32(1); /* newoffset = TRUE */
+ p = xdr_encode_hyper(p, args->lastbytewritten);
+ *p++ = cpu_to_be32(0); /* Never send time_modify_changed */
+ *p++ = cpu_to_be32(NFS_SERVER(args->inode)->pnfs_curr_ld->id);/* type */
+
+ if (NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit)
+ NFS_SERVER(inode)->pnfs_curr_ld->encode_layoutcommit(
+ NFS_I(inode)->layout, xdr, args);
+ else
+ encode_uint32(xdr, 0); /* no layout-type payload */
+
+ return 0;
+}
+
+static void
+encode_layoutreturn(struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args,
+ struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ encode_op_hdr(xdr, OP_LAYOUTRETURN, decode_layoutreturn_maxsz, hdr);
+ p = reserve_space(xdr, 16);
+ *p++ = cpu_to_be32(0); /* reclaim. always 0 for now */
+ *p++ = cpu_to_be32(args->layout_type);
+ *p++ = cpu_to_be32(IOMODE_ANY);
+ *p = cpu_to_be32(RETURN_FILE);
+ p = reserve_space(xdr, 16);
+ p = xdr_encode_hyper(p, 0);
+ p = xdr_encode_hyper(p, NFS4_MAX_UINT64);
+ spin_lock(&args->inode->i_lock);
+ encode_nfs4_stateid(xdr, &args->stateid);
+ spin_unlock(&args->inode->i_lock);
+ if (NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn) {
+ NFS_SERVER(args->inode)->pnfs_curr_ld->encode_layoutreturn(
+ NFS_I(args->inode)->layout, xdr, args);
+ } else
+ encode_uint32(xdr, 0);
+}
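+
+/*
+ * The stateid copy is done under i_lock because the layout stateid
+ * may be updated concurrently (e.g. by a LAYOUTGET reply or layout
+ * recall processing); the remaining fields of the op are stable for
+ * the duration of the call.
+ */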
+
+static int
+encode_secinfo_no_name(struct xdr_stream *xdr,
+ const struct nfs41_secinfo_no_name_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_SECINFO_NO_NAME, decode_secinfo_no_name_maxsz, hdr);
+ encode_uint32(xdr, args->style);
+ return 0;
+}
+
+static void encode_test_stateid(struct xdr_stream *xdr,
+ struct nfs41_test_stateid_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_TEST_STATEID, decode_test_stateid_maxsz, hdr);
+ encode_uint32(xdr, 1);
+ encode_nfs4_stateid(xdr, args->stateid);
+}
+
+static void encode_free_stateid(struct xdr_stream *xdr,
+ struct nfs41_free_stateid_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_FREE_STATEID, decode_free_stateid_maxsz, hdr);
+ encode_nfs4_stateid(xdr, args->stateid);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * END OF "GENERIC" ENCODE ROUTINES.
+ */
+
+static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
+{
+#if defined(CONFIG_NFS_V4_1)
+ if (args->sa_session)
+ return args->sa_session->clp->cl_mvops->minor_version;
+#endif /* CONFIG_NFS_V4_1 */
+ return 0;
+}
+
+/*
+ * Encode an ACCESS request
+ */
+static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_accessargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_access(xdr, args->access, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
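+
+/*
+ * Most of the nfs4_xdr_enc_* routines below follow this same
+ * skeleton: encode the compound header, a SEQUENCE op (a no-op
+ * outside v4.1), a PUTFH/PUTROOTFH to establish the current
+ * filehandle, the operations proper (often trailed by a GETATTR to
+ * refresh cached attributes), then backfill the op count with
+ * encode_nops(). Pure v4.0 state ops such as RENEW and SETCLIENTID
+ * skip the SEQUENCE and PUTFH steps.
+ */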
+
+/*
+ * Encode LOOKUP request
+ */
+static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_lookup_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_lookup(xdr, args->name, &hdr);
+ encode_getfh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode LOOKUP_ROOT request
+ */
+static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs4_lookup_root_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putrootfh(xdr, &hdr);
+ encode_getfh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode REMOVE request
+ */
+static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs_removeargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_remove(xdr, &args->name, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode RENAME request
+ */
+static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs_renameargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->old_dir, &hdr);
+ encode_savefh(xdr, &hdr);
+ encode_putfh(xdr, args->new_dir, &hdr);
+ encode_rename(xdr, args->old_name, args->new_name, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_restorefh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode LINK request
+ */
+static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_link_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_savefh(xdr, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_link(xdr, args->name, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_restorefh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode CREATE request
+ */
+static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_create_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_savefh(xdr, &hdr);
+ encode_create(xdr, args, &hdr);
+ encode_getfh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_restorefh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode SYMLINK request
+ */
+static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_create_arg *args)
+{
+ nfs4_xdr_enc_create(req, xdr, args);
+}
+
+/*
+ * Encode GETATTR request
+ */
+static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_getattr_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a CLOSE request
+ */
+static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_closeargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_close(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode an OPEN request
+ */
+static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_openargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_savefh(xdr, &hdr);
+ encode_open(xdr, args, &hdr);
+ encode_getfh(xdr, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_restorefh(xdr, &hdr);
+ encode_getfattr(xdr, args->dir_bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode an OPEN_CONFIRM request
+ */
+static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_open_confirmargs *args)
+{
+ struct compound_hdr hdr = {
+ .nops = 0,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_open_confirm(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode an OPEN request with no attributes.
+ */
+static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_openargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_open(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode an OPEN_DOWNGRADE request
+ */
+static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_closeargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_open_downgrade(xdr, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a LOCK request
+ */
+static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_lock_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_lock(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a LOCKT request
+ */
+static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_lockt_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_lockt(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a LOCKU request
+ */
+static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_locku_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_locku(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_release_lockowner_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = 0,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_release_lockowner(xdr, &args->lock_owner, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a READLINK request
+ */
+static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_readlink *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_readlink(xdr, args, req, &hdr);
+
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
+ args->pgbase, args->pglen);
+ encode_nops(&hdr);
+}
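+
+/*
+ * xdr_inline_pages() splices the caller's pages into the receive
+ * buffer so the symlink data is decoded in place rather than copied;
+ * hdr.replen is kept in 32-bit XDR words, hence the << 2 to convert
+ * it to a byte offset. The same pattern recurs below for READDIR,
+ * READ, GETACL and the pNFS device and layout ops.
+ */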
+
+/*
+ * Encode a READDIR request
+ */
+static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_readdir_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_readdir(xdr, args, req, &hdr);
+
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
+ args->pgbase, args->count);
+ dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
+ __func__, hdr.replen << 2, args->pages,
+ args->pgbase, args->count);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a READ request
+ */
+static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_readargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_read(xdr, args, &hdr);
+
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
+ args->pages, args->pgbase, args->count);
+ req->rq_rcv_buf.flags |= XDRBUF_READ;
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a SETATTR request
+ */
+static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_setattrargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_setattr(xdr, args, args->server, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a GETACL request
+ */
+static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_getaclargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+ uint32_t replen;
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ replen = hdr.replen + op_decode_hdr_maxsz + 1;
+ encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
+
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
+ args->acl_pages, args->acl_pgbase, args->acl_len);
+
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a WRITE request
+ */
+static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_writeargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_write(xdr, args, &hdr);
+ req->rq_snd_buf.flags |= XDRBUF_WRITE;
+ if (args->bitmask)
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a COMMIT request
+ */
+static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_writeargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_commit(xdr, args, &hdr);
+ if (args->bitmask)
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode an FSINFO request
+ */
+static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs4_fsinfo_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_fsinfo(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a PATHCONF request
+ */
+static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_pathconf_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
+ &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a STATFS request
+ */
+static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+ const struct nfs4_statfs_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
+ args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a server capabilities (GETATTR bitmap) request
+ */
+static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_server_caps_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fhandle, &hdr);
+ encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
+ FATTR4_WORD0_FH_EXPIRE_TYPE|
+ FATTR4_WORD0_LINK_SUPPORT|
+ FATTR4_WORD0_SYMLINK_SUPPORT|
+ FATTR4_WORD0_ACLSUPPORT, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a RENEW request
+ */
+static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_client *clp)
+{
+ struct compound_hdr hdr = {
+ .nops = 0,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_renew(xdr, clp->cl_clientid, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a SETCLIENTID request
+ */
+static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_setclientid *sc)
+{
+ struct compound_hdr hdr = {
+ .nops = 0,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_setclientid(xdr, sc, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a SETCLIENTID_CONFIRM request
+ */
+static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_setclientid_res *arg)
+{
+ struct compound_hdr hdr = {
+ .nops = 0,
+ };
+ const u32 lease_bitmap[3] = { FATTR4_WORD0_LEASE_TIME };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_setclientid_confirm(xdr, arg, &hdr);
+ encode_putrootfh(xdr, &hdr);
+ encode_fsinfo(xdr, lease_bitmap, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a DELEGRETURN request
+ */
+static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const struct nfs4_delegreturnargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fhandle, &hdr);
+ encode_delegreturn(xdr, args->stateid, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode FS_LOCATIONS request
+ */
+static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_fs_locations_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+ uint32_t replen;
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_lookup(xdr, args->name, &hdr);
+ replen = hdr.replen; /* get the attribute into args->page */
+ encode_fs_locations(xdr, args->bitmask, &hdr);
+
+ xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
+ 0, PAGE_SIZE);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode SECINFO request
+ */
+static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_secinfo_arg *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->dir_fh, &hdr);
+ encode_secinfo(xdr, args->name, &hdr);
+ encode_nops(&hdr);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Encode an EXCHANGE_ID request
+ */
+static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs41_exchange_id_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = args->client->cl_mvops->minor_version,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_exchange_id(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a CREATE_SESSION request
+ */
+static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs41_create_session_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = args->client->cl_mvops->minor_version,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_create_session(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a DESTROY_SESSION request
+ */
+static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_session *session)
+{
+ struct compound_hdr hdr = {
+ .minorversion = session->clp->cl_mvops->minor_version,
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_destroy_session(xdr, session, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a SEQUENCE request
+ */
+static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs4_sequence_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a GET_LEASE_TIME request
+ */
+static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_get_lease_time_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
+ };
+ const u32 lease_bitmap[3] = { FATTR4_WORD0_LEASE_TIME };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->la_seq_args, &hdr);
+ encode_putrootfh(xdr, &hdr);
+ encode_fsinfo(xdr, lease_bitmap, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode a RECLAIM_COMPLETE request
+ */
+static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs41_reclaim_complete_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args)
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_reclaim_complete(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode GETDEVICELIST request
+ */
+static void nfs4_xdr_enc_getdevicelist(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_getdevicelist_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_getdevicelist(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode GETDEVICEINFO request
+ */
+static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_getdeviceinfo_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_getdeviceinfo(xdr, args, &hdr);
+
+ /* set up reply kvec. Subtract notification bitmap max size (2)
+ * so that notification bitmap is put in xdr_buf tail */
+ xdr_inline_pages(&req->rq_rcv_buf, (hdr.replen - 2) << 2,
+ args->pdev->pages, args->pdev->pgbase,
+ args->pdev->pglen);
+
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode LAYOUTGET request
+ */
+static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutget_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+ encode_layoutget(xdr, args, &hdr);
+
+ xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
+ args->layout.pages, 0, args->layout.pglen);
+
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode LAYOUTCOMMIT request
+ */
+static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutcommit_args *args)
+{
+ struct nfs4_layoutcommit_data *data =
+ container_of(args, struct nfs4_layoutcommit_data, args);
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+ encode_layoutcommit(xdr, data->args.inode, args, &hdr);
+ encode_getfattr(xdr, args->bitmask, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode LAYOUTRETURN request
+ */
+static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutreturn_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+ encode_layoutreturn(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode SECINFO_NO_NAME request
+ */
+static void nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs41_secinfo_no_name_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putrootfh(xdr, &hdr);
+ encode_secinfo_no_name(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode TEST_STATEID request
+ */
+static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs41_test_stateid_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_test_stateid(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Encode FREE_STATEID request
+ */
+static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs41_free_stateid_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_free_stateid(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+ dprintk("nfs: %s: prematurely hit end of receive buffer. "
+ "Remaining buffer length is %tu words.\n",
+ func, xdr->end - xdr->p);
+}
+
+static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, *len);
+ if (unlikely(!p))
+ goto out_overflow;
+ *string = (char *)p;
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
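+
+/*
+ * decode_opaque_inline() consumes a standard XDR opaque<>: a 4-byte
+ * length followed by the data, padded out to a multiple of 4 bytes
+ * (xdr_inline_decode() accounts for the padding). The returned string
+ * points into the receive buffer and is not NUL-terminated.
+ */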
+
+static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ hdr->status = be32_to_cpup(p++);
+ hdr->taglen = be32_to_cpup(p);
+
+ p = xdr_inline_decode(xdr, hdr->taglen + 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ hdr->tag = (char *)p;
+ p += XDR_QUADLEN(hdr->taglen);
+ hdr->nops = be32_to_cpup(p);
+ if (unlikely(hdr->nops < 1))
+ return nfs4_stat_to_errno(hdr->status);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
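+
+/*
+ * For reference, the COMPOUND4res header parsed above is laid out on
+ * the wire as (all fields big-endian):
+ *
+ *	status	4 bytes
+ *	taglen	4 bytes
+ *	tag	taglen bytes, padded to a 4-byte boundary
+ *	nops	4 bytes	(number of results that follow)
+ */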
+
+static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+{
+ __be32 *p;
+ uint32_t opnum;
+ int32_t nfserr;
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ opnum = be32_to_cpup(p++);
+ if (opnum != expected) {
+ dprintk("nfs: Server returned operation"
+ " %d but we issued a request for %d\n",
+ opnum, expected);
+ return -EIO;
+ }
+ nfserr = be32_to_cpup(p);
+ if (nfserr != NFS_OK)
+ return nfs4_stat_to_errno(nfserr);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+/* Dummy routine: skip over the ACE without actually decoding it */
+static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs_client *clp)
+{
+ __be32 *p;
+ unsigned int strlen;
+ char *str;
+
+ p = xdr_inline_decode(xdr, 12);
+ if (likely(p))
+ return decode_opaque_inline(xdr, &strlen, &str);
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
+{
+ uint32_t bmlen;
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bmlen = be32_to_cpup(p);
+
+ bitmap[0] = bitmap[1] = bitmap[2] = 0;
+ p = xdr_inline_decode(xdr, (bmlen << 2));
+ if (unlikely(!p))
+ goto out_overflow;
+ if (bmlen > 0) {
+ bitmap[0] = be32_to_cpup(p++);
+ if (bmlen > 1) {
+ bitmap[1] = be32_to_cpup(p++);
+ if (bmlen > 2)
+ bitmap[2] = be32_to_cpup(p);
+ }
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
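+
+/*
+ * Example (informative): a bitmap4 carrying only word 0 attributes,
+ * say TYPE, CHANGE and SIZE, is encoded as
+ *
+ *	1						length in words
+ *	FATTR4_WORD0_TYPE | FATTR4_WORD0_CHANGE |
+ *	FATTR4_WORD0_SIZE				word 0
+ *
+ * The decoder above accepts any length but keeps at most three words,
+ * silently skipping the rest.
+ */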
+
+static inline int decode_attr_length(struct xdr_stream *xdr, uint32_t *attrlen, __be32 **savep)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *attrlen = be32_to_cpup(p);
+ *savep = xdr->p;
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *bitmask)
+{
+ if (likely(bitmap[0] & FATTR4_WORD0_SUPPORTED_ATTRS)) {
+ int ret;
+ ret = decode_attr_bitmap(xdr, bitmask);
+ if (unlikely(ret < 0))
+ return ret;
+ bitmap[0] &= ~FATTR4_WORD0_SUPPORTED_ATTRS;
+ } else
+ bitmask[0] = bitmask[1] = bitmask[2] = 0;
+ dprintk("%s: bitmask=%08x:%08x:%08x\n", __func__,
+ bitmask[0], bitmask[1], bitmask[2]);
+ return 0;
+}
+
+static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *type)
+{
+ __be32 *p;
+ int ret = 0;
+
+ *type = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_TYPE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_TYPE)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *type = be32_to_cpup(p);
+ if (*type < NF4REG || *type > NF4NAMEDATTR) {
+ dprintk("%s: bad type %d\n", __func__, *type);
+ return -EIO;
+ }
+ bitmap[0] &= ~FATTR4_WORD0_TYPE;
+ ret = NFS_ATTR_FATTR_TYPE;
+ }
+ dprintk("%s: type=0%o\n", __func__, nfs_type2fmt[*type]);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_fh_expire_type(struct xdr_stream *xdr,
+ uint32_t *bitmap, uint32_t *type)
+{
+ __be32 *p;
+
+ *type = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_FH_EXPIRE_TYPE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_FH_EXPIRE_TYPE)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *type = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_FH_EXPIRE_TYPE;
+ }
+ dprintk("%s: expire type=0x%x\n", __func__, *type);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *change)
+{
+ __be32 *p;
+ int ret = 0;
+
+ *change = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_CHANGE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_CHANGE)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, change);
+ bitmap[0] &= ~FATTR4_WORD0_CHANGE;
+ ret = NFS_ATTR_FATTR_CHANGE;
+ }
+ dprintk("%s: change attribute=%Lu\n", __func__,
+ (unsigned long long)*change);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *size)
+{
+ __be32 *p;
+ int ret = 0;
+
+ *size = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_SIZE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_SIZE)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, size);
+ bitmap[0] &= ~FATTR4_WORD0_SIZE;
+ ret = NFS_ATTR_FATTR_SIZE;
+ }
+ dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_LINK_SUPPORT - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_LINK_SUPPORT)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
+ }
+ dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_SYMLINK_SUPPORT - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_SYMLINK_SUPPORT)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
+ }
+ dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fsid *fsid)
+{
+ __be32 *p;
+ int ret = 0;
+
+ fsid->major = 0;
+ fsid->minor = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_FSID - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_FSID)) {
+ p = xdr_inline_decode(xdr, 16);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &fsid->major);
+ xdr_decode_hyper(p, &fsid->minor);
+ bitmap[0] &= ~FATTR4_WORD0_FSID;
+ ret = NFS_ATTR_FATTR_FSID;
+ }
+ dprintk("%s: fsid=(0x%Lx/0x%Lx)\n", __func__,
+ (unsigned long long)fsid->major,
+ (unsigned long long)fsid->minor);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+
+ *res = 60;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_LEASE_TIME - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_LEASE_TIME)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
+ }
+ dprintk("%s: file size=%u\n", __func__, (unsigned int)*res);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_error(struct xdr_stream *xdr, uint32_t *bitmap, int32_t *res)
+{
+ __be32 *p;
+
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_RDATTR_ERROR - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_RDATTR_ERROR)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bitmap[0] &= ~FATTR4_WORD0_RDATTR_ERROR;
+ *res = -be32_to_cpup(p);
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fh *fh)
+{
+ __be32 *p;
+ int len;
+
+ if (fh != NULL)
+ memset(fh, 0, sizeof(*fh));
+
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEHANDLE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_FILEHANDLE)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ if (len > NFS4_FHSIZE)
+ return -EIO;
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (fh != NULL) {
+ memcpy(fh->data, p, len);
+ fh->size = len;
+ }
+ bitmap[0] &= ~FATTR4_WORD0_FILEHANDLE;
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+
+ *res = ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_ACLSUPPORT - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_ACLSUPPORT)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *res = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT;
+ }
+ dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
+{
+ __be32 *p;
+ int ret = 0;
+
+ *fileid = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_FILEID - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_FILEID)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, fileid);
+ bitmap[0] &= ~FATTR4_WORD0_FILEID;
+ ret = NFS_ATTR_FATTR_FILEID;
+ }
+ dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
+{
+ __be32 *p;
+ int ret = 0;
+
+ *fileid = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_MOUNTED_ON_FILEID - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, fileid);
+ bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
+ ret = NFS_ATTR_FATTR_MOUNTED_ON_FILEID;
+ }
+ dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+ __be32 *p;
+ int status = 0;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_AVAIL - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_FILES_AVAIL)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
+ bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL;
+ }
+ dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+ __be32 *p;
+ int status = 0;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_FREE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_FILES_FREE)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
+ bitmap[0] &= ~FATTR4_WORD0_FILES_FREE;
+ }
+ dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+ __be32 *p;
+ int status = 0;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_FILES_TOTAL - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_FILES_TOTAL)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
+ bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL;
+ }
+ dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
+{
+ u32 n;
+ __be32 *p;
+ int status = 0;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ n = be32_to_cpup(p);
+ if (n == 0)
+ goto root_path;
+ dprintk("pathname4: ");
+ path->ncomponents = 0;
+ while (path->ncomponents < n) {
+ struct nfs4_string *component = &path->components[path->ncomponents];
+ status = decode_opaque_inline(xdr, &component->len, &component->data);
+ if (unlikely(status != 0))
+ goto out_eio;
+ ifdebug(XDR)
+ pr_cont("%s%.*s ",
+ (path->ncomponents != n ? "/ " : ""),
+ component->len, component->data);
+ if (path->ncomponents < NFS4_PATHNAME_MAXCOMPONENTS)
+ path->ncomponents++;
+ else {
+ dprintk("cannot parse %d components in path\n", n);
+ goto out_eio;
+ }
+ }
+out:
+ return status;
+root_path:
+/* a root pathname is sent as a zero component4 */
+ path->ncomponents = 1;
+ path->components[0].len = 0;
+ path->components[0].data = NULL;
+ dprintk("pathname4: /\n");
+ goto out;
+out_eio:
+ dprintk(" status %d", status);
+ status = -EIO;
+ goto out;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs4_fs_locations *res)
+{
+ int n;
+ __be32 *p;
+ int status = -EIO;
+
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_FS_LOCATIONS -1U)))
+ goto out;
+ status = 0;
+ if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS)))
+ goto out;
+ status = -EIO;
+ /* Ignore broken servers that return unrequested attrs */
+ if (unlikely(res == NULL))
+ goto out;
+ dprintk("%s: fsroot:\n", __func__);
+ status = decode_pathname(xdr, &res->fs_path);
+ if (unlikely(status != 0))
+ goto out;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ n = be32_to_cpup(p);
+ if (n <= 0)
+ goto out_eio;
+ res->nlocations = 0;
+ while (res->nlocations < n) {
+ u32 m;
+ struct nfs4_fs_location *loc = &res->locations[res->nlocations];
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ m = be32_to_cpup(p);
+
+ loc->nservers = 0;
+ dprintk("%s: servers:\n", __func__);
+ while (loc->nservers < m) {
+ struct nfs4_string *server = &loc->servers[loc->nservers];
+ status = decode_opaque_inline(xdr, &server->len, &server->data);
+ if (unlikely(status != 0))
+ goto out_eio;
+ dprintk("%s ", server->data);
+ if (loc->nservers < NFS4_FS_LOCATION_MAXSERVERS)
+ loc->nservers++;
+ else {
+ unsigned int i;
+ dprintk("%s: using first %u of %u servers "
+ "returned for location %u\n",
+ __func__,
+ NFS4_FS_LOCATION_MAXSERVERS,
+ m, res->nlocations);
+ for (i = loc->nservers; i < m; i++) {
+ unsigned int len;
+ char *data;
+ status = decode_opaque_inline(xdr, &len, &data);
+ if (unlikely(status != 0))
+ goto out_eio;
+ }
+ }
+ }
+ status = decode_pathname(xdr, &loc->rootpath);
+ if (unlikely(status != 0))
+ goto out_eio;
+ if (res->nlocations < NFS4_FS_LOCATIONS_MAXENTRIES)
+ res->nlocations++;
+ }
+ if (res->nlocations != 0)
+ status = NFS_ATTR_FATTR_V4_LOCATIONS;
+out:
+ dprintk("%s: fs_locations done, error = %d\n", __func__, status);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+out_eio:
+ status = -EIO;
+ goto out;
+}
+
+static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+ __be32 *p;
+ int status = 0;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXFILESIZE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_MAXFILESIZE)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
+ bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE;
+ }
+ dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxlink)
+{
+ __be32 *p;
+ int status = 0;
+
+ *maxlink = 1;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXLINK - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_MAXLINK)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *maxlink = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_MAXLINK;
+ }
+ dprintk("%s: maxlink=%u\n", __func__, *maxlink);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *maxname)
+{
+ __be32 *p;
+ int status = 0;
+
+ *maxname = 1024;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXNAME - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_MAXNAME)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *maxname = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_MAXNAME;
+ }
+ dprintk("%s: maxname=%u\n", __func__, *maxname);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+ int status = 0;
+
+ *res = 1024;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXREAD - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_MAXREAD)) {
+ uint64_t maxread;
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, &maxread);
+ if (maxread > 0x7FFFFFFF)
+ maxread = 0x7FFFFFFF;
+ *res = (uint32_t)maxread;
+ bitmap[0] &= ~FATTR4_WORD0_MAXREAD;
+ }
+ dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+ int status = 0;
+
+ *res = 1024;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_MAXWRITE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_MAXWRITE)) {
+ uint64_t maxwrite;
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, &maxwrite);
+ if (maxwrite > 0x7FFFFFFF)
+ maxwrite = 0x7FFFFFFF;
+ *res = (uint32_t)maxwrite;
+ bitmap[0] &= ~FATTR4_WORD0_MAXWRITE;
+ }
+ dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, umode_t *mode)
+{
+ uint32_t tmp;
+ __be32 *p;
+ int ret = 0;
+
+ *mode = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_MODE - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_MODE)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ tmp = be32_to_cpup(p);
+ *mode = tmp & ~S_IFMT;
+ bitmap[1] &= ~FATTR4_WORD1_MODE;
+ ret = NFS_ATTR_FATTR_MODE;
+ }
+ dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *nlink)
+{
+ __be32 *p;
+ int ret = 0;
+
+ *nlink = 1;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_NUMLINKS - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_NUMLINKS)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *nlink = be32_to_cpup(p);
+ bitmap[1] &= ~FATTR4_WORD1_NUMLINKS;
+ ret = NFS_ATTR_FATTR_NLINK;
+ }
+ dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap,
+ const struct nfs_server *server, uint32_t *uid,
+ struct nfs4_string *owner_name)
+{
+ uint32_t len;
+ __be32 *p;
+ int ret = 0;
+
+ *uid = -2;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_OWNER)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (owner_name != NULL) {
+ owner_name->data = kmemdup(p, len, GFP_NOWAIT);
+ if (owner_name->data != NULL) {
+ owner_name->len = len;
+ ret = NFS_ATTR_FATTR_OWNER_NAME;
+ }
+ } else if (len < XDR_MAX_NETOBJ) {
+ if (nfs_map_name_to_uid(server, (char *)p, len, uid) == 0)
+ ret = NFS_ATTR_FATTR_OWNER;
+ else
+ dprintk("%s: nfs_map_name_to_uid failed!\n",
+ __func__);
+ } else
+ dprintk("%s: name too long (%u)!\n",
+ __func__, len);
+ bitmap[1] &= ~FATTR4_WORD1_OWNER;
+ }
+ dprintk("%s: uid=%d\n", __func__, (int)*uid);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap,
+ const struct nfs_server *server, uint32_t *gid,
+ struct nfs4_string *group_name)
+{
+ uint32_t len;
+ __be32 *p;
+ int ret = 0;
+
+ *gid = -2;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_OWNER_GROUP - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_OWNER_GROUP)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (group_name != NULL) {
+ group_name->data = kmemdup(p, len, GFP_NOWAIT);
+ if (group_name->data != NULL) {
+ group_name->len = len;
+ ret = NFS_ATTR_FATTR_GROUP_NAME;
+ }
+ } else if (len < XDR_MAX_NETOBJ) {
+ if (nfs_map_group_to_gid(server, (char *)p, len, gid) == 0)
+ ret = NFS_ATTR_FATTR_GROUP;
+ else
+ dprintk("%s: nfs_map_group_to_gid failed!\n",
+ __func__);
+ } else
+ dprintk("%s: name too long (%u)!\n",
+ __func__, len);
+ bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP;
+ }
+ dprintk("%s: gid=%d\n", __func__, (int)*gid);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rdev)
+{
+ uint32_t major = 0, minor = 0;
+ __be32 *p;
+ int ret = 0;
+
+ *rdev = MKDEV(0,0);
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_RAWDEV - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_RAWDEV)) {
+ dev_t tmp;
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ major = be32_to_cpup(p++);
+ minor = be32_to_cpup(p);
+ tmp = MKDEV(major, minor);
+ if (MAJOR(tmp) == major && MINOR(tmp) == minor)
+ *rdev = tmp;
+ bitmap[1] &= ~FATTR4_WORD1_RAWDEV;
+ ret = NFS_ATTR_FATTR_RDEV;
+ }
+ dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+ __be32 *p;
+ int status = 0;
+
+ *res = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_AVAIL - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_SPACE_AVAIL)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
+ bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL;
+ }
+ dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+ __be32 *p;
+ int status = 0;
+
+ *res = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_FREE - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_SPACE_FREE)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
+ bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE;
+ }
+ dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *res)
+{
+ __be32 *p;
+ int status = 0;
+
+ *res = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_TOTAL - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_SPACE_TOTAL)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, res);
+ bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL;
+ }
+ dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *used)
+{
+ __be32 *p;
+ int ret = 0;
+
+ *used = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_SPACE_USED - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_SPACE_USED)) {
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, used);
+ bitmap[1] &= ~FATTR4_WORD1_SPACE_USED;
+ ret = NFS_ATTR_FATTR_SPACE_USED;
+ }
+ dprintk("%s: space used=%Lu\n", __func__,
+ (unsigned long long)*used);
+ return ret;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_attr_time(struct xdr_stream *xdr, struct timespec *time)
+{
+ __be32 *p;
+ uint64_t sec;
+ uint32_t nsec;
+
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &sec);
+ nsec = be32_to_cpup(p);
+ time->tv_sec = (time_t)sec;
+ time->tv_nsec = (long)nsec;
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
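+
+/*
+ * nfstime4 as decoded above is 12 bytes: a 64-bit seconds field
+ * followed by a 32-bit nanoseconds field. For example, the instant
+ * 2009-02-13 23:31:30 UTC arrives as seconds=1234567890, nseconds=0.
+ */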
+
+static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
+{
+ int status = 0;
+
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_ACCESS - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_TIME_ACCESS)) {
+ status = decode_attr_time(xdr, time);
+ if (status == 0)
+ status = NFS_ATTR_FATTR_ATIME;
+ bitmap[1] &= ~FATTR4_WORD1_TIME_ACCESS;
+ }
+ dprintk("%s: atime=%ld\n", __func__, (long)time->tv_sec);
+ return status;
+}
+
+static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
+{
+ int status = 0;
+
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_METADATA - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_TIME_METADATA)) {
+ status = decode_attr_time(xdr, time);
+ if (status == 0)
+ status = NFS_ATTR_FATTR_CTIME;
+ bitmap[1] &= ~FATTR4_WORD1_TIME_METADATA;
+ }
+ dprintk("%s: ctime=%ld\n", __func__, (long)time->tv_sec);
+ return status;
+}
+
+static int decode_attr_time_delta(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct timespec *time)
+{
+ int status = 0;
+
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_DELTA - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_TIME_DELTA)) {
+ status = decode_attr_time(xdr, time);
+ bitmap[1] &= ~FATTR4_WORD1_TIME_DELTA;
+ }
+ dprintk("%s: time_delta=%ld %ld\n", __func__, (long)time->tv_sec,
+ (long)time->tv_nsec);
+ return status;
+}
+
+static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, struct timespec *time)
+{
+ int status = 0;
+
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_TIME_MODIFY - 1U)))
+ return -EIO;
+ if (likely(bitmap[1] & FATTR4_WORD1_TIME_MODIFY)) {
+ status = decode_attr_time(xdr, time);
+ if (status == 0)
+ status = NFS_ATTR_FATTR_MTIME;
+ bitmap[1] &= ~FATTR4_WORD1_TIME_MODIFY;
+ }
+ dprintk("%s: mtime=%ld\n", __func__, (long)time->tv_sec);
+ return status;
+}
+
+static int verify_attr_len(struct xdr_stream *xdr, __be32 *savep, uint32_t attrlen)
+{
+ unsigned int attrwords = XDR_QUADLEN(attrlen);
+ unsigned int nwords = xdr->p - savep;
+
+ if (unlikely(attrwords != nwords)) {
+ dprintk("%s: server returned incorrect attribute length: "
+ "%u %c %u\n",
+ __func__,
+ attrwords << 2,
+ (attrwords < nwords) ? '<' : '>',
+ nwords << 2);
+ return -EIO;
+ }
+ return 0;
+}
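+
+/*
+ * verify_attr_len() works because decode_attr_length() saved xdr->p in
+ * *savep immediately after the attrlen word, so (xdr->p - savep) is
+ * the number of words the per-attribute decoders actually consumed.
+ * A mismatch means the server's advertised attrlen disagrees with the
+ * attributes it sent, and the reply cannot be trusted.
+ */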
+
+static int decode_change_info(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 20);
+ if (unlikely(!p))
+ goto out_overflow;
+ cinfo->atomic = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &cinfo->before);
+ xdr_decode_hyper(p, &cinfo->after);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
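+
+/*
+ * change_info4, matching the fixed 20 bytes read above:
+ *
+ *	atomic	4 bytes	(boolean)
+ *	before	8 bytes	(changeid4 prior to the operation)
+ *	after	8 bytes	(changeid4 following the operation)
+ */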
+
+static int decode_access(struct xdr_stream *xdr, struct nfs4_accessres *access)
+{
+ __be32 *p;
+ uint32_t supp, acc;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_ACCESS);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ supp = be32_to_cpup(p++);
+ acc = be32_to_cpup(p);
+ access->supported = supp;
+ access->access = acc;
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_opaque_fixed(struct xdr_stream *xdr, void *buf, size_t len)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, len);
+ if (likely(p)) {
+ memcpy(buf, p, len);
+ return 0;
+ }
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ return decode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
+}
+
+static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_CLOSE);
+ if (status != -EIO)
+ nfs_increment_open_seqid(status, res->seqid);
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
+}
+
+static int decode_verifier(struct xdr_stream *xdr, void *verifier)
+{
+ return decode_opaque_fixed(xdr, verifier, NFS4_VERIFIER_SIZE);
+}
+
+static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_COMMIT);
+ if (!status)
+ status = decode_verifier(xdr, res->verf->verifier);
+ return status;
+}
+
+static int decode_create(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
+{
+ __be32 *p;
+ uint32_t bmlen;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_CREATE);
+ if (status)
+ return status;
+ if ((status = decode_change_info(xdr, cinfo)))
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bmlen = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, bmlen << 2);
+ if (likely(p))
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_res *res)
+{
+ __be32 *savep;
+ uint32_t attrlen, bitmap[3] = {0};
+ int status;
+
+ if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_supported(xdr, bitmap, res->attr_bitmask)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_fh_expire_type(xdr, bitmap,
+ &res->fh_expire_type)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_link_support(xdr, bitmap, &res->has_links)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_symlink_support(xdr, bitmap, &res->has_symlinks)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0)
+ goto xdr_error;
+ status = verify_attr_len(xdr, savep, attrlen);
+xdr_error:
+ dprintk("%s: xdr returned %d!\n", __func__, -status);
+ return status;
+}
+
+static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
+{
+ __be32 *savep;
+ uint32_t attrlen, bitmap[3] = {0};
+ int status;
+
+ if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+ goto xdr_error;
+
+ if ((status = decode_attr_files_avail(xdr, bitmap, &fsstat->afiles)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_files_free(xdr, bitmap, &fsstat->ffiles)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_files_total(xdr, bitmap, &fsstat->tfiles)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_space_avail(xdr, bitmap, &fsstat->abytes)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_space_free(xdr, bitmap, &fsstat->fbytes)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_space_total(xdr, bitmap, &fsstat->tbytes)) != 0)
+ goto xdr_error;
+
+ status = verify_attr_len(xdr, savep, attrlen);
+xdr_error:
+ dprintk("%s: xdr returned %d!\n", __func__, -status);
+ return status;
+}
+
+static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf)
+{
+ __be32 *savep;
+ uint32_t attrlen, bitmap[3] = {0};
+ int status;
+
+ if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+ goto xdr_error;
+
+ if ((status = decode_attr_maxlink(xdr, bitmap, &pathconf->max_link)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_maxname(xdr, bitmap, &pathconf->max_namelen)) != 0)
+ goto xdr_error;
+
+ status = verify_attr_len(xdr, savep, attrlen);
+xdr_error:
+ dprintk("%s: xdr returned %d!\n", __func__, -status);
+ return status;
+}
+
+static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
+ struct nfs_fattr *fattr, struct nfs_fh *fh,
+ struct nfs4_fs_locations *fs_loc,
+ const struct nfs_server *server)
+{
+ int status;
+ umode_t fmode = 0;
+ uint32_t type;
+ int32_t err;
+
+ status = decode_attr_type(xdr, bitmap, &type);
+ if (status < 0)
+ goto xdr_error;
+ fattr->mode = 0;
+ if (status != 0) {
+ fattr->mode |= nfs_type2fmt[type];
+ fattr->valid |= status;
+ }
+
+ status = decode_attr_change(xdr, bitmap, &fattr->change_attr);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_size(xdr, bitmap, &fattr->size);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_fsid(xdr, bitmap, &fattr->fsid);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ err = 0;
+ status = decode_attr_error(xdr, bitmap, &err);
+ if (status < 0)
+ goto xdr_error;
+
+ status = decode_attr_filehandle(xdr, bitmap, fh);
+ if (status < 0)
+ goto xdr_error;
+
+ status = decode_attr_fileid(xdr, bitmap, &fattr->fileid);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_fs_locations(xdr, bitmap, fs_loc);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_mode(xdr, bitmap, &fmode);
+ if (status < 0)
+ goto xdr_error;
+ if (status != 0) {
+ fattr->mode |= fmode;
+ fattr->valid |= status;
+ }
+
+ status = decode_attr_nlink(xdr, bitmap, &fattr->nlink);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_owner(xdr, bitmap, server, &fattr->uid, fattr->owner_name);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_group(xdr, bitmap, server, &fattr->gid, fattr->group_name);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_rdev(xdr, bitmap, &fattr->rdev);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_space_used(xdr, bitmap, &fattr->du.nfs3.used);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_time_access(xdr, bitmap, &fattr->atime);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_time_metadata(xdr, bitmap, &fattr->ctime);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_time_modify(xdr, bitmap, &fattr->mtime);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+ status = decode_attr_mounted_on_fileid(xdr, bitmap, &fattr->mounted_on_fileid);
+ if (status < 0)
+ goto xdr_error;
+ fattr->valid |= status;
+
+xdr_error:
+ dprintk("%s: xdr returned %d\n", __func__, -status);
+ return status;
+}
+
+static int decode_getfattr_generic(struct xdr_stream *xdr, struct nfs_fattr *fattr,
+ struct nfs_fh *fh, struct nfs4_fs_locations *fs_loc,
+ const struct nfs_server *server)
+{
+ __be32 *savep;
+ uint32_t attrlen, bitmap[3] = {0};
+ int status;
+
+ status = decode_op_hdr(xdr, OP_GETATTR);
+ if (status < 0)
+ goto xdr_error;
+
+ status = decode_attr_bitmap(xdr, bitmap);
+ if (status < 0)
+ goto xdr_error;
+
+ status = decode_attr_length(xdr, &attrlen, &savep);
+ if (status < 0)
+ goto xdr_error;
+
+ status = decode_getfattr_attrs(xdr, bitmap, fattr, fh, fs_loc, server);
+ if (status < 0)
+ goto xdr_error;
+
+ status = verify_attr_len(xdr, savep, attrlen);
+xdr_error:
+ dprintk("%s: xdr returned %d\n", __func__, -status);
+ return status;
+}
+
+static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr,
+ const struct nfs_server *server)
+{
+ return decode_getfattr_generic(xdr, fattr, NULL, NULL, server);
+}
+
+/*
+ * Decode potentially multiple layout types. Currently we only support
+ * one layout driver per file system.
+ */
+static int decode_first_pnfs_layout_type(struct xdr_stream *xdr,
+ uint32_t *layouttype)
+{
+ __be32 *p;
+ int num;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ num = be32_to_cpup(p);
+
+ /* pNFS is not supported by the underlying file system */
+ if (num == 0) {
+ *layouttype = 0;
+ return 0;
+ }
+ if (num > 1)
+ printk(KERN_INFO "NFS: %s: Warning: Multiple pNFS layout "
+ "drivers per filesystem not supported\n", __func__);
+
+ /* Decode and set first layout type, move xdr->p past unused types */
+ p = xdr_inline_decode(xdr, num * 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ *layouttype = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
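+
+/*
+ * fs_layout_types is a counted array of layout type numbers. A server
+ * advertising only the files layout (LAYOUT4_NFSV4_1_FILES == 1) sends
+ * the two words { 1, 1 }; only the first entry is used here.
+ */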
+
+/*
+ * The type of file system exported.
+ * Note we must ensure that layouttype is set in any non-error case.
+ */
+static int decode_attr_pnfstype(struct xdr_stream *xdr, uint32_t *bitmap,
+ uint32_t *layouttype)
+{
+ int status = 0;
+
+ dprintk("%s: bitmap is %x\n", __func__, bitmap[1]);
+ if (unlikely(bitmap[1] & (FATTR4_WORD1_FS_LAYOUT_TYPES - 1U)))
+ return -EIO;
+ if (bitmap[1] & FATTR4_WORD1_FS_LAYOUT_TYPES) {
+ status = decode_first_pnfs_layout_type(xdr, layouttype);
+ bitmap[1] &= ~FATTR4_WORD1_FS_LAYOUT_TYPES;
+ } else
+ *layouttype = 0;
+ return status;
+}
+
+/*
+ * The preferred block size for layout-directed I/O
+ */
+static int decode_attr_layout_blksize(struct xdr_stream *xdr, uint32_t *bitmap,
+ uint32_t *res)
+{
+ __be32 *p;
+
+ dprintk("%s: bitmap is %x\n", __func__, bitmap[2]);
+ *res = 0;
+ if (bitmap[2] & FATTR4_WORD2_LAYOUT_BLKSIZE) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p)) {
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+ }
+ *res = be32_to_cpup(p);
+ bitmap[2] &= ~FATTR4_WORD2_LAYOUT_BLKSIZE;
+ }
+ return 0;
+}
+
+static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
+{
+ __be32 *savep;
+ uint32_t attrlen, bitmap[3];
+ int status;
+
+ if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+ goto xdr_error;
+
+ fsinfo->rtmult = fsinfo->wtmult = 512; /* NFSv4 has no rtmult/wtmult attribute */
+
+ if ((status = decode_attr_lease_time(xdr, bitmap, &fsinfo->lease_time)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_maxfilesize(xdr, bitmap, &fsinfo->maxfilesize)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_maxread(xdr, bitmap, &fsinfo->rtmax)) != 0)
+ goto xdr_error;
+ fsinfo->rtpref = fsinfo->dtpref = fsinfo->rtmax;
+ if ((status = decode_attr_maxwrite(xdr, bitmap, &fsinfo->wtmax)) != 0)
+ goto xdr_error;
+ fsinfo->wtpref = fsinfo->wtmax;
+ status = decode_attr_time_delta(xdr, bitmap, &fsinfo->time_delta);
+ if (status != 0)
+ goto xdr_error;
+ status = decode_attr_pnfstype(xdr, bitmap, &fsinfo->layouttype);
+ if (status != 0)
+ goto xdr_error;
+ status = decode_attr_layout_blksize(xdr, bitmap, &fsinfo->blksize);
+ if (status)
+ goto xdr_error;
+
+ status = verify_attr_len(xdr, savep, attrlen);
+xdr_error:
+ dprintk("%s: xdr returned %d!\n", __func__, -status);
+ return status;
+}
+
+static int decode_getfh(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+ __be32 *p;
+ uint32_t len;
+ int status;
+
+ /* Zero handle first to allow comparisons */
+ memset(fh, 0, sizeof(*fh));
+
+ status = decode_op_hdr(xdr, OP_GETFH);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ if (len > NFS4_FHSIZE)
+ return -EIO;
+ fh->size = len;
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ memcpy(fh->data, p, len);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_LINK);
+ if (status)
+ return status;
+ return decode_change_info(xdr, cinfo);
+}
+
+/*
+ * We create the owner, so we know a proper owner.id length is 4.
+ */
+static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
+{
+ uint64_t offset, length, clientid;
+ __be32 *p;
+ uint32_t namelen, type;
+
+ p = xdr_inline_decode(xdr, 32); /* read 32 bytes */
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &offset); /* read 2 8-byte long words */
+ p = xdr_decode_hyper(p, &length);
+ type = be32_to_cpup(p++); /* 4 byte read */
+ if (fl != NULL) { /* manipulate file lock */
+ fl->fl_start = (loff_t)offset;
+ fl->fl_end = fl->fl_start + (loff_t)length - 1;
+ if (length == ~(uint64_t)0)
+ fl->fl_end = OFFSET_MAX;
+ fl->fl_type = F_WRLCK;
+ if (type & 1)
+ fl->fl_type = F_RDLCK;
+ fl->fl_pid = 0;
+ }
+ p = xdr_decode_hyper(p, &clientid); /* read 8 bytes */
+ namelen = be32_to_cpup(p); /* read 4 bytes */ /* have read all 32 bytes now */
+ p = xdr_inline_decode(xdr, namelen); /* variable size field */
+ if (likely(p))
+ return -NFS4ERR_DENIED;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
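+
+/*
+ * LOCK4denied, matching the 32 fixed bytes plus the variable-length
+ * owner read above:
+ *
+ *	offset		8 bytes
+ *	length		8 bytes
+ *	locktype	4 bytes
+ *	owner.clientid	8 bytes
+ *	owner.owner	4-byte length + opaque data
+ */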
+
+static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_LOCK);
+ if (status == -EIO)
+ goto out;
+ if (status == 0) {
+ status = decode_stateid(xdr, &res->stateid);
+ if (unlikely(status))
+ goto out;
+ } else if (status == -NFS4ERR_DENIED)
+ status = decode_lock_denied(xdr, NULL);
+ if (res->open_seqid != NULL)
+ nfs_increment_open_seqid(status, res->open_seqid);
+ nfs_increment_lock_seqid(status, res->lock_seqid);
+out:
+ return status;
+}
+
+static int decode_lockt(struct xdr_stream *xdr, struct nfs_lockt_res *res)
+{
+ int status;
+ status = decode_op_hdr(xdr, OP_LOCKT);
+ if (status == -NFS4ERR_DENIED)
+ return decode_lock_denied(xdr, res->denied);
+ return status;
+}
+
+static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_LOCKU);
+ if (status != -EIO)
+ nfs_increment_lock_seqid(status, res->seqid);
+ if (status == 0)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
+}
+
+static int decode_release_lockowner(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_RELEASE_LOCKOWNER);
+}
+
+static int decode_lookup(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_LOOKUP);
+}
+
+/* This is too sick! Hand-decoding the space_limit4 union. */
+static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize)
+{
+ __be32 *p;
+ uint32_t limit_type, nblocks, blocksize;
+
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ limit_type = be32_to_cpup(p++);
+ switch (limit_type) {
+ case 1:
+ xdr_decode_hyper(p, maxsize);
+ break;
+ case 2:
+ nblocks = be32_to_cpup(p++);
+ blocksize = be32_to_cpup(p);
+ *maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
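+
+/*
+ * nfs_space_limit4 is a discriminated union: limitby == 1 carries a
+ * 64-bit file size, limitby == 2 carries an (nblocks, bytes-per-block)
+ * pair from which the decoder computes nblocks * blocksize. For
+ * example { limitby = 2, nblocks = 1024, blocksize = 4096 } yields a
+ * 4 MiB write limit.
+ */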
+
+static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
+{
+ __be32 *p;
+ uint32_t delegation_type;
+ int status;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ delegation_type = be32_to_cpup(p);
+ if (delegation_type == NFS4_OPEN_DELEGATE_NONE) {
+ res->delegation_type = 0;
+ return 0;
+ }
+ status = decode_stateid(xdr, &res->delegation);
+ if (unlikely(status))
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->do_recall = be32_to_cpup(p);
+
+ switch (delegation_type) {
+ case NFS4_OPEN_DELEGATE_READ:
+ res->delegation_type = FMODE_READ;
+ break;
+ case NFS4_OPEN_DELEGATE_WRITE:
+ res->delegation_type = FMODE_WRITE|FMODE_READ;
+ if (decode_space_limit(xdr, &res->maxsize) < 0)
+ return -EIO;
+ }
+ return decode_ace(xdr, NULL, res->server->nfs_client);
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
+{
+ __be32 *p;
+ uint32_t savewords, bmlen, i;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_OPEN);
+ if (status != -EIO)
+ nfs_increment_open_seqid(status, res->seqid);
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ if (unlikely(status))
+ return status;
+
+ decode_change_info(xdr, &res->cinfo);
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->rflags = be32_to_cpup(p++);
+ bmlen = be32_to_cpup(p);
+ if (bmlen > 10)
+ goto xdr_error;
+
+ p = xdr_inline_decode(xdr, bmlen << 2);
+ if (unlikely(!p))
+ goto out_overflow;
+ savewords = min_t(uint32_t, bmlen, NFS4_BITMAP_SIZE);
+ for (i = 0; i < savewords; ++i)
+ res->attrset[i] = be32_to_cpup(p++);
+ for (; i < NFS4_BITMAP_SIZE; i++)
+ res->attrset[i] = 0;
+
+ return decode_delegation(xdr, res);
+xdr_error:
+ dprintk("%s: Bitmap too large! Length = %u\n", __func__, bmlen);
+ return -EIO;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmres *res)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
+ if (status != -EIO)
+ nfs_increment_open_seqid(status, res->seqid);
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
+}
+
+static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *res)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
+ if (status != -EIO)
+ nfs_increment_open_seqid(status, res->seqid);
+ if (!status)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
+}
+
+static int decode_putfh(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_PUTFH);
+}
+
+static int decode_putrootfh(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_PUTROOTFH);
+}
+
+static int decode_read(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_readres *res)
+{
+ struct kvec *iov = req->rq_rcv_buf.head;
+ __be32 *p;
+ uint32_t count, eof, recvd, hdrlen;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_READ);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ eof = be32_to_cpup(p++);
+ count = be32_to_cpup(p);
+ hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
+ recvd = req->rq_rcv_buf.len - hdrlen;
+ if (count > recvd) {
+ dprintk("NFS: server cheating in read reply: "
+ "count %u > recvd %u\n", count, recvd);
+ count = recvd;
+ eof = 0;
+ }
+ xdr_read_pages(xdr, count);
+ res->eof = eof;
+ res->count = count;
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs4_readdir_res *readdir)
+{
+ struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+ struct kvec *iov = rcvbuf->head;
+ size_t hdrlen;
+ u32 recvd, pglen = rcvbuf->page_len;
+ int status;
+ __be32 verf[2];
+
+ status = decode_op_hdr(xdr, OP_READDIR);
+ if (!status)
+ status = decode_verifier(xdr, readdir->verifier.data);
+ if (unlikely(status))
+ return status;
+ memcpy(verf, readdir->verifier.data, sizeof(verf));
+ dprintk("%s: verifier = %08x:%08x\n",
+ __func__, verf[0], verf[1]);
+
+ hdrlen = (char *) xdr->p - (char *) iov->iov_base;
+ recvd = rcvbuf->len - hdrlen;
+ if (pglen > recvd)
+ pglen = recvd;
+ xdr_read_pages(xdr, pglen);
+
+ return pglen;
+}
+
+static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
+{
+ struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+ struct kvec *iov = rcvbuf->head;
+ size_t hdrlen;
+ u32 len, recvd;
+ __be32 *p;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_READLINK);
+ if (status)
+ return status;
+
+ /* Convert length of symlink */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ if (len >= rcvbuf->page_len || len <= 0) {
+ dprintk("nfs: server returned giant symlink!\n");
+ return -ENAMETOOLONG;
+ }
+ hdrlen = (char *) xdr->p - (char *) iov->iov_base;
+ recvd = req->rq_rcv_buf.len - hdrlen;
+ if (recvd < len) {
+ dprintk("NFS: server cheating in readlink reply: "
+ "count %u > recvd %u\n", len, recvd);
+ return -EIO;
+ }
+ xdr_read_pages(xdr, len);
+ /*
+ * The XDR encode routine has set things up so that
+ * the link text will be copied directly into the
+ * buffer. We just have to do overflow-checking,
+ * and null-terminate the text (the VFS expects
+ * null-termination).
+ */
+ xdr_terminate_string(rcvbuf, len);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_remove(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_REMOVE);
+ if (status)
+ goto out;
+ status = decode_change_info(xdr, cinfo);
+out:
+ return status;
+}
+
+static int decode_rename(struct xdr_stream *xdr, struct nfs4_change_info *old_cinfo,
+ struct nfs4_change_info *new_cinfo)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_RENAME);
+ if (status)
+ goto out;
+ if ((status = decode_change_info(xdr, old_cinfo)))
+ goto out;
+ status = decode_change_info(xdr, new_cinfo);
+out:
+ return status;
+}
+
+static int decode_renew(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_RENEW);
+}
+
+static int
+decode_restorefh(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_RESTOREFH);
+}
+
+static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
+ struct nfs_getaclres *res)
+{
+ __be32 *savep, *bm_p;
+ uint32_t attrlen, bitmap[3] = {0};
+ struct kvec *iov = req->rq_rcv_buf.head;
+ int status;
+ size_t page_len = xdr->buf->page_len;
+
+ res->acl_len = 0;
+ if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
+ goto out;
+
+ bm_p = xdr->p;
+ res->acl_data_offset = be32_to_cpup(bm_p) + 2;
+ res->acl_data_offset <<= 2;
+ /* Check if the acl data starts beyond the allocated buffer */
+ if (res->acl_data_offset > page_len)
+ return -ERANGE;
+
+ if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
+ goto out;
+ if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
+ goto out;
+
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
+ size_t hdrlen;
+
+ /* The bitmap (xdr len + bitmaps) and the attr xdr len words
+ * are stored with the acl data to handle the problem of
+ * variable length bitmaps.*/
+ xdr->p = bm_p;
+
+ /* We ignore &savep and don't do consistency checks on
+ * the attr length. Let userspace figure it out.... */
+ hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base;
+ attrlen += res->acl_data_offset;
+ if (attrlen > page_len) {
+ if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
+ /* getxattr interface called with a NULL buf */
+ res->acl_len = attrlen;
+ goto out;
+ }
+ dprintk("NFS: acl reply: attrlen %u > page_len %zu\n",
+ attrlen, page_len);
+ return -EINVAL;
+ }
+ xdr_read_pages(xdr, attrlen);
+ res->acl_len = attrlen;
+ } else
+ status = -EOPNOTSUPP;
+
+out:
+ return status;
+}
+
+static int
+decode_savefh(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_SAVEFH);
+}
+
+static int decode_setattr(struct xdr_stream *xdr)
+{
+ __be32 *p;
+ uint32_t bmlen;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_SETATTR);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ bmlen = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, bmlen << 2);
+ if (likely(p))
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_setclientid(struct xdr_stream *xdr, struct nfs4_setclientid_res *res)
+{
+ __be32 *p;
+ uint32_t opnum;
+ int32_t nfserr;
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ opnum = be32_to_cpup(p++);
+ if (opnum != OP_SETCLIENTID) {
+ dprintk("nfs: decode_setclientid: Server returned operation"
+ " %d\n", opnum);
+ return -EIO;
+ }
+ nfserr = be32_to_cpup(p);
+ if (nfserr == NFS_OK) {
+ p = xdr_inline_decode(xdr, 8 + NFS4_VERIFIER_SIZE);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &res->clientid);
+ memcpy(res->confirm.data, p, NFS4_VERIFIER_SIZE);
+ } else if (nfserr == NFSERR_CLID_INUSE) {
+ uint32_t len;
+
+ /* skip netid string */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ /* skip uaddr string */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ p = xdr_inline_decode(xdr, len);
+ if (unlikely(!p))
+ goto out_overflow;
+ return -NFSERR_CLID_INUSE;
+ } else
+ return nfs4_stat_to_errno(nfserr);
+
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
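+
+/*
+ * For reference, the SETCLIENTID result consumed above is, on
+ * success:
+ *
+ *	opcode (OP_SETCLIENTID)   4 bytes
+ *	status (NFS_OK)           4 bytes
+ *	clientid4                 8 bytes
+ *	verifier4                 NFS4_VERIFIER_SIZE bytes
+ *
+ * while NFSERR_CLID_INUSE is followed by the conflicting client's
+ * netid and uaddr strings, both of which are skipped.
+ */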
+
+static int decode_setclientid_confirm(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_SETCLIENTID_CONFIRM);
+}
+
+static int decode_write(struct xdr_stream *xdr, struct nfs_writeres *res)
+{
+ __be32 *p;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_WRITE);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 16);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->count = be32_to_cpup(p++);
+ res->verf->committed = be32_to_cpup(p++);
+ memcpy(res->verf->verifier, p, NFS4_VERIFIER_SIZE);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_delegreturn(struct xdr_stream *xdr)
+{
+ return decode_op_hdr(xdr, OP_DELEGRETURN);
+}
+
+static int decode_secinfo_gss(struct xdr_stream *xdr, struct nfs4_secinfo_flavor *flavor)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ flavor->gss.sec_oid4.len = be32_to_cpup(p);
+ if (flavor->gss.sec_oid4.len > GSS_OID_MAX_LEN)
+ goto out_err;
+
+ p = xdr_inline_decode(xdr, flavor->gss.sec_oid4.len);
+ if (unlikely(!p))
+ goto out_overflow;
+ memcpy(flavor->gss.sec_oid4.data, p, flavor->gss.sec_oid4.len);
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ flavor->gss.qop4 = be32_to_cpup(p++);
+ flavor->gss.service = be32_to_cpup(p);
+
+ return 0;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+out_err:
+ return -EINVAL;
+}
+
+static int decode_secinfo_common(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
+{
+ struct nfs4_secinfo_flavor *sec_flavor;
+ int status;
+ __be32 *p;
+ int i, num_flavors;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ res->flavors->num_flavors = 0;
+ num_flavors = be32_to_cpup(p);
+
+ for (i = 0; i < num_flavors; i++) {
+ sec_flavor = &res->flavors->flavors[i];
+ if ((char *)&sec_flavor[1] - (char *)res->flavors > PAGE_SIZE)
+ break;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ sec_flavor->flavor = be32_to_cpup(p);
+
+ if (sec_flavor->flavor == RPC_AUTH_GSS) {
+ status = decode_secinfo_gss(xdr, sec_flavor);
+ if (status)
+ goto out;
+ }
+ res->flavors->num_flavors++;
+ }
+
+ status = 0;
+out:
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
+{
+ int status = decode_op_hdr(xdr, OP_SECINFO);
+ if (status)
+ return status;
+ return decode_secinfo_common(xdr, res);
+}
+
+#if defined(CONFIG_NFS_V4_1)
+static int decode_secinfo_no_name(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
+{
+ int status = decode_op_hdr(xdr, OP_SECINFO_NO_NAME);
+ if (status)
+ return status;
+ return decode_secinfo_common(xdr, res);
+}
+
+static int decode_exchange_id(struct xdr_stream *xdr,
+ struct nfs41_exchange_id_res *res)
+{
+ __be32 *p;
+ uint32_t dummy;
+ char *dummy_str;
+ int status;
+ struct nfs_client *clp = res->client;
+ uint32_t impl_id_count;
+
+ status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ xdr_decode_hyper(p, &clp->cl_clientid);
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ clp->cl_seqid = be32_to_cpup(p++);
+ clp->cl_exchange_flags = be32_to_cpup(p++);
+
+ /* We ask for SP4_NONE */
+ dummy = be32_to_cpup(p);
+ if (dummy != SP4_NONE)
+ return -EIO;
+
+ /* Throw away minor_id */
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ /* Throw away Major id */
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
+
+ /* Save server_scope */
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
+
+ if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
+ return -EIO;
+
+ memcpy(res->server_scope->server_scope, dummy_str, dummy);
+ res->server_scope->server_scope_sz = dummy;
+
+ /* Implementation Id */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ impl_id_count = be32_to_cpup(p++);
+
+ if (impl_id_count) {
+ /* nii_domain */
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
+ if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
+ return -EIO;
+ memcpy(res->impl_id->domain, dummy_str, dummy);
+
+ /* nii_name */
+ status = decode_opaque_inline(xdr, &dummy, &dummy_str);
+ if (unlikely(status))
+ return status;
+ if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
+ return -EIO;
+ memcpy(res->impl_id->name, dummy_str, dummy);
+
+ /* nii_date */
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &res->impl_id->date.seconds);
+ res->impl_id->date.nseconds = be32_to_cpup(p);
+
+ /* if there's more than one entry, ignore the rest */
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_chan_attrs(struct xdr_stream *xdr,
+ struct nfs4_channel_attrs *attrs)
+{
+ __be32 *p;
+ u32 nr_attrs, val;
+
+ p = xdr_inline_decode(xdr, 28);
+ if (unlikely(!p))
+ goto out_overflow;
+ val = be32_to_cpup(p++); /* headerpadsz */
+ if (val)
+ return -EINVAL; /* no support for header padding yet */
+ attrs->max_rqst_sz = be32_to_cpup(p++);
+ attrs->max_resp_sz = be32_to_cpup(p++);
+ attrs->max_resp_sz_cached = be32_to_cpup(p++);
+ attrs->max_ops = be32_to_cpup(p++);
+ attrs->max_reqs = be32_to_cpup(p++);
+ nr_attrs = be32_to_cpup(p);
+ if (unlikely(nr_attrs > 1)) {
+ printk(KERN_WARNING "NFS: %s: Invalid rdma channel attrs "
+ "count %u\n", __func__, nr_attrs);
+ return -EINVAL;
+ }
+ if (nr_attrs == 1) {
+ p = xdr_inline_decode(xdr, 4); /* skip rdma_attrs */
+ if (unlikely(!p))
+ goto out_overflow;
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
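+
+/*
+ * The fixed part of the channel attributes decoded above is seven
+ * 32-bit words (the 28 bytes read inline): headerpadsz, maximum
+ * request and reply sizes, maximum cached reply size, maximum
+ * operations per compound, maximum requests, and the count of
+ * rdma_ird attributes (at most one, whose value is skipped).
+ */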
+
+static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
+{
+ return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
+}
+
+static int decode_create_session(struct xdr_stream *xdr,
+ struct nfs41_create_session_res *res)
+{
+ __be32 *p;
+ int status;
+ struct nfs_client *clp = res->client;
+ struct nfs4_session *session = clp->cl_session;
+
+ status = decode_op_hdr(xdr, OP_CREATE_SESSION);
+ if (!status)
+ status = decode_sessionid(xdr, &session->sess_id);
+ if (unlikely(status))
+ return status;
+
+ /* seqid, flags */
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ clp->cl_seqid = be32_to_cpup(p++);
+ session->flags = be32_to_cpup(p);
+
+ /* Channel attributes */
+ status = decode_chan_attrs(xdr, &session->fc_attrs);
+ if (!status)
+ status = decode_chan_attrs(xdr, &session->bc_attrs);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
+{
+ return decode_op_hdr(xdr, OP_DESTROY_SESSION);
+}
+
+static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
+{
+ return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+static int decode_sequence(struct xdr_stream *xdr,
+ struct nfs4_sequence_res *res,
+ struct rpc_rqst *rqstp)
+{
+#if defined(CONFIG_NFS_V4_1)
+ struct nfs4_sessionid id;
+ u32 dummy;
+ int status;
+ __be32 *p;
+
+ if (!res->sr_session)
+ return 0;
+
+ status = decode_op_hdr(xdr, OP_SEQUENCE);
+ if (!status)
+ status = decode_sessionid(xdr, &id);
+ if (unlikely(status))
+ goto out_err;
+
+ /*
+ * If the server returns different values for sessionID, slotID or
+ * sequence number, the server is looney tunes.
+ */
+ status = -EREMOTEIO;
+
+ if (memcmp(id.data, res->sr_session->sess_id.data,
+ NFS4_MAX_SESSIONID_LEN)) {
+ dprintk("%s Invalid session id\n", __func__);
+ goto out_err;
+ }
+
+ p = xdr_inline_decode(xdr, 20);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ /* seqid */
+ dummy = be32_to_cpup(p++);
+ if (dummy != res->sr_slot->seq_nr) {
+ dprintk("%s Invalid sequence number\n", __func__);
+ goto out_err;
+ }
+ /* slot id */
+ dummy = be32_to_cpup(p++);
+ if (dummy != res->sr_slot - res->sr_session->fc_slot_table.slots) {
+ dprintk("%s Invalid slot id\n", __func__);
+ goto out_err;
+ }
+ /* highest slot id - currently not processed */
+ dummy = be32_to_cpup(p++);
+ /* target highest slot id - currently not processed */
+ dummy = be32_to_cpup(p++);
+ /* result flags */
+ res->sr_status_flags = be32_to_cpup(p);
+ status = 0;
+out_err:
+ res->sr_status = status;
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ status = -EIO;
+ goto out_err;
+#else /* CONFIG_NFS_V4_1 */
+ return 0;
+#endif /* CONFIG_NFS_V4_1 */
+}
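+
+/*
+ * For reference, the SEQUENCE result checked above carries the
+ * 16-byte session ID followed by five 32-bit words (the 20 bytes
+ * decoded inline): sequence ID, slot ID, highest slot ID, target
+ * highest slot ID, and the result flags.
+ */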
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * TODO: Need to handle the case when EOF != true.
+ */
+static int decode_getdevicelist(struct xdr_stream *xdr,
+ struct pnfs_devicelist *res)
+{
+ __be32 *p;
+ int status, i;
+ struct nfs_writeverf verftemp;
+
+ status = decode_op_hdr(xdr, OP_GETDEVICELIST);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 8 + 8 + 4);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ /* TODO: Skip cookie for now */
+ p += 2;
+
+ /* Read verifier */
+ p = xdr_decode_opaque_fixed(p, verftemp.verifier, NFS4_VERIFIER_SIZE);
+
+ res->num_devs = be32_to_cpup(p);
+
+ dprintk("%s: num_dev %d\n", __func__, res->num_devs);
+
+ if (res->num_devs > NFS4_PNFS_GETDEVLIST_MAXNUM) {
+ printk(KERN_ERR "NFS: %s too many result dev_num %u\n",
+ __func__, res->num_devs);
+ return -EIO;
+ }
+
+ p = xdr_inline_decode(xdr,
+ res->num_devs * NFS4_DEVICEID4_SIZE + 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ for (i = 0; i < res->num_devs; i++)
+ p = xdr_decode_opaque_fixed(p, res->dev_id[i].data,
+ NFS4_DEVICEID4_SIZE);
+ res->eof = be32_to_cpup(p);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_getdeviceinfo(struct xdr_stream *xdr,
+ struct pnfs_device *pdev)
+{
+ __be32 *p;
+ uint32_t len, type;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_GETDEVICEINFO);
+ if (status) {
+ if (status == -ETOOSMALL) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ pdev->mincount = be32_to_cpup(p);
+ dprintk("%s: Min count too small. mincnt = %u\n",
+ __func__, pdev->mincount);
+ }
+ return status;
+ }
+
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ type = be32_to_cpup(p++);
+ if (type != pdev->layout_type) {
+ dprintk("%s: layout mismatch req: %u pdev: %u\n",
+ __func__, pdev->layout_type, type);
+ return -EINVAL;
+ }
+ /*
+ * Get the length of the opaque device_addr4. xdr_read_pages places
+ * the opaque device_addr4 in the xdr_buf->pages (pnfs_device->pages)
+ * and places the remaining xdr data in xdr_buf->tail
+ */
+ pdev->mincount = be32_to_cpup(p);
+ xdr_read_pages(xdr, pdev->mincount); /* include space for the length */
+
+ /* Parse notification bitmap, verifying that it is zero. */
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ len = be32_to_cpup(p);
+ if (len) {
+ uint32_t i;
+
+ p = xdr_inline_decode(xdr, 4 * len);
+ if (unlikely(!p))
+ goto out_overflow;
+ for (i = 0; i < len; i++, p++) {
+ if (be32_to_cpup(p)) {
+ dprintk("%s: notifications not supported\n",
+ __func__);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
+ struct nfs4_layoutget_res *res)
+{
+ __be32 *p;
+ int status;
+ u32 layout_count;
+ struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
+ struct kvec *iov = rcvbuf->head;
+ u32 hdrlen, recvd;
+
+ status = decode_op_hdr(xdr, OP_LAYOUTGET);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->return_on_close = be32_to_cpup(p);
+ decode_stateid(xdr, &res->stateid);
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ layout_count = be32_to_cpup(p);
+ if (!layout_count) {
+ dprintk("%s: server responded with empty layout array\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ p = xdr_inline_decode(xdr, 28);
+ if (unlikely(!p))
+ goto out_overflow;
+ p = xdr_decode_hyper(p, &res->range.offset);
+ p = xdr_decode_hyper(p, &res->range.length);
+ res->range.iomode = be32_to_cpup(p++);
+ res->type = be32_to_cpup(p++);
+ res->layoutp->len = be32_to_cpup(p);
+
+ dprintk("%s roff:%lu rlen:%lu riomode:%d, lo_type:0x%x, lo.len:%d\n",
+ __func__,
+ (unsigned long)res->range.offset,
+ (unsigned long)res->range.length,
+ res->range.iomode,
+ res->type,
+ res->layoutp->len);
+
+ hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
+ recvd = req->rq_rcv_buf.len - hdrlen;
+ if (res->layoutp->len > recvd) {
+ dprintk("NFS: server cheating in layoutget reply: "
+ "layout len %u > recvd %u\n",
+ res->layoutp->len, recvd);
+ return -EINVAL;
+ }
+
+ xdr_read_pages(xdr, res->layoutp->len);
+
+ if (layout_count > 1) {
+ /* We only handle a length one array at the moment. Any
+ * further entries are just ignored. Note that this means
+ * the client may see a response that is less than the
+ * minimum it requested.
+ */
+ dprintk("%s: server responded with %d layouts, dropping tail\n",
+ __func__, layout_count);
+ }
+
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
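+
+/*
+ * The LAYOUTGET result decoded above: a return-on-close flag, the
+ * layout stateid, and a layout array count, then for the first entry
+ * a 28-byte fixed part (64-bit offset and length, iomode, layout
+ * type, opaque body length) followed by the opaque layout body,
+ * which lands in the reply pages. Additional entries are ignored.
+ */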
+
+static int decode_layoutreturn(struct xdr_stream *xdr,
+ struct nfs4_layoutreturn_res *res)
+{
+ __be32 *p;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_LAYOUTRETURN);
+ if (status)
+ return status;
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->lrs_present = be32_to_cpup(p);
+ if (res->lrs_present)
+ status = decode_stateid(xdr, &res->stateid);
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_layoutcommit(struct xdr_stream *xdr,
+ struct rpc_rqst *req,
+ struct nfs4_layoutcommit_res *res)
+{
+ __be32 *p;
+ __u32 sizechanged;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_LAYOUTCOMMIT);
+ res->status = status;
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ sizechanged = be32_to_cpup(p);
+
+ if (sizechanged) {
+ /* throw away new size */
+ p = xdr_inline_decode(xdr, 8);
+ if (unlikely(!p))
+ goto out_overflow;
+ }
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_test_stateid(struct xdr_stream *xdr,
+ struct nfs41_test_stateid_res *res)
+{
+ __be32 *p;
+ int status;
+ int num_res;
+
+ status = decode_op_hdr(xdr, OP_TEST_STATEID);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ num_res = be32_to_cpup(p++);
+ if (num_res != 1)
+ goto out;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->status = be32_to_cpup(p++);
+
+ return status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+out:
+ return -EIO;
+}
+
+static int decode_free_stateid(struct xdr_stream *xdr,
+ struct nfs41_free_stateid_res *res)
+{
+ __be32 *p;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_FREE_STATEID);
+ if (status)
+ return status;
+
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ res->status = be32_to_cpup(p++);
+ return res->status;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
+ * END OF "GENERIC" DECODE ROUTINES.
+ */
+
+/*
+ * Decode OPEN_DOWNGRADE response
+ */
+static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs_closeres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_open_downgrade(xdr, res);
+ if (status != 0)
+ goto out;
+ decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode ACCESS response
+ */
+static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_accessres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status != 0)
+ goto out;
+ status = decode_access(xdr, res);
+ if (status != 0)
+ goto out;
+ decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode LOOKUP response
+ */
+static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_lookup_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_lookup(xdr);
+ if (status)
+ goto out;
+ status = decode_getfh(xdr, res->fh);
+ if (status)
+ goto out;
+ status = decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode LOOKUP_ROOT response
+ */
+static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_lookup_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putrootfh(xdr);
+ if (status)
+ goto out;
+ status = decode_getfh(xdr, res->fh);
+ if (status == 0)
+ status = decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode REMOVE response
+ */
+static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_removeres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_remove(xdr, &res->cinfo);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->dir_attr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode RENAME response
+ */
+static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_renameres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_savefh(xdr);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
+ if (status)
+ goto out;
+ /* Current FH is target directory */
+ if (decode_getfattr(xdr, res->new_fattr, res->server))
+ goto out;
+ status = decode_restorefh(xdr);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->old_fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode LINK response
+ */
+static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_link_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_savefh(xdr);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_link(xdr, &res->cinfo);
+ if (status)
+ goto out;
+ /*
+ * Note order: OP_LINK leaves the directory as the current
+ * filehandle.
+ */
+ if (decode_getfattr(xdr, res->dir_attr, res->server))
+ goto out;
+ status = decode_restorefh(xdr);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode CREATE response
+ */
+static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_create_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_savefh(xdr);
+ if (status)
+ goto out;
+ status = decode_create(xdr, &res->dir_cinfo);
+ if (status)
+ goto out;
+ status = decode_getfh(xdr, res->fh);
+ if (status)
+ goto out;
+ if (decode_getfattr(xdr, res->fattr, res->server))
+ goto out;
+ status = decode_restorefh(xdr);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->dir_fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode SYMLINK response
+ */
+static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_create_res *res)
+{
+ return nfs4_xdr_dec_create(rqstp, xdr, res);
+}
+
+/*
+ * Decode GETATTR response
+ */
+static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_getattr_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Encode a SETACL request
+ */
+static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs_setaclargs *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->fh, &hdr);
+ encode_setacl(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
+ * Decode SETACL response
+ */
+static int
+nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_setaclres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_setattr(xdr);
+out:
+ return status;
+}
+
+/*
+ * Decode GETACL response
+ */
+static int
+nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_getaclres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ if (res->acl_scratch != NULL) {
+ void *p = page_address(res->acl_scratch);
+ xdr_set_scratch_buffer(xdr, p, PAGE_SIZE);
+ }
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_getacl(xdr, rqstp, res);
+
+out:
+ return status;
+}
+
+/*
+ * Decode CLOSE response
+ */
+static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_closeres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_close(xdr, res);
+ if (status != 0)
+ goto out;
+ /*
+ * Note: Server may do delete on close for this file
+ * in which case the getattr call will fail with
+ * an ESTALE error. Shouldn't be a problem,
+ * though, since fattr->valid will remain unset.
+ */
+ decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode OPEN response
+ */
+static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_openres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_savefh(xdr);
+ if (status)
+ goto out;
+ status = decode_open(xdr, res);
+ if (status)
+ goto out;
+ if (decode_getfh(xdr, &res->fh) != 0)
+ goto out;
+ if (decode_getfattr(xdr, res->f_attr, res->server) != 0)
+ goto out;
+ if (decode_restorefh(xdr) != 0)
+ goto out;
+ decode_getfattr(xdr, res->dir_attr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode OPEN_CONFIRM response
+ */
+static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs_open_confirmres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_open_confirm(xdr, res);
+out:
+ return status;
+}
+
+/*
+ * Decode OPEN_NOATTR response
+ */
+static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs_openres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_open(xdr, res);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->f_attr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode SETATTR response
+ */
+static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs_setattrres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_setattr(xdr);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode LOCK response
+ */
+static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_lock_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_lock(xdr, res);
+out:
+ return status;
+}
+
+/*
+ * Decode LOCKT response
+ */
+static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_lockt_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_lockt(xdr, res);
+out:
+ return status;
+}
+
+/*
+ * Decode LOCKU response
+ */
+static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_locku_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_locku(xdr, res);
+out:
+ return status;
+}
+
+static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr, void *dummy)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_release_lockowner(xdr);
+ return status;
+}
+
+/*
+ * Decode READLINK response
+ */
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_readlink_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_readlink(xdr, rqstp);
+out:
+ return status;
+}
+
+/*
+ * Decode READDIR response
+ */
+static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs4_readdir_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_readdir(xdr, rqstp, res);
+out:
+ return status;
+}
+
+/*
+ * Decode READ response
+ */
+static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_readres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_read(xdr, rqstp, res);
+ if (!status)
+ status = res->count;
+out:
+ return status;
+}
+
+/*
+ * Decode WRITE response
+ */
+static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_writeres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_write(xdr, res);
+ if (status)
+ goto out;
+ if (res->fattr)
+ decode_getfattr(xdr, res->fattr, res->server);
+ if (!status)
+ status = res->count;
+out:
+ return status;
+}
+
+/*
+ * Decode COMMIT response
+ */
+static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ struct nfs_writeres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_commit(xdr, res);
+ if (status)
+ goto out;
+ if (res->fattr)
+ decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode FSINFO response
+ */
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs4_fsinfo_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_sequence(xdr, &res->seq_res, req);
+ if (!status)
+ status = decode_putfh(xdr);
+ if (!status)
+ status = decode_fsinfo(xdr, res->fsinfo);
+ return status;
+}
+
+/*
+ * Decode PATHCONF response
+ */
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs4_pathconf_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_sequence(xdr, &res->seq_res, req);
+ if (!status)
+ status = decode_putfh(xdr);
+ if (!status)
+ status = decode_pathconf(xdr, res->pathconf);
+ return status;
+}
+
+/*
+ * Decode STATFS response
+ */
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+ struct nfs4_statfs_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_sequence(xdr, &res->seq_res, req);
+ if (!status)
+ status = decode_putfh(xdr);
+ if (!status)
+ status = decode_statfs(xdr, res->fsstat);
+ return status;
+}
+
+/*
+ * Decode GETATTR_BITMAP response
+ */
+static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_server_caps_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, req);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_server_caps(xdr, res);
+out:
+ return status;
+}
+
+/*
+ * Decode RENEW response
+ */
+static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+ void *__unused)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_renew(xdr);
+ return status;
+}
+
+/*
+ * Decode SETCLIENTID response
+ */
+static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_setclientid_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_setclientid(xdr, res);
+ return status;
+}
+
+/*
+ * Decode SETCLIENTID_CONFIRM response
+ */
+static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs_fsinfo *fsinfo)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_setclientid_confirm(xdr);
+ if (!status)
+ status = decode_putrootfh(xdr);
+ if (!status)
+ status = decode_fsinfo(xdr, fsinfo);
+ return status;
+}
+
+/*
+ * Decode DELEGRETURN response
+ */
+static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_delegreturnres *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status != 0)
+ goto out;
+ status = decode_delegreturn(xdr);
+ if (status != 0)
+ goto out;
+ decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode FS_LOCATIONS response
+ */
+static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs4_fs_locations_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, req);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_lookup(xdr);
+ if (status)
+ goto out;
+ xdr_enter_page(xdr, PAGE_SIZE);
+ status = decode_getfattr_generic(xdr, &res->fs_locations->fattr,
+ NULL, res->fs_locations,
+ res->fs_locations->server);
+out:
+ return status;
+}
+
+/*
+ * Decode SECINFO response
+ */
+static int nfs4_xdr_dec_secinfo(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_secinfo_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_secinfo(xdr, res);
+out:
+ return status;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * Decode EXCHANGE_ID response
+ */
+static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_exchange_id(xdr, res);
+ return status;
+}
+
+/*
+ * Decode CREATE_SESSION response
+ */
+static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs41_create_session_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_create_session(xdr, res);
+ return status;
+}
+
+/*
+ * Decode DESTROY_SESSION response
+ */
+static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_destroy_session(xdr, res);
+ return status;
+}
+
+/*
+ * Decode SEQUENCE response
+ */
+static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_sequence_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_sequence(xdr, res, rqstp);
+ return status;
+}
+
+/*
+ * Decode GET_LEASE_TIME response
+ */
+static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_get_lease_time_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_sequence(xdr, &res->lr_seq_res, rqstp);
+ if (!status)
+ status = decode_putrootfh(xdr);
+ if (!status)
+ status = decode_fsinfo(xdr, res->lr_fsinfo);
+ return status;
+}
+
+/*
+ * Decode RECLAIM_COMPLETE response
+ */
+static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs41_reclaim_complete_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (!status)
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (!status)
+ status = decode_reclaim_complete(xdr, (void *)NULL);
+ return status;
+}
+
+/*
+ * Decode GETDEVICELIST response
+ */
+static int nfs4_xdr_dec_getdevicelist(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_getdevicelist_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+	dprintk("decoding getdevicelist!\n");
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status != 0)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status != 0)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status != 0)
+ goto out;
+ status = decode_getdevicelist(xdr, res->devlist);
+out:
+ return status;
+}
+
+/*
+ * Decode GETDEVICEINFO response
+ */
+static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_getdeviceinfo_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status != 0)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status != 0)
+ goto out;
+ status = decode_getdeviceinfo(xdr, res->pdev);
+out:
+ return status;
+}
+
+/*
+ * Decode LAYOUTGET response
+ */
+static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutget_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_layoutget(xdr, rqstp, res);
+out:
+ return status;
+}
+
+/*
+ * Decode LAYOUTRETURN response
+ */
+static int nfs4_xdr_dec_layoutreturn(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutreturn_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_layoutreturn(xdr, res);
+out:
+ return status;
+}
+
+/*
+ * Decode LAYOUTCOMMIT response
+ */
+static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_layoutcommit_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_layoutcommit(xdr, rqstp, res);
+ if (status)
+ goto out;
+ decode_getfattr(xdr, res->fattr, res->server);
+out:
+ return status;
+}
+
+/*
+ * Decode SECINFO_NO_NAME response
+ */
+static int nfs4_xdr_dec_secinfo_no_name(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs4_secinfo_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putrootfh(xdr);
+ if (status)
+ goto out;
+ status = decode_secinfo_no_name(xdr, res);
+out:
+ return status;
+}
+
+/*
+ * Decode TEST_STATEID response
+ */
+static int nfs4_xdr_dec_test_stateid(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs41_test_stateid_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_test_stateid(xdr, res);
+out:
+ return status;
+}
+
+/*
+ * Decode FREE_STATEID response
+ */
+static int nfs4_xdr_dec_free_stateid(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs41_free_stateid_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_free_stateid(xdr, res);
+out:
+ return status;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/**
+ * nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in
+ * the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value is
+ * returned.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ */
+int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+ int plus)
+{
+ uint32_t bitmap[3] = {0};
+ uint32_t len;
+ __be32 *p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (*p == xdr_zero) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ goto out_overflow;
+ if (*p == xdr_zero)
+ return -EAGAIN;
+ entry->eof = 1;
+ return -EBADCOOKIE;
+ }
+
+ p = xdr_inline_decode(xdr, 12);
+ if (unlikely(!p))
+ goto out_overflow;
+ entry->prev_cookie = entry->cookie;
+ p = xdr_decode_hyper(p, &entry->cookie);
+ entry->len = be32_to_cpup(p);
+
+ p = xdr_inline_decode(xdr, entry->len);
+ if (unlikely(!p))
+ goto out_overflow;
+ entry->name = (const char *) p;
+
+ /*
+ * In case the server doesn't return an inode number,
+ * we fake one here. (We don't use inode number 0,
+ * since glibc seems to choke on it...)
+ */
+ entry->ino = 1;
+ entry->fattr->valid = 0;
+
+ if (decode_attr_bitmap(xdr, bitmap) < 0)
+ goto out_overflow;
+
+ if (decode_attr_length(xdr, &len, &p) < 0)
+ goto out_overflow;
+
+ if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
+ NULL, entry->server) < 0)
+ goto out_overflow;
+ if (entry->fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+ entry->ino = entry->fattr->mounted_on_fileid;
+ else if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
+ entry->ino = entry->fattr->fileid;
+
+ entry->d_type = DT_UNKNOWN;
+ if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
+ entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
+
+ return 0;
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EAGAIN;
+}
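+
+/*
+ * For reference, each entry4 decoded above starts with a
+ * value-follows word; when that is zero, the next word is the eof
+ * flag for the whole listing. A present entry then carries a 64-bit
+ * cookie, the name length and name, and finally the attribute
+ * bitmap, attribute length, and attribute data.
+ */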
+
+/*
+ * We need to translate between nfs status return values and
+ * the local errno values which may not be the same.
+ */
+static struct {
+ int stat;
+ int errno;
+} nfs_errtbl[] = {
+ { NFS4_OK, 0 },
+ { NFS4ERR_PERM, -EPERM },
+ { NFS4ERR_NOENT, -ENOENT },
+ { NFS4ERR_IO, -errno_NFSERR_IO},
+ { NFS4ERR_NXIO, -ENXIO },
+ { NFS4ERR_ACCESS, -EACCES },
+ { NFS4ERR_EXIST, -EEXIST },
+ { NFS4ERR_XDEV, -EXDEV },
+ { NFS4ERR_NOTDIR, -ENOTDIR },
+ { NFS4ERR_ISDIR, -EISDIR },
+ { NFS4ERR_INVAL, -EINVAL },
+ { NFS4ERR_FBIG, -EFBIG },
+ { NFS4ERR_NOSPC, -ENOSPC },
+ { NFS4ERR_ROFS, -EROFS },
+ { NFS4ERR_MLINK, -EMLINK },
+ { NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
+ { NFS4ERR_NOTEMPTY, -ENOTEMPTY },
+ { NFS4ERR_DQUOT, -EDQUOT },
+ { NFS4ERR_STALE, -ESTALE },
+ { NFS4ERR_BADHANDLE, -EBADHANDLE },
+ { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFS4ERR_NOTSUPP, -ENOTSUPP },
+ { NFS4ERR_TOOSMALL, -ETOOSMALL },
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
+ { NFS4ERR_BADTYPE, -EBADTYPE },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_SYMLINK, -ELOOP },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
+ { NFS4ERR_DEADLOCK, -EDEADLK },
+ { -1, -EIO }
+};
+
+/*
+ * Convert an NFS error code to a local one.
+ * This one is used by NFSv4.
+ */
+static int
+nfs4_stat_to_errno(int stat)
+{
+ int i;
+ for (i = 0; nfs_errtbl[i].stat != -1; i++) {
+ if (nfs_errtbl[i].stat == stat)
+ return nfs_errtbl[i].errno;
+ }
+ if (stat <= 10000 || stat > 10100) {
+ /* The server is looney tunes. */
+ return -EREMOTEIO;
+ }
+ /* If we cannot translate the error, the recovery routines should
+ * handle it.
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+ return -stat;
+}
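+
+/*
+ * Example: NFS4ERR_DELAY (10008) has no nfs_errtbl entry, but falls
+ * inside the 10000..10100 window, so it is returned as -10008 for
+ * the recovery routines to handle instead of being mapped to
+ * -EREMOTEIO.
+ */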
+
+#define PROC(proc, argtype, restype) \
+[NFSPROC4_CLNT_##proc] = { \
+ .p_proc = NFSPROC4_COMPOUND, \
+ .p_encode = (kxdreproc_t)nfs4_xdr_##argtype, \
+ .p_decode = (kxdrdproc_t)nfs4_xdr_##restype, \
+ .p_arglen = NFS4_##argtype##_sz, \
+ .p_replen = NFS4_##restype##_sz, \
+ .p_statidx = NFSPROC4_CLNT_##proc, \
+ .p_name = #proc, \
+}
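+
+/*
+ * For example, PROC(READ, enc_read, dec_read) expands to the
+ * nfs4_procedures[NFSPROC4_CLNT_READ] entry: a COMPOUND encoded by
+ * nfs4_xdr_enc_read() and decoded by nfs4_xdr_dec_read(), with
+ * buffer sizes NFS4_enc_read_sz and NFS4_dec_read_sz.
+ */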
+
+struct rpc_procinfo nfs4_procedures[] = {
+ PROC(READ, enc_read, dec_read),
+ PROC(WRITE, enc_write, dec_write),
+ PROC(COMMIT, enc_commit, dec_commit),
+ PROC(OPEN, enc_open, dec_open),
+ PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm),
+ PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr),
+ PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade),
+ PROC(CLOSE, enc_close, dec_close),
+ PROC(SETATTR, enc_setattr, dec_setattr),
+ PROC(FSINFO, enc_fsinfo, dec_fsinfo),
+ PROC(RENEW, enc_renew, dec_renew),
+ PROC(SETCLIENTID, enc_setclientid, dec_setclientid),
+ PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm),
+ PROC(LOCK, enc_lock, dec_lock),
+ PROC(LOCKT, enc_lockt, dec_lockt),
+ PROC(LOCKU, enc_locku, dec_locku),
+ PROC(ACCESS, enc_access, dec_access),
+ PROC(GETATTR, enc_getattr, dec_getattr),
+ PROC(LOOKUP, enc_lookup, dec_lookup),
+ PROC(LOOKUP_ROOT, enc_lookup_root, dec_lookup_root),
+ PROC(REMOVE, enc_remove, dec_remove),
+ PROC(RENAME, enc_rename, dec_rename),
+ PROC(LINK, enc_link, dec_link),
+ PROC(SYMLINK, enc_symlink, dec_symlink),
+ PROC(CREATE, enc_create, dec_create),
+ PROC(PATHCONF, enc_pathconf, dec_pathconf),
+ PROC(STATFS, enc_statfs, dec_statfs),
+ PROC(READLINK, enc_readlink, dec_readlink),
+ PROC(READDIR, enc_readdir, dec_readdir),
+ PROC(SERVER_CAPS, enc_server_caps, dec_server_caps),
+ PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn),
+ PROC(GETACL, enc_getacl, dec_getacl),
+ PROC(SETACL, enc_setacl, dec_setacl),
+ PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
+ PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
+ PROC(SECINFO, enc_secinfo, dec_secinfo),
+#if defined(CONFIG_NFS_V4_1)
+ PROC(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),
+ PROC(CREATE_SESSION, enc_create_session, dec_create_session),
+ PROC(DESTROY_SESSION, enc_destroy_session, dec_destroy_session),
+ PROC(SEQUENCE, enc_sequence, dec_sequence),
+ PROC(GET_LEASE_TIME, enc_get_lease_time, dec_get_lease_time),
+ PROC(RECLAIM_COMPLETE, enc_reclaim_complete, dec_reclaim_complete),
+ PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
+ PROC(LAYOUTGET, enc_layoutget, dec_layoutget),
+ PROC(LAYOUTCOMMIT, enc_layoutcommit, dec_layoutcommit),
+ PROC(LAYOUTRETURN, enc_layoutreturn, dec_layoutreturn),
+ PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
+ PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
+ PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
+ PROC(GETDEVICELIST, enc_getdevicelist, dec_getdevicelist),
+#endif /* CONFIG_NFS_V4_1 */
+};
+
+const struct rpc_version nfs_version4 = {
+ .number = 4,
+ .nrprocs = ARRAY_SIZE(nfs4_procedures),
+ .procs = nfs4_procedures
+};
+
+/*
+ * Local variables:
+ * c-basic-offset: 8
+ * End:
+ */
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
new file mode 100644
index 00000000..cd3c910d
--- /dev/null
+++ b/fs/nfs/nfsroot.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (C) 1995, 1996 Gero Kuhlmann <gero@gkminix.han.de>
+ *
+ * Allow an NFS filesystem to be mounted as root. The way this works is:
+ * (1) Use the IP autoconfig mechanism to set local IP addresses and routes.
+ * (2) Construct the device string and the options string using DHCP
+ * option 17 and/or kernel command line options.
+ * (3) When mount_root() sets up the root file system, pass these strings
+ * to the NFS client's regular mount interface via sys_mount().
+ *
+ *
+ * Changes:
+ *
+ * Alan Cox : Removed get_address name clash with FPU.
+ * Alan Cox : Reformatted a bit.
+ * Gero Kuhlmann : Code cleanup
+ * Michael Rausch : Fixed recognition of an incoming RARP answer.
+ * Martin Mares : (2.0) Auto-configuration via BOOTP supported.
+ * Martin Mares : Manual selection of interface & BOOTP/RARP.
+ * Martin Mares : Using network routes instead of host routes,
+ * allowing the default configuration to be used
+ * for normal operation of the host.
+ * Martin Mares : Randomized timer with exponential backoff
+ * installed to minimize network congestion.
+ * Martin Mares : Code cleanup.
+ * Martin Mares : (2.1) BOOTP and RARP made configuration options.
+ * Martin Mares : Server hostname generation fixed.
+ *	Gerd Knorr	:	Fixed weird inode handling
+ * Martin Mares : (2.2) "0.0.0.0" addresses from command line ignored.
+ * Martin Mares : RARP replies not tested for server address.
+ * Gero Kuhlmann : (2.3) Some bug fixes and code cleanup again (please
+ * send me your new patches _before_ bothering
+ * Linus so that I don' always have to cleanup
+ * _afterwards_ - thanks)
+ * Gero Kuhlmann : Last changes of Martin Mares undone.
+ * Gero Kuhlmann : RARP replies are tested for specified server
+ * again. However, it's now possible to have
+ * different RARP and NFS servers.
+ * Gero Kuhlmann : "0.0.0.0" addresses from command line are
+ * now mapped to INADDR_NONE.
+ * Gero Kuhlmann : Fixed a bug which prevented BOOTP path name
+ * from being used (thanks to Leo Spiekman)
+ * Andy Walker : Allow to specify the NFS server in nfs_root
+ * without giving a path name
+ * Swen Thümmler : Allow to specify the NFS options in nfs_root
+ * without giving a path name. Fix BOOTP request
+ * for domainname (domainname is NIS domain, not
+ * DNS domain!). Skip dummy devices for BOOTP.
+ *	Jacek Zapala	:	Fixed a bug which prevented the server-ip
+ *				address given in the nfsroot parameter from
+ *				being used.
+ * Olaf Kirch : Adapted to new NFS code.
+ * Jakub Jelinek : Free used code segment.
+ * Marko Kohtala : Fixed some bugs.
+ * Martin Mares : Debug message cleanup
+ * Martin Mares : Changed to use the new generic IP layer autoconfig
+ * code. BOOTP and RARP moved there.
+ * Martin Mares : Default path now contains host name instead of
+ * host IP address (but host name defaults to IP
+ * address anyway).
+ * Martin Mares : Use root_server_addr appropriately during setup.
+ * Martin Mares : Rewrote parameter parsing, now hopefully giving
+ * correct overriding.
+ * Trond Myklebust : Add in preliminary support for NFSv3 and TCP.
+ * Fix bug in root_nfs_addr(). nfs_data.namlen
+ * is NOT for the length of the hostname.
+ * Hua Qin : Support for mounting root file system via
+ * NFS over TCP.
+ * Fabian Frederick: Option parser rebuilt (using parser lib)
+ * Chuck Lever : Use super.c's text-based mount option parsing
+ * Chuck Lever : Add "nfsrootdebug".
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/nfs.h>
+#include <linux/nfs_fs.h>
+#include <linux/utsname.h>
+#include <linux/root_dev.h>
+#include <net/ipconfig.h>
+
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_ROOT
+
+/* Default path we try to mount. "%s" gets replaced by our IP address */
+#define NFS_ROOT "/tftpboot/%s"
+
+/* Default NFSROOT mount options. */
+#define NFS_DEF_OPTIONS "vers=2,udp,rsize=4096,wsize=4096"
+
+/* Parameters passed from the kernel command line */
+static char nfs_root_parms[256] __initdata = "";
+
+/* Text-based mount options passed to super.c */
+static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS;
+
+/* Address of NFS server */
+static __be32 servaddr __initdata = htonl(INADDR_NONE);
+
+/* Name of directory to mount */
+static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = "";
+
+/* server:export path string passed to super.c */
+static char nfs_root_device[NFS_MAXPATHLEN + 1] __initdata = "";
+
+#ifdef NFS_DEBUG
+/*
+ * When the "nfsrootdebug" kernel command line option is specified,
+ * enable debugging messages for NFSROOT.
+ */
+static int __init nfs_root_debug(char *__unused)
+{
+ nfs_debug |= NFSDBG_ROOT | NFSDBG_MOUNT;
+ return 1;
+}
+
+__setup("nfsrootdebug", nfs_root_debug);
+#endif
+
+/*
+ * Parse NFS server and directory information passed on the kernel
+ * command line.
+ *
+ * nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>]
+ *
+ * If there is a "%s" token in the <root-dir> string, it is replaced
+ * by the ASCII-representation of the client's IP address.
+ */
+static int __init nfs_root_setup(char *line)
+{
+ ROOT_DEV = Root_NFS;
+
+ if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) {
+ strlcpy(nfs_root_parms, line, sizeof(nfs_root_parms));
+ } else {
+ size_t n = strlen(line) + sizeof(NFS_ROOT) - 1;
+ if (n >= sizeof(nfs_root_parms))
+ line[sizeof(nfs_root_parms) - sizeof(NFS_ROOT) - 2] = '\0';
+ sprintf(nfs_root_parms, NFS_ROOT, line);
+ }
+
+ /*
+ * Extract the IP address of the NFS server containing our
+ * root file system, if one was specified.
+ *
+ * Note: root_nfs_parse_addr() removes the server-ip from
+ * nfs_root_parms, if it exists.
+ */
+ root_server_addr = root_nfs_parse_addr(nfs_root_parms);
+
+ return 1;
+}
+
+__setup("nfsroot=", nfs_root_setup);
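+
+/*
+ * Examples: "nfsroot=10.0.0.1:/export/root,tcp" is copied verbatim
+ * (root_nfs_parse_addr() then strips the server address), while a
+ * bare "nfsroot=client1" is expanded through NFS_ROOT into
+ * "/tftpboot/client1". The addresses and names here are illustrative.
+ */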
+
+static int __init root_nfs_copy(char *dest, const char *src,
+ const size_t destlen)
+{
+ if (strlcpy(dest, src, destlen) > destlen)
+ return -1;
+ return 0;
+}
+
+static int __init root_nfs_cat(char *dest, const char *src,
+ const size_t destlen)
+{
+ size_t len = strlen(dest);
+
+ if (len && dest[len - 1] != ',')
+ if (strlcat(dest, ",", destlen) > destlen)
+ return -1;
+
+ if (strlcat(dest, src, destlen) > destlen)
+ return -1;
+ return 0;
+}
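+
+/*
+ * root_nfs_cat() supplies the separating comma only when needed;
+ * appending "nolock,addr=10.0.0.1" (an illustrative address) to the
+ * default "vers=2,udp,rsize=4096,wsize=4096" yields
+ * "vers=2,udp,rsize=4096,wsize=4096,nolock,addr=10.0.0.1".
+ */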
+
+/*
+ * Parse out root export path and mount options from
+ * passed-in string @incoming.
+ *
+ * Copy the export path into @exppath.
+ */
+static int __init root_nfs_parse_options(char *incoming, char *exppath,
+ const size_t exppathlen)
+{
+ char *p;
+
+ /*
+ * Set the NFS remote path
+ */
+ p = strsep(&incoming, ",");
+ if (*p != '\0' && strcmp(p, "default") != 0)
+ if (root_nfs_copy(exppath, p, exppathlen))
+ return -1;
+
+ /*
+ * @incoming now points to the rest of the string; if it
+ * contains something, append it to our root options buffer
+ */
+ if (incoming != NULL && *incoming != '\0')
+ if (root_nfs_cat(nfs_root_options, incoming,
+ sizeof(nfs_root_options)))
+ return -1;
+ return 0;
+}
+
+/*
+ * Decode the export directory path name and NFS options from
+ * the kernel command line. This has to be done late in order to
+ * use a dynamically acquired client IP address for the remote
+ * root directory path.
+ *
+ * Returns zero if successful; otherwise -1 is returned.
+ */
+static int __init root_nfs_data(char *cmdline)
+{
+ char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
+ int len, retval = -1;
+ char *tmp = NULL;
+ const size_t tmplen = sizeof(nfs_export_path);
+
+ tmp = kzalloc(tmplen, GFP_KERNEL);
+ if (tmp == NULL)
+ goto out_nomem;
+ strcpy(tmp, NFS_ROOT);
+
+ if (root_server_path[0] != '\0') {
+ dprintk("Root-NFS: DHCPv4 option 17: %s\n",
+ root_server_path);
+ if (root_nfs_parse_options(root_server_path, tmp, tmplen))
+ goto out_optionstoolong;
+ }
+
+ if (cmdline[0] != '\0') {
+ dprintk("Root-NFS: nfsroot=%s\n", cmdline);
+ if (root_nfs_parse_options(cmdline, tmp, tmplen))
+ goto out_optionstoolong;
+ }
+
+ /*
+ * Append mandatory options for nfsroot so they override
+ * what has come before
+ */
+ snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4",
+ &servaddr);
+ if (root_nfs_cat(nfs_root_options, mand_options,
+ sizeof(nfs_root_options)))
+ goto out_optionstoolong;
+
+ /*
+ * Set up nfs_root_device. For NFS mounts, this looks like
+ *
+ * server:/path
+ *
+ * At this point, utsname()->nodename contains our local
+ * IP address or hostname, set by ipconfig. If "%s" exists
+ * in tmp, substitute the nodename, then shovel the whole
+ * mess into nfs_root_device.
+ */
+ len = snprintf(nfs_export_path, sizeof(nfs_export_path),
+ tmp, utsname()->nodename);
+ if (len > (int)sizeof(nfs_export_path))
+ goto out_devnametoolong;
+ len = snprintf(nfs_root_device, sizeof(nfs_root_device),
+ "%pI4:%s", &servaddr, nfs_export_path);
+ if (len > (int)sizeof(nfs_root_device))
+ goto out_devnametoolong;
+
+ retval = 0;
+
+out:
+ kfree(tmp);
+ return retval;
+out_nomem:
+ printk(KERN_ERR "Root-NFS: could not allocate memory\n");
+ goto out;
+out_optionstoolong:
+ printk(KERN_ERR "Root-NFS: mount options string too long\n");
+ goto out;
+out_devnametoolong:
+ printk(KERN_ERR "Root-NFS: root device name too long.\n");
+ goto out;
+}
+
+/**
+ * nfs_root_data - Return prepared 'data' for NFSROOT mount
+ * @root_device: OUT: address of string containing NFSROOT device
+ * @root_data: OUT: address of string containing NFSROOT mount options
+ *
+ * Returns zero and sets @root_device and @root_data if successful,
+ * otherwise -1 is returned.
+ */
+int __init nfs_root_data(char **root_device, char **root_data)
+{
+ servaddr = root_server_addr;
+ if (servaddr == htonl(INADDR_NONE)) {
+ printk(KERN_ERR "Root-NFS: no NFS server address\n");
+ return -1;
+ }
+
+ if (root_nfs_data(nfs_root_parms) < 0)
+ return -1;
+
+ *root_device = nfs_root_device;
+ *root_data = nfs_root_options;
+ return 0;
+}
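+
+/*
+ * Illustration (hypothetical addresses): given
+ * "nfsroot=192.168.1.1:/srv/nfs/client1,vers=3", the strings handed
+ * back are roughly *root_device = "192.168.1.1:/srv/nfs/client1" and
+ * *root_data = "<built-in defaults>,vers=3,nolock,addr=192.168.1.1",
+ * where the mandatory options appended last take precedence.
+ */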
diff --git a/fs/nfs/objlayout/Kbuild b/fs/nfs/objlayout/Kbuild
new file mode 100644
index 00000000..ed30ea07
--- /dev/null
+++ b/fs/nfs/objlayout/Kbuild
@@ -0,0 +1,5 @@
+#
+# Makefile for the pNFS Objects Layout Driver kernel module
+#
+objlayoutdriver-y := objio_osd.o pnfs_osd_xdr_cli.o objlayout.o
+obj-$(CONFIG_PNFS_OBJLAYOUT) += objlayoutdriver.o
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
new file mode 100644
index 00000000..4bff4a3d
--- /dev/null
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -0,0 +1,624 @@
+/*
+ * pNFS Objects layout implementation over open-osd initiator library
+ *
+ * Copyright (C) 2009 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <scsi/osd_ore.h>
+
+#include "objlayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+struct objio_dev_ent {
+ struct nfs4_deviceid_node id_node;
+ struct ore_dev od;
+};
+
+static void
+objio_free_deviceid_node(struct nfs4_deviceid_node *d)
+{
+ struct objio_dev_ent *de = container_of(d, struct objio_dev_ent, id_node);
+
+ dprintk("%s: free od=%p\n", __func__, de->od.od);
+ osduld_put_device(de->od.od);
+ kfree(de);
+}
+
+static struct objio_dev_ent *_dev_list_find(const struct nfs_server *nfss,
+ const struct nfs4_deviceid *d_id)
+{
+ struct nfs4_deviceid_node *d;
+ struct objio_dev_ent *de;
+
+ d = nfs4_find_get_deviceid(nfss->pnfs_curr_ld, nfss->nfs_client, d_id);
+ if (!d)
+ return NULL;
+
+ de = container_of(d, struct objio_dev_ent, id_node);
+ return de;
+}
+
+static struct objio_dev_ent *
+_dev_list_add(const struct nfs_server *nfss,
+ const struct nfs4_deviceid *d_id, struct osd_dev *od,
+ gfp_t gfp_flags)
+{
+ struct nfs4_deviceid_node *d;
+ struct objio_dev_ent *de = kzalloc(sizeof(*de), gfp_flags);
+ struct objio_dev_ent *n;
+
+ if (!de) {
+ dprintk("%s: -ENOMEM od=%p\n", __func__, od);
+ return NULL;
+ }
+
+ dprintk("%s: Adding od=%p\n", __func__, od);
+ nfs4_init_deviceid_node(&de->id_node,
+ nfss->pnfs_curr_ld,
+ nfss->nfs_client,
+ d_id);
+ de->od.od = od;
+
+ d = nfs4_insert_deviceid_node(&de->id_node);
+ n = container_of(d, struct objio_dev_ent, id_node);
+ if (n != de) {
+ dprintk("%s: Race with other n->od=%p\n", __func__, n->od.od);
+ objio_free_deviceid_node(&de->id_node);
+ de = n;
+ }
+
+ return de;
+}
+
+struct objio_segment {
+ struct pnfs_layout_segment lseg;
+
+ struct ore_layout layout;
+ struct ore_components oc;
+};
+
+static inline struct objio_segment *
+OBJIO_LSEG(struct pnfs_layout_segment *lseg)
+{
+ return container_of(lseg, struct objio_segment, lseg);
+}
+
+struct objio_state {
+ /* Generic layer */
+ struct objlayout_io_res oir;
+
+ bool sync;
+ /*FIXME: Support for extra_bytes at ore_get_rw_state() */
+ struct ore_io_state *ios;
+};
+
+/* Send and wait for a get_device_info of devices in the layout,
+ then look them up with the osd_initiator library */
+static int objio_devices_lookup(struct pnfs_layout_hdr *pnfslay,
+ struct objio_segment *objio_seg, unsigned c, struct nfs4_deviceid *d_id,
+ gfp_t gfp_flags)
+{
+ struct pnfs_osd_deviceaddr *deviceaddr;
+ struct objio_dev_ent *ode;
+ struct osd_dev *od;
+ struct osd_dev_info odi;
+ bool retry_flag = true;
+ int err;
+
+ ode = _dev_list_find(NFS_SERVER(pnfslay->plh_inode), d_id);
+ if (ode) {
+ objio_seg->oc.ods[c] = &ode->od; /* must use container_of */
+ return 0;
+ }
+
+ err = objlayout_get_deviceinfo(pnfslay, d_id, &deviceaddr, gfp_flags);
+ if (unlikely(err)) {
+ dprintk("%s: objlayout_get_deviceinfo dev(%llx:%llx) =>%d\n",
+ __func__, _DEVID_LO(d_id), _DEVID_HI(d_id), err);
+ return err;
+ }
+
+ odi.systemid_len = deviceaddr->oda_systemid.len;
+ if (odi.systemid_len > sizeof(odi.systemid)) {
+ dprintk("%s: odi.systemid_len > sizeof(systemid=%zd)\n",
+ __func__, sizeof(odi.systemid));
+ err = -EINVAL;
+ goto out;
+ } else if (odi.systemid_len)
+ memcpy(odi.systemid, deviceaddr->oda_systemid.data,
+ odi.systemid_len);
+ odi.osdname_len = deviceaddr->oda_osdname.len;
+ odi.osdname = (u8 *)deviceaddr->oda_osdname.data;
+
+ if (!odi.osdname_len && !odi.systemid_len) {
+ dprintk("%s: !odi.osdname_len && !odi.systemid_len\n",
+ __func__);
+ err = -ENODEV;
+ goto out;
+ }
+
+retry_lookup:
+ od = osduld_info_lookup(&odi);
+ if (unlikely(IS_ERR(od))) {
+ err = PTR_ERR(od);
+ dprintk("%s: osduld_info_lookup => %d\n", __func__, err);
+ if (err == -ENODEV && retry_flag) {
+ err = objlayout_autologin(deviceaddr);
+ if (likely(!err)) {
+ retry_flag = false;
+ goto retry_lookup;
+ }
+ }
+ goto out;
+ }
+
+ ode = _dev_list_add(NFS_SERVER(pnfslay->plh_inode), d_id, od,
+ gfp_flags);
+ objio_seg->oc.ods[c] = &ode->od; /* must use container_of */
+ dprintk("Adding new dev_id(%llx:%llx)\n",
+ _DEVID_LO(d_id), _DEVID_HI(d_id));
+out:
+ objlayout_put_deviceinfo(deviceaddr);
+ return err;
+}
+
+static void copy_single_comp(struct ore_components *oc, unsigned c,
+ struct pnfs_osd_object_cred *src_comp)
+{
+ struct ore_comp *ocomp = &oc->comps[c];
+
+ WARN_ON(src_comp->oc_cap_key.cred_len > 0); /* libosd is NO_SEC only */
+ WARN_ON(src_comp->oc_cap.cred_len > sizeof(ocomp->cred));
+
+ ocomp->obj.partition = src_comp->oc_object_id.oid_partition_id;
+ ocomp->obj.id = src_comp->oc_object_id.oid_object_id;
+
+ memcpy(ocomp->cred, src_comp->oc_cap.cred, sizeof(ocomp->cred));
+}
+
+static int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
+ struct objio_segment **pseg)
+{
+/* This is the in memory structure of the objio_segment
+ *
+ * struct __alloc_objio_segment {
+ * struct objio_segment olseg;
+ * struct ore_dev *ods[numdevs];
+ * struct ore_comp comps[numdevs];
+ * } *aolseg;
+ * NOTE: The code as above compiles and runs perfectly. It is elegant,
+ * type-safe and compact. At some past time Linus decided he does not
+ * like variable-length arrays, so for the sake of that principle we
+ * uglify the code as below.
+ */
+ struct objio_segment *lseg;
+ size_t lseg_size = sizeof(*lseg) +
+ numdevs * sizeof(lseg->oc.ods[0]) +
+ numdevs * sizeof(*lseg->oc.comps);
+
+ lseg = kzalloc(lseg_size, gfp_flags);
+ if (unlikely(!lseg)) {
+		dprintk("%s: Failed allocation numdevs=%d size=%zd\n", __func__,
+ numdevs, lseg_size);
+ return -ENOMEM;
+ }
+
+ lseg->oc.numdevs = numdevs;
+ lseg->oc.single_comp = EC_MULTPLE_COMPS;
+ lseg->oc.ods = (void *)(lseg + 1);
+ lseg->oc.comps = (void *)(lseg->oc.ods + numdevs);
+
+ *pseg = lseg;
+ return 0;
+}
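+
+/*
+ * Sketch of the resulting single allocation for numdevs == 2 (field
+ * sizes are illustrative only):
+ *
+ *    lseg -> [ objio_segment | ods[0] ods[1] | comps[0] comps[1] ]
+ *                              ^oc.ods         ^oc.comps
+ */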
+
+int objio_alloc_lseg(struct pnfs_layout_segment **outp,
+ struct pnfs_layout_hdr *pnfslay,
+ struct pnfs_layout_range *range,
+ struct xdr_stream *xdr,
+ gfp_t gfp_flags)
+{
+ struct objio_segment *objio_seg;
+ struct pnfs_osd_xdr_decode_layout_iter iter;
+ struct pnfs_osd_layout layout;
+ struct pnfs_osd_object_cred src_comp;
+ unsigned cur_comp;
+ int err;
+
+ err = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
+ if (unlikely(err))
+ return err;
+
+ err = __alloc_objio_seg(layout.olo_num_comps, gfp_flags, &objio_seg);
+ if (unlikely(err))
+ return err;
+
+ objio_seg->layout.stripe_unit = layout.olo_map.odm_stripe_unit;
+ objio_seg->layout.group_width = layout.olo_map.odm_group_width;
+ objio_seg->layout.group_depth = layout.olo_map.odm_group_depth;
+ objio_seg->layout.mirrors_p1 = layout.olo_map.odm_mirror_cnt + 1;
+ objio_seg->layout.raid_algorithm = layout.olo_map.odm_raid_algorithm;
+
+ err = ore_verify_layout(layout.olo_map.odm_num_comps,
+ &objio_seg->layout);
+ if (unlikely(err))
+ goto err;
+
+ objio_seg->oc.first_dev = layout.olo_comps_index;
+ cur_comp = 0;
+ while (pnfs_osd_xdr_decode_layout_comp(&src_comp, &iter, xdr, &err)) {
+ copy_single_comp(&objio_seg->oc, cur_comp, &src_comp);
+ err = objio_devices_lookup(pnfslay, objio_seg, cur_comp,
+ &src_comp.oc_object_id.oid_device_id,
+ gfp_flags);
+ if (err)
+ goto err;
+ ++cur_comp;
+ }
+ /* pnfs_osd_xdr_decode_layout_comp returns false on error */
+ if (unlikely(err))
+ goto err;
+
+ *outp = &objio_seg->lseg;
+ return 0;
+
+err:
+ kfree(objio_seg);
+ dprintk("%s: Error: return %d\n", __func__, err);
+ *outp = NULL;
+ return err;
+}
+
+void objio_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ int i;
+ struct objio_segment *objio_seg = OBJIO_LSEG(lseg);
+
+ for (i = 0; i < objio_seg->oc.numdevs; i++) {
+ struct ore_dev *od = objio_seg->oc.ods[i];
+ struct objio_dev_ent *ode;
+
+ if (!od)
+ break;
+ ode = container_of(od, typeof(*ode), od);
+ nfs4_put_deviceid_node(&ode->id_node);
+ }
+ kfree(objio_seg);
+}
+
+static int
+objio_alloc_io_state(struct pnfs_layout_hdr *pnfs_layout_type, bool is_reading,
+ struct pnfs_layout_segment *lseg, struct page **pages, unsigned pgbase,
+ loff_t offset, size_t count, void *rpcdata, gfp_t gfp_flags,
+ struct objio_state **outp)
+{
+ struct objio_segment *objio_seg = OBJIO_LSEG(lseg);
+ struct ore_io_state *ios;
+ int ret;
+ struct __alloc_objio_state {
+ struct objio_state objios;
+ struct pnfs_osd_ioerr ioerrs[objio_seg->oc.numdevs];
+ } *aos;
+
+ aos = kzalloc(sizeof(*aos), gfp_flags);
+ if (unlikely(!aos))
+ return -ENOMEM;
+
+ objlayout_init_ioerrs(&aos->objios.oir, objio_seg->oc.numdevs,
+ aos->ioerrs, rpcdata, pnfs_layout_type);
+
+ ret = ore_get_rw_state(&objio_seg->layout, &objio_seg->oc, is_reading,
+ offset, count, &ios);
+ if (unlikely(ret)) {
+ kfree(aos);
+ return ret;
+ }
+
+ ios->pages = pages;
+ ios->pgbase = pgbase;
+ ios->private = aos;
+ BUG_ON(ios->nr_pages > (pgbase + count + PAGE_SIZE - 1) >> PAGE_SHIFT);
+
+ aos->objios.sync = 0;
+ aos->objios.ios = ios;
+ *outp = &aos->objios;
+ return 0;
+}
+
+void objio_free_result(struct objlayout_io_res *oir)
+{
+ struct objio_state *objios = container_of(oir, struct objio_state, oir);
+
+ ore_put_io_state(objios->ios);
+ kfree(objios);
+}
+
+static enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep)
+{
+ switch (oep) {
+ case OSD_ERR_PRI_NO_ERROR:
+ return (enum pnfs_osd_errno)0;
+
+ case OSD_ERR_PRI_CLEAR_PAGES:
+ BUG_ON(1);
+ return 0;
+
+ case OSD_ERR_PRI_RESOURCE:
+ return PNFS_OSD_ERR_RESOURCE;
+ case OSD_ERR_PRI_BAD_CRED:
+ return PNFS_OSD_ERR_BAD_CRED;
+ case OSD_ERR_PRI_NO_ACCESS:
+ return PNFS_OSD_ERR_NO_ACCESS;
+ case OSD_ERR_PRI_UNREACHABLE:
+ return PNFS_OSD_ERR_UNREACHABLE;
+ case OSD_ERR_PRI_NOT_FOUND:
+ return PNFS_OSD_ERR_NOT_FOUND;
+ case OSD_ERR_PRI_NO_SPACE:
+ return PNFS_OSD_ERR_NO_SPACE;
+ default:
+ WARN_ON(1);
+ /* fallthrough */
+ case OSD_ERR_PRI_EIO:
+ return PNFS_OSD_ERR_EIO;
+ }
+}
+
+static void __on_dev_error(struct ore_io_state *ios,
+ struct ore_dev *od, unsigned dev_index, enum osd_err_priority oep,
+ u64 dev_offset, u64 dev_len)
+{
+ struct objio_state *objios = ios->private;
+ struct pnfs_osd_objid pooid;
+ struct objio_dev_ent *ode = container_of(od, typeof(*ode), od);
+	/* FIXME: what to do with more-than-one-group layouts. We need to
+ * translate from ore_io_state index to oc->comps index
+ */
+ unsigned comp = dev_index;
+
+ pooid.oid_device_id = ode->id_node.deviceid;
+ pooid.oid_partition_id = ios->oc->comps[comp].obj.partition;
+ pooid.oid_object_id = ios->oc->comps[comp].obj.id;
+
+ objlayout_io_set_result(&objios->oir, comp,
+ &pooid, osd_pri_2_pnfs_err(oep),
+ dev_offset, dev_len, !ios->reading);
+}
+
+/*
+ * read
+ */
+static void _read_done(struct ore_io_state *ios, void *private)
+{
+ struct objio_state *objios = private;
+ ssize_t status;
+ int ret = ore_check_io(ios, &__on_dev_error);
+
+	/* FIXME: _io_free(ios): can we deallocate the libosd resources? */
+
+ if (likely(!ret))
+ status = ios->length;
+ else
+ status = ret;
+
+ objlayout_read_done(&objios->oir, status, objios->sync);
+}
+
+int objio_read_pagelist(struct nfs_read_data *rdata)
+{
+ struct objio_state *objios;
+ int ret;
+
+ ret = objio_alloc_io_state(NFS_I(rdata->inode)->layout, true,
+ rdata->lseg, rdata->args.pages, rdata->args.pgbase,
+ rdata->args.offset, rdata->args.count, rdata,
+ GFP_KERNEL, &objios);
+ if (unlikely(ret))
+ return ret;
+
+ objios->ios->done = _read_done;
+ dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
+ rdata->args.offset, rdata->args.count);
+ return ore_read(objios->ios);
+}
+
+/*
+ * write
+ */
+static void _write_done(struct ore_io_state *ios, void *private)
+{
+ struct objio_state *objios = private;
+ ssize_t status;
+ int ret = ore_check_io(ios, &__on_dev_error);
+
+	/* FIXME: _io_free(ios): can we deallocate the libosd resources? */
+
+ if (likely(!ret)) {
+ /* FIXME: should be based on the OSD's persistence model
+ * See OSD2r05 Section 4.13 Data persistence model */
+ objios->oir.committed = NFS_FILE_SYNC;
+ status = ios->length;
+ } else {
+ status = ret;
+ }
+
+ objlayout_write_done(&objios->oir, status, objios->sync);
+}
+
+static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
+{
+ struct objio_state *objios = priv;
+ struct nfs_write_data *wdata = objios->oir.rpcdata;
+ pgoff_t index = offset / PAGE_SIZE;
+ struct page *page = find_get_page(wdata->inode->i_mapping, index);
+
+ if (!page) {
+ page = find_or_create_page(wdata->inode->i_mapping,
+ index, GFP_NOFS);
+ if (unlikely(!page)) {
+			dprintk("%s: find_or_create_page failed index=0x%lx\n",
+ __func__, index);
+ return NULL;
+ }
+ unlock_page(page);
+ }
+ if (PageDirty(page) || PageWriteback(page))
+ *uptodate = true;
+ else
+ *uptodate = PageUptodate(page);
+ dprintk("%s: index=0x%lx uptodate=%d\n", __func__, index, *uptodate);
+ return page;
+}
+
+static void __r4w_put_page(void *priv, struct page *page)
+{
+ dprintk("%s: index=0x%lx\n", __func__, page->index);
+ page_cache_release(page);
+}
+
+static const struct _ore_r4w_op _r4w_op = {
+ .get_page = &__r4w_get_page,
+ .put_page = &__r4w_put_page,
+};
+
+int objio_write_pagelist(struct nfs_write_data *wdata, int how)
+{
+ struct objio_state *objios;
+ int ret;
+
+ ret = objio_alloc_io_state(NFS_I(wdata->inode)->layout, false,
+ wdata->lseg, wdata->args.pages, wdata->args.pgbase,
+ wdata->args.offset, wdata->args.count, wdata, GFP_NOFS,
+ &objios);
+ if (unlikely(ret))
+ return ret;
+
+ objios->sync = 0 != (how & FLUSH_SYNC);
+ objios->ios->r4w = &_r4w_op;
+
+ if (!objios->sync)
+ objios->ios->done = _write_done;
+
+ dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
+ wdata->args.offset, wdata->args.count);
+ ret = ore_write(objios->ios);
+ if (unlikely(ret))
+ return ret;
+
+ if (objios->sync)
+ _write_done(objios->ios, objios);
+
+ return 0;
+}
+
+static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *prev, struct nfs_page *req)
+{
+ if (!pnfs_generic_pg_test(pgio, prev, req))
+ return false;
+
+ return pgio->pg_count + req->wb_bytes <=
+ OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
+}
+
+static const struct nfs_pageio_ops objio_pg_read_ops = {
+ .pg_init = pnfs_generic_pg_init_read,
+ .pg_test = objio_pg_test,
+ .pg_doio = pnfs_generic_pg_readpages,
+};
+
+static const struct nfs_pageio_ops objio_pg_write_ops = {
+ .pg_init = pnfs_generic_pg_init_write,
+ .pg_test = objio_pg_test,
+ .pg_doio = pnfs_generic_pg_writepages,
+};
+
+static struct pnfs_layoutdriver_type objlayout_type = {
+ .id = LAYOUT_OSD2_OBJECTS,
+ .name = "LAYOUT_OSD2_OBJECTS",
+ .flags = PNFS_LAYOUTRET_ON_SETATTR |
+ PNFS_LAYOUTRET_ON_ERROR,
+
+ .alloc_layout_hdr = objlayout_alloc_layout_hdr,
+ .free_layout_hdr = objlayout_free_layout_hdr,
+
+ .alloc_lseg = objlayout_alloc_lseg,
+ .free_lseg = objlayout_free_lseg,
+
+ .read_pagelist = objlayout_read_pagelist,
+ .write_pagelist = objlayout_write_pagelist,
+ .pg_read_ops = &objio_pg_read_ops,
+ .pg_write_ops = &objio_pg_write_ops,
+
+ .free_deviceid_node = objio_free_deviceid_node,
+
+ .encode_layoutcommit = objlayout_encode_layoutcommit,
+ .encode_layoutreturn = objlayout_encode_layoutreturn,
+};
+
+MODULE_DESCRIPTION("pNFS Layout Driver for OSD2 objects");
+MODULE_AUTHOR("Benny Halevy <bhalevy@panasas.com>");
+MODULE_LICENSE("GPL");
+
+static int __init
+objlayout_init(void)
+{
+ int ret = pnfs_register_layoutdriver(&objlayout_type);
+
+ if (ret)
+ printk(KERN_INFO
+ "NFS: %s: Registering OSD pNFS Layout Driver failed: error=%d\n",
+ __func__, ret);
+ else
+ printk(KERN_INFO "NFS: %s: Registered OSD pNFS Layout Driver\n",
+ __func__);
+ return ret;
+}
+
+static void __exit
+objlayout_exit(void)
+{
+ pnfs_unregister_layoutdriver(&objlayout_type);
+ printk(KERN_INFO "NFS: %s: Unregistered OSD pNFS Layout Driver\n",
+ __func__);
+}
+
+MODULE_ALIAS("nfs-layouttype4-2");
+
+module_init(objlayout_init);
+module_exit(objlayout_exit);
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
new file mode 100644
index 00000000..595c5fc2
--- /dev/null
+++ b/fs/nfs/objlayout/objlayout.c
@@ -0,0 +1,785 @@
+/*
+ * pNFS Objects layout driver high level definitions
+ *
+ * Copyright (C) 2007 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kmod.h>
+#include <linux/moduleparam.h>
+#include <linux/ratelimit.h>
+#include <scsi/osd_initiator.h>
+#include "objlayout.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+/*
+ * Create a objlayout layout structure for the given inode and return it.
+ */
+struct pnfs_layout_hdr *
+objlayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
+{
+ struct objlayout *objlay;
+
+ objlay = kzalloc(sizeof(struct objlayout), gfp_flags);
+ if (objlay) {
+ spin_lock_init(&objlay->lock);
+ INIT_LIST_HEAD(&objlay->err_list);
+ }
+ dprintk("%s: Return %p\n", __func__, objlay);
+ return &objlay->pnfs_layout;
+}
+
+/*
+ * Free an objlayout layout structure
+ */
+void
+objlayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct objlayout *objlay = OBJLAYOUT(lo);
+
+ dprintk("%s: objlay %p\n", __func__, objlay);
+
+ WARN_ON(!list_empty(&objlay->err_list));
+ kfree(objlay);
+}
+
+/*
+ * Unmarshal the layout and store it in pnfslay.
+ */
+struct pnfs_layout_segment *
+objlayout_alloc_lseg(struct pnfs_layout_hdr *pnfslay,
+ struct nfs4_layoutget_res *lgr,
+ gfp_t gfp_flags)
+{
+ int status = -ENOMEM;
+ struct xdr_stream stream;
+ struct xdr_buf buf = {
+ .pages = lgr->layoutp->pages,
+ .page_len = lgr->layoutp->len,
+ .buflen = lgr->layoutp->len,
+ .len = lgr->layoutp->len,
+ };
+ struct page *scratch;
+ struct pnfs_layout_segment *lseg;
+
+ dprintk("%s: Begin pnfslay %p\n", __func__, pnfslay);
+
+ scratch = alloc_page(gfp_flags);
+ if (!scratch)
+ goto err_nofree;
+
+ xdr_init_decode(&stream, &buf, NULL);
+ xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
+
+ status = objio_alloc_lseg(&lseg, pnfslay, &lgr->range, &stream, gfp_flags);
+ if (unlikely(status)) {
+ dprintk("%s: objio_alloc_lseg Return err %d\n", __func__,
+ status);
+ goto err;
+ }
+
+ __free_page(scratch);
+
+ dprintk("%s: Return %p\n", __func__, lseg);
+ return lseg;
+
+err:
+ __free_page(scratch);
+err_nofree:
+ dprintk("%s: Err Return=>%d\n", __func__, status);
+ return ERR_PTR(status);
+}
+
+/*
+ * Free a layout segment
+ */
+void
+objlayout_free_lseg(struct pnfs_layout_segment *lseg)
+{
+ dprintk("%s: freeing layout segment %p\n", __func__, lseg);
+
+ if (unlikely(!lseg))
+ return;
+
+ objio_free_lseg(lseg);
+}
+
+/*
+ * I/O Operations
+ */
+static inline u64
+end_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ end = start + len;
+ return end >= start ? end : NFS4_MAX_UINT64;
+}
+
+/* last octet in a range */
+static inline u64
+last_byte_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ BUG_ON(!len);
+ end = start + len;
+ return end > start ? end - 1 : NFS4_MAX_UINT64;
+}
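+
+/*
+ * Worked example: end_offset(4096, 8192) == 12288, while a wrapping
+ * range such as end_offset(NFS4_MAX_UINT64 - 1, 16) saturates to
+ * NFS4_MAX_UINT64; last_byte_offset(4096, 8192) is the inclusive
+ * end, 12287.
+ */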
+
+static void _fix_verify_io_params(struct pnfs_layout_segment *lseg,
+ struct page ***p_pages, unsigned *p_pgbase,
+ u64 offset, unsigned long count)
+{
+ u64 lseg_end_offset;
+
+ BUG_ON(offset < lseg->pls_range.offset);
+ lseg_end_offset = end_offset(lseg->pls_range.offset,
+ lseg->pls_range.length);
+ BUG_ON(offset >= lseg_end_offset);
+ WARN_ON(offset + count > lseg_end_offset);
+
+ if (*p_pgbase > PAGE_SIZE) {
+ dprintk("%s: pgbase(0x%x) > PAGE_SIZE\n", __func__, *p_pgbase);
+ *p_pages += *p_pgbase >> PAGE_SHIFT;
+ *p_pgbase &= ~PAGE_MASK;
+ }
+}
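+
+/*
+ * Illustration (hypothetical values, assuming 4K pages): a pgbase of
+ * 0x2400 is normalized to *p_pages += 2 and *p_pgbase = 0x400, so
+ * the I/O engine always sees an in-page offset below PAGE_SIZE.
+ */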
+
+/*
+ * I/O done common code
+ */
+static void
+objlayout_iodone(struct objlayout_io_res *oir)
+{
+ if (likely(oir->status >= 0)) {
+ objio_free_result(oir);
+ } else {
+ struct objlayout *objlay = oir->objlay;
+
+ spin_lock(&objlay->lock);
+ objlay->delta_space_valid = OBJ_DSU_INVALID;
+ list_add(&objlay->err_list, &oir->err_list);
+ spin_unlock(&objlay->lock);
+ }
+}
+
+/*
+ * objlayout_io_set_result - Set an osd_error code on a specific osd comp.
+ *
+ * The @index component IO failed (error returned from target). Register
+ * the error for later reporting at layout-return.
+ */
+void
+objlayout_io_set_result(struct objlayout_io_res *oir, unsigned index,
+ struct pnfs_osd_objid *pooid, int osd_error,
+ u64 offset, u64 length, bool is_write)
+{
+ struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[index];
+
+ BUG_ON(index >= oir->num_comps);
+ if (osd_error) {
+ ioerr->oer_component = *pooid;
+ ioerr->oer_comp_offset = offset;
+ ioerr->oer_comp_length = length;
+ ioerr->oer_iswrite = is_write;
+ ioerr->oer_errno = osd_error;
+
+ dprintk("%s: err[%d]: errno=%d is_write=%d dev(%llx:%llx) "
+ "par=0x%llx obj=0x%llx offset=0x%llx length=0x%llx\n",
+ __func__, index, ioerr->oer_errno,
+ ioerr->oer_iswrite,
+ _DEVID_LO(&ioerr->oer_component.oid_device_id),
+ _DEVID_HI(&ioerr->oer_component.oid_device_id),
+ ioerr->oer_component.oid_partition_id,
+ ioerr->oer_component.oid_object_id,
+ ioerr->oer_comp_offset,
+ ioerr->oer_comp_length);
+ } else {
+		/* Callers need not call this when no error is reported */
+ ioerr->oer_errno = 0;
+ }
+}
+
+/* Function scheduled on the rpc workqueue to call ->nfs_readlist_complete().
+ * This is because the osd completion is called with interrupts disabled
+ * from the block layer.
+ */
+static void _rpc_read_complete(struct work_struct *work)
+{
+ struct rpc_task *task;
+ struct nfs_read_data *rdata;
+
+ dprintk("%s enter\n", __func__);
+ task = container_of(work, struct rpc_task, u.tk_work);
+ rdata = container_of(task, struct nfs_read_data, task);
+
+ pnfs_ld_read_done(rdata);
+}
+
+void
+objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
+{
+ struct nfs_read_data *rdata = oir->rpcdata;
+
+ oir->status = rdata->task.tk_status = status;
+ if (status >= 0)
+ rdata->res.count = status;
+ else
+ rdata->pnfs_error = status;
+ objlayout_iodone(oir);
+ /* must not use oir after this point */
+
+ dprintk("%s: Return status=%zd eof=%d sync=%d\n", __func__,
+ status, rdata->res.eof, sync);
+
+ if (sync)
+ pnfs_ld_read_done(rdata);
+ else {
+ INIT_WORK(&rdata->task.u.tk_work, _rpc_read_complete);
+ schedule_work(&rdata->task.u.tk_work);
+ }
+}
+
+/*
+ * Perform sync or async reads.
+ */
+enum pnfs_try_status
+objlayout_read_pagelist(struct nfs_read_data *rdata)
+{
+ loff_t offset = rdata->args.offset;
+ size_t count = rdata->args.count;
+ int err;
+ loff_t eof;
+
+ eof = i_size_read(rdata->inode);
+ if (unlikely(offset + count > eof)) {
+ if (offset >= eof) {
+ err = 0;
+ rdata->res.count = 0;
+ rdata->res.eof = 1;
+ /*FIXME: do we need to call pnfs_ld_read_done() */
+ goto out;
+ }
+ count = eof - offset;
+ }
+
+ rdata->res.eof = (offset + count) >= eof;
+ _fix_verify_io_params(rdata->lseg, &rdata->args.pages,
+ &rdata->args.pgbase,
+ rdata->args.offset, rdata->args.count);
+
+ dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n",
+ __func__, rdata->inode->i_ino, offset, count, rdata->res.eof);
+
+ err = objio_read_pagelist(rdata);
+ out:
+ if (unlikely(err)) {
+ rdata->pnfs_error = err;
+ dprintk("%s: Returned Error %d\n", __func__, err);
+ return PNFS_NOT_ATTEMPTED;
+ }
+ return PNFS_ATTEMPTED;
+}
+
+/* Function scheduled on the rpc workqueue to call ->nfs_writelist_complete().
+ * This is because the osd completion is called with interrupts disabled
+ * from the block layer.
+ */
+static void _rpc_write_complete(struct work_struct *work)
+{
+ struct rpc_task *task;
+ struct nfs_write_data *wdata;
+
+ dprintk("%s enter\n", __func__);
+ task = container_of(work, struct rpc_task, u.tk_work);
+ wdata = container_of(task, struct nfs_write_data, task);
+
+ pnfs_ld_write_done(wdata);
+}
+
+void
+objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
+{
+ struct nfs_write_data *wdata = oir->rpcdata;
+
+ oir->status = wdata->task.tk_status = status;
+ if (status >= 0) {
+ wdata->res.count = status;
+ wdata->verf.committed = oir->committed;
+ } else {
+ wdata->pnfs_error = status;
+ }
+ objlayout_iodone(oir);
+ /* must not use oir after this point */
+
+ dprintk("%s: Return status %zd committed %d sync=%d\n", __func__,
+ status, wdata->verf.committed, sync);
+
+ if (sync)
+ pnfs_ld_write_done(wdata);
+ else {
+ INIT_WORK(&wdata->task.u.tk_work, _rpc_write_complete);
+ schedule_work(&wdata->task.u.tk_work);
+ }
+}
+
+/*
+ * Perform sync or async writes.
+ */
+enum pnfs_try_status
+objlayout_write_pagelist(struct nfs_write_data *wdata,
+ int how)
+{
+ int err;
+
+ _fix_verify_io_params(wdata->lseg, &wdata->args.pages,
+ &wdata->args.pgbase,
+ wdata->args.offset, wdata->args.count);
+
+ err = objio_write_pagelist(wdata, how);
+ if (unlikely(err)) {
+ wdata->pnfs_error = err;
+ dprintk("%s: Returned Error %d\n", __func__, err);
+ return PNFS_NOT_ATTEMPTED;
+ }
+ return PNFS_ATTEMPTED;
+}
+
+void
+objlayout_encode_layoutcommit(struct pnfs_layout_hdr *pnfslay,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *args)
+{
+ struct objlayout *objlay = OBJLAYOUT(pnfslay);
+ struct pnfs_osd_layoutupdate lou;
+ __be32 *start;
+
+ dprintk("%s: Begin\n", __func__);
+
+ spin_lock(&objlay->lock);
+ lou.dsu_valid = (objlay->delta_space_valid == OBJ_DSU_VALID);
+ lou.dsu_delta = objlay->delta_space_used;
+ objlay->delta_space_used = 0;
+ objlay->delta_space_valid = OBJ_DSU_INIT;
+ lou.olu_ioerr_flag = !list_empty(&objlay->err_list);
+ spin_unlock(&objlay->lock);
+
+ start = xdr_reserve_space(xdr, 4);
+
+ BUG_ON(pnfs_osd_xdr_encode_layoutupdate(xdr, &lou));
+
+ *start = cpu_to_be32((xdr->p - start - 1) * 4);
+
+ dprintk("%s: Return delta_space_used %lld err %d\n", __func__,
+ lou.dsu_delta, lou.olu_ioerr_flag);
+}
+
+static int
+err_prio(u32 oer_errno)
+{
+ switch (oer_errno) {
+ case 0:
+ return 0;
+
+ case PNFS_OSD_ERR_RESOURCE:
+ return OSD_ERR_PRI_RESOURCE;
+ case PNFS_OSD_ERR_BAD_CRED:
+ return OSD_ERR_PRI_BAD_CRED;
+ case PNFS_OSD_ERR_NO_ACCESS:
+ return OSD_ERR_PRI_NO_ACCESS;
+ case PNFS_OSD_ERR_UNREACHABLE:
+ return OSD_ERR_PRI_UNREACHABLE;
+ case PNFS_OSD_ERR_NOT_FOUND:
+ return OSD_ERR_PRI_NOT_FOUND;
+ case PNFS_OSD_ERR_NO_SPACE:
+ return OSD_ERR_PRI_NO_SPACE;
+ default:
+ WARN_ON(1);
+ /* fallthrough */
+ case PNFS_OSD_ERR_EIO:
+ return OSD_ERR_PRI_EIO;
+ }
+}
+
+static void
+merge_ioerr(struct pnfs_osd_ioerr *dest_err,
+ const struct pnfs_osd_ioerr *src_err)
+{
+ u64 dest_end, src_end;
+
+ if (!dest_err->oer_errno) {
+ *dest_err = *src_err;
+ /* accumulated device must be blank */
+ memset(&dest_err->oer_component.oid_device_id, 0,
+ sizeof(dest_err->oer_component.oid_device_id));
+
+ return;
+ }
+
+ if (dest_err->oer_component.oid_partition_id !=
+ src_err->oer_component.oid_partition_id)
+ dest_err->oer_component.oid_partition_id = 0;
+
+ if (dest_err->oer_component.oid_object_id !=
+ src_err->oer_component.oid_object_id)
+ dest_err->oer_component.oid_object_id = 0;
+
+ if (dest_err->oer_comp_offset > src_err->oer_comp_offset)
+ dest_err->oer_comp_offset = src_err->oer_comp_offset;
+
+ dest_end = end_offset(dest_err->oer_comp_offset,
+ dest_err->oer_comp_length);
+ src_end = end_offset(src_err->oer_comp_offset,
+ src_err->oer_comp_length);
+ if (dest_end < src_end)
+ dest_end = src_end;
+
+ dest_err->oer_comp_length = dest_end - dest_err->oer_comp_offset;
+
+ if ((src_err->oer_iswrite == dest_err->oer_iswrite) &&
+ (err_prio(src_err->oer_errno) > err_prio(dest_err->oer_errno))) {
+ dest_err->oer_errno = src_err->oer_errno;
+ } else if (src_err->oer_iswrite) {
+ dest_err->oer_iswrite = true;
+ dest_err->oer_errno = src_err->oer_errno;
+ }
+}
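+
+/*
+ * Example merge (hypothetical ranges): a read error at offset 0,
+ * length 100 and a write error at offset 50, length 150 accumulate
+ * into one descriptor covering offset 0, length 200, flagged iswrite
+ * and carrying the write's errno.
+ */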
+
+static void
+encode_accumulated_error(struct objlayout *objlay, __be32 *p)
+{
+ struct objlayout_io_res *oir, *tmp;
+ struct pnfs_osd_ioerr accumulated_err = {.oer_errno = 0};
+
+ list_for_each_entry_safe(oir, tmp, &objlay->err_list, err_list) {
+ unsigned i;
+
+ for (i = 0; i < oir->num_comps; i++) {
+ struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[i];
+
+ if (!ioerr->oer_errno)
+ continue;
+
+ printk(KERN_ERR "NFS: %s: err[%d]: errno=%d "
+ "is_write=%d dev(%llx:%llx) par=0x%llx "
+ "obj=0x%llx offset=0x%llx length=0x%llx\n",
+ __func__, i, ioerr->oer_errno,
+ ioerr->oer_iswrite,
+ _DEVID_LO(&ioerr->oer_component.oid_device_id),
+ _DEVID_HI(&ioerr->oer_component.oid_device_id),
+ ioerr->oer_component.oid_partition_id,
+ ioerr->oer_component.oid_object_id,
+ ioerr->oer_comp_offset,
+ ioerr->oer_comp_length);
+
+ merge_ioerr(&accumulated_err, ioerr);
+ }
+ list_del(&oir->err_list);
+ objio_free_result(oir);
+ }
+
+ pnfs_osd_xdr_encode_ioerr(p, &accumulated_err);
+}
+
+void
+objlayout_encode_layoutreturn(struct pnfs_layout_hdr *pnfslay,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args)
+{
+ struct objlayout *objlay = OBJLAYOUT(pnfslay);
+ struct objlayout_io_res *oir, *tmp;
+ __be32 *start;
+
+ dprintk("%s: Begin\n", __func__);
+ start = xdr_reserve_space(xdr, 4);
+ BUG_ON(!start);
+
+ spin_lock(&objlay->lock);
+
+ list_for_each_entry_safe(oir, tmp, &objlay->err_list, err_list) {
+ __be32 *last_xdr = NULL, *p;
+ unsigned i;
+ int res = 0;
+
+ for (i = 0; i < oir->num_comps; i++) {
+ struct pnfs_osd_ioerr *ioerr = &oir->ioerrs[i];
+
+ if (!ioerr->oer_errno)
+ continue;
+
+ dprintk("%s: err[%d]: errno=%d is_write=%d "
+ "dev(%llx:%llx) par=0x%llx obj=0x%llx "
+ "offset=0x%llx length=0x%llx\n",
+ __func__, i, ioerr->oer_errno,
+ ioerr->oer_iswrite,
+ _DEVID_LO(&ioerr->oer_component.oid_device_id),
+ _DEVID_HI(&ioerr->oer_component.oid_device_id),
+ ioerr->oer_component.oid_partition_id,
+ ioerr->oer_component.oid_object_id,
+ ioerr->oer_comp_offset,
+ ioerr->oer_comp_length);
+
+ p = pnfs_osd_xdr_ioerr_reserve_space(xdr);
+ if (unlikely(!p)) {
+ res = -E2BIG;
+ break; /* accumulated_error */
+ }
+
+ last_xdr = p;
+ pnfs_osd_xdr_encode_ioerr(p, &oir->ioerrs[i]);
+ }
+
+ /* TODO: use xdr_write_pages */
+ if (unlikely(res)) {
+ /* no space for even one error descriptor */
+ BUG_ON(!last_xdr);
+
+ /* we've encountered a situation with lots and lots of
+ * errors and no space to encode them all. Use the last
+ * available slot to report the union of all the
+ * remaining errors.
+ */
+ encode_accumulated_error(objlay, last_xdr);
+ goto loop_done;
+ }
+ list_del(&oir->err_list);
+ objio_free_result(oir);
+ }
+loop_done:
+ spin_unlock(&objlay->lock);
+
+ *start = cpu_to_be32((xdr->p - start - 1) * 4);
+ dprintk("%s: Return\n", __func__);
+}
+
+
+/*
+ * Get Device Info API for io engines
+ */
+struct objlayout_deviceinfo {
+ struct page *page;
+ struct pnfs_osd_deviceaddr da; /* This must be last */
+};
+
+/* Initialize and call nfs_getdeviceinfo, then decode and return a
+ * "struct pnfs_osd_deviceaddr *" Eventually objlayout_put_deviceinfo()
+ * should be called.
+ */
+int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
+ struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr,
+ gfp_t gfp_flags)
+{
+ struct objlayout_deviceinfo *odi;
+ struct pnfs_device pd;
+	struct page *page;
+ u32 *p;
+ int err;
+
+ page = alloc_page(gfp_flags);
+ if (!page)
+ return -ENOMEM;
+
+ memcpy(&pd.dev_id, d_id, sizeof(*d_id));
+ pd.layout_type = LAYOUT_OSD2_OBJECTS;
+ pd.pages = &page;
+ pd.pgbase = 0;
+ pd.pglen = PAGE_SIZE;
+ pd.mincount = 0;
+
+ err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd);
+ dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err);
+ if (err)
+ goto err_out;
+
+ p = page_address(page);
+ odi = kzalloc(sizeof(*odi), gfp_flags);
+ if (!odi) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ pnfs_osd_xdr_decode_deviceaddr(&odi->da, p);
+ odi->page = page;
+ *deviceaddr = &odi->da;
+ return 0;
+
+err_out:
+ __free_page(page);
+ return err;
+}
+
+void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr)
+{
+ struct objlayout_deviceinfo *odi = container_of(deviceaddr,
+ struct objlayout_deviceinfo,
+ da);
+
+ __free_page(odi->page);
+ kfree(odi);
+}
+
+enum {
+ OBJLAYOUT_MAX_URI_LEN = 256, OBJLAYOUT_MAX_OSDNAME_LEN = 64,
+ OBJLAYOUT_MAX_SYSID_HEX_LEN = OSD_SYSTEMID_LEN * 2 + 1,
+ OSD_LOGIN_UPCALL_PATHLEN = 256
+};
+
+static char osd_login_prog[OSD_LOGIN_UPCALL_PATHLEN] = "/sbin/osd_login";
+
+module_param_string(osd_login_prog, osd_login_prog, sizeof(osd_login_prog),
+ 0600);
+MODULE_PARM_DESC(osd_login_prog, "Path to the osd_login upcall program");
+
+struct __auto_login {
+ char uri[OBJLAYOUT_MAX_URI_LEN];
+ char osdname[OBJLAYOUT_MAX_OSDNAME_LEN];
+ char systemid_hex[OBJLAYOUT_MAX_SYSID_HEX_LEN];
+};
+
+static int __objlayout_upcall(struct __auto_login *login)
+{
+ static char *envp[] = { "HOME=/",
+ "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ NULL
+ };
+ char *argv[8];
+ int ret;
+
+ if (unlikely(!osd_login_prog[0])) {
+ dprintk("%s: osd_login_prog is disabled\n", __func__);
+ return -EACCES;
+ }
+
+ dprintk("%s uri: %s\n", __func__, login->uri);
+ dprintk("%s osdname %s\n", __func__, login->osdname);
+ dprintk("%s systemid_hex %s\n", __func__, login->systemid_hex);
+
+ argv[0] = (char *)osd_login_prog;
+ argv[1] = "-u";
+ argv[2] = login->uri;
+ argv[3] = "-o";
+ argv[4] = login->osdname;
+ argv[5] = "-s";
+ argv[6] = login->systemid_hex;
+ argv[7] = NULL;
+
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ /*
+ * Disable the upcall mechanism if we're getting an ENOENT or
+ * EACCES error. The admin can re-enable it on the fly by using
+ * sysfs to set the objlayoutdriver.osd_login_prog module parameter once
+ * the problem has been fixed.
+ */
+ if (ret == -ENOENT || ret == -EACCES) {
+		printk(KERN_ERR "PNFS-OBJ: %s was not found; please set the "
+ "objlayoutdriver.osd_login_prog kernel parameter!\n",
+ osd_login_prog);
+ osd_login_prog[0] = '\0';
+ }
+ dprintk("%s %s return value: %d\n", __func__, osd_login_prog, ret);
+
+ return ret;
+}
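+
+/*
+ * The upcall is equivalent to running (argument values are
+ * placeholders):
+ *
+ *    /sbin/osd_login -u <uri> -o <osdname> -s <systemid_hex>
+ *
+ * and waiting for the helper to exit; objlayout_autologin() below
+ * maps a positive script return value to -ENODEV.
+ */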
+
+/* Assume dest is all zeros */
+static void __copy_nfsS_and_zero_terminate(struct nfs4_string s,
+ char *dest, int max_len,
+ const char *var_name)
+{
+ if (!s.len)
+ return;
+
+ if (s.len >= max_len) {
+ pr_warn_ratelimited(
+ "objlayout_autologin: %s: s.len(%d) >= max_len(%d)",
+ var_name, s.len, max_len);
+ s.len = max_len - 1; /* space for null terminator */
+ }
+
+ memcpy(dest, s.data, s.len);
+}
+
+/* Assume sysid is all zeros */
+static void _sysid_2_hex(struct nfs4_string s,
+ char sysid[OBJLAYOUT_MAX_SYSID_HEX_LEN])
+{
+ int i;
+ char *cur;
+
+ if (!s.len)
+ return;
+
+ if (s.len != OSD_SYSTEMID_LEN) {
+ pr_warn_ratelimited(
+ "objlayout_autologin: systemid_len(%d) != OSD_SYSTEMID_LEN",
+ s.len);
+ if (s.len > OSD_SYSTEMID_LEN)
+ s.len = OSD_SYSTEMID_LEN;
+ }
+
+ cur = sysid;
+ for (i = 0; i < s.len; i++)
+ cur = hex_byte_pack(cur, s.data[i]);
+}
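+
+/*
+ * Example (hypothetical bytes): a systemid of { 0xde, 0xad } packs
+ * to the string "dead"; the caller's zeroed buffer provides the
+ * terminating NUL.
+ */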
+
+int objlayout_autologin(struct pnfs_osd_deviceaddr *deviceaddr)
+{
+ int rc;
+ struct __auto_login login;
+
+ if (!deviceaddr->oda_targetaddr.ota_netaddr.r_addr.len)
+ return -ENODEV;
+
+ memset(&login, 0, sizeof(login));
+ __copy_nfsS_and_zero_terminate(
+ deviceaddr->oda_targetaddr.ota_netaddr.r_addr,
+ login.uri, sizeof(login.uri), "URI");
+
+ __copy_nfsS_and_zero_terminate(
+ deviceaddr->oda_osdname,
+ login.osdname, sizeof(login.osdname), "OSDNAME");
+
+ _sysid_2_hex(deviceaddr->oda_systemid, login.systemid_hex);
+
+ rc = __objlayout_upcall(&login);
+ if (rc > 0) /* script returns positive values */
+ rc = -ENODEV;
+
+ return rc;
+}
diff --git a/fs/nfs/objlayout/objlayout.h b/fs/nfs/objlayout/objlayout.h
new file mode 100644
index 00000000..880ba086
--- /dev/null
+++ b/fs/nfs/objlayout/objlayout.h
@@ -0,0 +1,189 @@
+/*
+ * Data types and function declarations for interfacing with the
+ * pNFS standard object layout driver.
+ *
+ * Copyright (C) 2007 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _OBJLAYOUT_H
+#define _OBJLAYOUT_H
+
+#include <linux/nfs_fs.h>
+#include <linux/pnfs_osd_xdr.h>
+#include "../pnfs.h"
+
+/*
+ * per-inode layout
+ */
+struct objlayout {
+ struct pnfs_layout_hdr pnfs_layout;
+
+ /* for layout_commit */
+ enum osd_delta_space_valid_enum {
+ OBJ_DSU_INIT = 0,
+ OBJ_DSU_VALID,
+ OBJ_DSU_INVALID,
+ } delta_space_valid;
+ s64 delta_space_used; /* consumed by write ops */
+
+ /* for layout_return */
+ spinlock_t lock;
+ struct list_head err_list;
+};
+
+static inline struct objlayout *
+OBJLAYOUT(struct pnfs_layout_hdr *lo)
+{
+ return container_of(lo, struct objlayout, pnfs_layout);
+}
+
+/*
+ * per-I/O operation state
+ * embedded in objects provider io_state data structure
+ */
+struct objlayout_io_res {
+ struct objlayout *objlay;
+
+ void *rpcdata;
+ int status; /* res */
+ int committed; /* res */
+
+ /* Error reporting (layout_return) */
+ struct list_head err_list;
+ unsigned num_comps;
+ /* Pointer to array of error descriptors of size num_comps.
+ * It should contain as many entries as devices in the osd_layout
+ * that participate in the I/O. It is up to the io_engine to allocate
+ * needed space and set num_comps.
+ */
+ struct pnfs_osd_ioerr *ioerrs;
+};
+
+static inline
+void objlayout_init_ioerrs(struct objlayout_io_res *oir, unsigned num_comps,
+ struct pnfs_osd_ioerr *ioerrs, void *rpcdata,
+ struct pnfs_layout_hdr *pnfs_layout_type)
+{
+ oir->objlay = OBJLAYOUT(pnfs_layout_type);
+ oir->rpcdata = rpcdata;
+ INIT_LIST_HEAD(&oir->err_list);
+ oir->num_comps = num_comps;
+ oir->ioerrs = ioerrs;
+}
+
+/*
+ * Raid engine I/O API
+ */
+extern int objio_alloc_lseg(struct pnfs_layout_segment **outp,
+ struct pnfs_layout_hdr *pnfslay,
+ struct pnfs_layout_range *range,
+ struct xdr_stream *xdr,
+ gfp_t gfp_flags);
+extern void objio_free_lseg(struct pnfs_layout_segment *lseg);
+
+/* objio_free_result will free these @oir structs received from
+ * objlayout_{read,write}_done
+ */
+extern void objio_free_result(struct objlayout_io_res *oir);
+
+extern int objio_read_pagelist(struct nfs_read_data *rdata);
+extern int objio_write_pagelist(struct nfs_write_data *wdata, int how);
+
+/*
+ * callback API
+ */
+extern void objlayout_io_set_result(struct objlayout_io_res *oir,
+ unsigned index, struct pnfs_osd_objid *pooid,
+ int osd_error, u64 offset, u64 length, bool is_write);
+
+static inline void
+objlayout_add_delta_space_used(struct objlayout *objlay, s64 space_used)
+{
+	/* If one of the I/Os errored out and the delta_space_used was
+	 * invalid, we render the complete report invalid. The protocol
+	 * mandates that the DSU be accurate or not reported at all.
+ */
+ spin_lock(&objlay->lock);
+ if (objlay->delta_space_valid != OBJ_DSU_INVALID) {
+ objlay->delta_space_valid = OBJ_DSU_VALID;
+ objlay->delta_space_used += space_used;
+ }
+ spin_unlock(&objlay->lock);
+}
+
+extern void objlayout_read_done(struct objlayout_io_res *oir,
+ ssize_t status, bool sync);
+extern void objlayout_write_done(struct objlayout_io_res *oir,
+ ssize_t status, bool sync);
+
+extern int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
+ struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr,
+ gfp_t gfp_flags);
+extern void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr);
+
+/*
+ * exported generic objects function vectors
+ */
+
+extern struct pnfs_layout_hdr *objlayout_alloc_layout_hdr(struct inode *, gfp_t gfp_flags);
+extern void objlayout_free_layout_hdr(struct pnfs_layout_hdr *);
+
+extern struct pnfs_layout_segment *objlayout_alloc_lseg(
+ struct pnfs_layout_hdr *,
+ struct nfs4_layoutget_res *,
+ gfp_t gfp_flags);
+extern void objlayout_free_lseg(struct pnfs_layout_segment *);
+
+extern enum pnfs_try_status objlayout_read_pagelist(
+ struct nfs_read_data *);
+
+extern enum pnfs_try_status objlayout_write_pagelist(
+ struct nfs_write_data *,
+ int how);
+
+extern void objlayout_encode_layoutcommit(
+ struct pnfs_layout_hdr *,
+ struct xdr_stream *,
+ const struct nfs4_layoutcommit_args *);
+
+extern void objlayout_encode_layoutreturn(
+ struct pnfs_layout_hdr *,
+ struct xdr_stream *,
+ const struct nfs4_layoutreturn_args *);
+
+extern int objlayout_autologin(struct pnfs_osd_deviceaddr *deviceaddr);
+
+#endif /* _OBJLAYOUT_H */
diff --git a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
new file mode 100644
index 00000000..b3918f7a
--- /dev/null
+++ b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
@@ -0,0 +1,415 @@
+/*
+ * Object-Based pNFS Layout XDR layer
+ *
+ * Copyright (C) 2007 Panasas Inc. [year of first publication]
+ * All rights reserved.
+ *
+ * Benny Halevy <bhalevy@panasas.com>
+ * Boaz Harrosh <bharrosh@panasas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * See the file COPYING included with this distribution for more details.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the Panasas company nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/pnfs_osd_xdr.h>
+
+#define NFSDBG_FACILITY NFSDBG_PNFS_LD
+
+/*
+ * The following implementation is based on RFC5664
+ */
+
+/*
+ * struct pnfs_osd_objid {
+ * struct nfs4_deviceid oid_device_id;
+ * u64 oid_partition_id;
+ * u64 oid_object_id;
+ * }; // xdr size 32 bytes
+ */
+static __be32 *
+_osd_xdr_decode_objid(__be32 *p, struct pnfs_osd_objid *objid)
+{
+ p = xdr_decode_opaque_fixed(p, objid->oid_device_id.data,
+ sizeof(objid->oid_device_id.data));
+
+ p = xdr_decode_hyper(p, &objid->oid_partition_id);
+ p = xdr_decode_hyper(p, &objid->oid_object_id);
+ return p;
+}
+
+/*
+ * struct pnfs_osd_opaque_cred {
+ * u32 cred_len;
+ * void *cred;
+ * }; // xdr size [variable]
+ * The return pointers are from the xdr buffer
+ */
+static int
+_osd_xdr_decode_opaque_cred(struct pnfs_osd_opaque_cred *opaque_cred,
+ struct xdr_stream *xdr)
+{
+ __be32 *p = xdr_inline_decode(xdr, 1);
+
+ if (!p)
+ return -EINVAL;
+
+ opaque_cred->cred_len = be32_to_cpu(*p++);
+
+ p = xdr_inline_decode(xdr, opaque_cred->cred_len);
+ if (!p)
+ return -EINVAL;
+
+ opaque_cred->cred = p;
+ return 0;
+}
+
+/*
+ * struct pnfs_osd_object_cred {
+ * struct pnfs_osd_objid oc_object_id;
+ * u32 oc_osd_version;
+ * u32 oc_cap_key_sec;
+ * struct pnfs_osd_opaque_cred oc_cap_key
+ * struct pnfs_osd_opaque_cred oc_cap;
+ * }; // xdr size 32 + 4 + 4 + [variable] + [variable]
+ */
+static int
+_osd_xdr_decode_object_cred(struct pnfs_osd_object_cred *comp,
+ struct xdr_stream *xdr)
+{
+ __be32 *p = xdr_inline_decode(xdr, 32 + 4 + 4);
+ int ret;
+
+ if (!p)
+ return -EIO;
+
+ p = _osd_xdr_decode_objid(p, &comp->oc_object_id);
+ comp->oc_osd_version = be32_to_cpup(p++);
+ comp->oc_cap_key_sec = be32_to_cpup(p);
+
+ ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap_key, xdr);
+ if (unlikely(ret))
+ return ret;
+
+ ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap, xdr);
+ return ret;
+}
+
+/*
+ * struct pnfs_osd_data_map {
+ * u32 odm_num_comps;
+ * u64 odm_stripe_unit;
+ * u32 odm_group_width;
+ * u32 odm_group_depth;
+ * u32 odm_mirror_cnt;
+ * u32 odm_raid_algorithm;
+ * }; // xdr size 4 + 8 + 4 + 4 + 4 + 4
+ */
+static inline int
+_osd_data_map_xdr_sz(void)
+{
+ return 4 + 8 + 4 + 4 + 4 + 4;
+}
+
+static __be32 *
+_osd_xdr_decode_data_map(__be32 *p, struct pnfs_osd_data_map *data_map)
+{
+ data_map->odm_num_comps = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &data_map->odm_stripe_unit);
+ data_map->odm_group_width = be32_to_cpup(p++);
+ data_map->odm_group_depth = be32_to_cpup(p++);
+ data_map->odm_mirror_cnt = be32_to_cpup(p++);
+ data_map->odm_raid_algorithm = be32_to_cpup(p++);
+ dprintk("%s: odm_num_comps=%u odm_stripe_unit=%llu odm_group_width=%u "
+ "odm_group_depth=%u odm_mirror_cnt=%u odm_raid_algorithm=%u\n",
+ __func__,
+ data_map->odm_num_comps,
+ (unsigned long long)data_map->odm_stripe_unit,
+ data_map->odm_group_width,
+ data_map->odm_group_depth,
+ data_map->odm_mirror_cnt,
+ data_map->odm_raid_algorithm);
+ return p;
+}
+
+int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
+ struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr)
+{
+ __be32 *p;
+
+ memset(iter, 0, sizeof(*iter));
+
+ p = xdr_inline_decode(xdr, _osd_data_map_xdr_sz() + 4 + 4);
+ if (unlikely(!p))
+ return -EINVAL;
+
+ p = _osd_xdr_decode_data_map(p, &layout->olo_map);
+ layout->olo_comps_index = be32_to_cpup(p++);
+ layout->olo_num_comps = be32_to_cpup(p++);
+ dprintk("%s: olo_comps_index=%d olo_num_comps=%d\n", __func__,
+ layout->olo_comps_index, layout->olo_num_comps);
+
+ iter->total_comps = layout->olo_num_comps;
+ return 0;
+}
+
+bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
+ struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
+ int *err)
+{
+ BUG_ON(iter->decoded_comps > iter->total_comps);
+ if (iter->decoded_comps == iter->total_comps)
+ return false;
+
+ *err = _osd_xdr_decode_object_cred(comp, xdr);
+ if (unlikely(*err)) {
+ dprintk("%s: _osd_xdr_decode_object_cred=>%d decoded_comps=%d "
+ "total_comps=%d\n", __func__, *err,
+ iter->decoded_comps, iter->total_comps);
+ return false; /* stop the loop */
+ }
+ dprintk("%s: dev(%llx:%llx) par=0x%llx obj=0x%llx "
+ "key_len=%u cap_len=%u\n",
+ __func__,
+ _DEVID_LO(&comp->oc_object_id.oid_device_id),
+ _DEVID_HI(&comp->oc_object_id.oid_device_id),
+ comp->oc_object_id.oid_partition_id,
+ comp->oc_object_id.oid_object_id,
+ comp->oc_cap_key.cred_len, comp->oc_cap.cred_len);
+
+ iter->decoded_comps++;
+ return true;
+}
+
+/*
+ * Get Device Information Decoding
+ *
+ * Note: since Device Information is currently done synchronously, all
+ * variable string fields are left inside the rpc buffer and are only
+ * pointed to by the pnfs_osd_deviceaddr members. So the read buffer
+ * should not be freed while the returned information is in use.
+ */
+/*
+ *struct nfs4_string {
+ * unsigned int len;
+ * char *data;
+ *}; // size [variable]
+ * NOTE: Returned string points to inside the XDR buffer
+ */
+static __be32 *
+__read_u8_opaque(__be32 *p, struct nfs4_string *str)
+{
+ str->len = be32_to_cpup(p++);
+ str->data = (char *)p;
+
+ p += XDR_QUADLEN(str->len);
+ return p;
+}
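+
+/*
+ * XDR layout reminder: an opaque<> of len 5 occupies one 4-byte
+ * length word plus XDR_QUADLEN(5) == 2 quads of payload, i.e.
+ * "hello" padded to 8 bytes; str->data points at that padded
+ * payload in place.
+ */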
+
+/*
+ * struct pnfs_osd_targetid {
+ * u32 oti_type;
+ * struct nfs4_string oti_scsi_device_id;
+ * };// size 4 + [variable]
+ */
+static __be32 *
+__read_targetid(__be32 *p, struct pnfs_osd_targetid *targetid)
+{
+ u32 oti_type;
+
+ oti_type = be32_to_cpup(p++);
+ targetid->oti_type = oti_type;
+
+ switch (oti_type) {
+ case OBJ_TARGET_SCSI_NAME:
+ case OBJ_TARGET_SCSI_DEVICE_ID:
+ p = __read_u8_opaque(p, &targetid->oti_scsi_device_id);
+ }
+
+ return p;
+}
+
+/*
+ * struct pnfs_osd_net_addr {
+ * struct nfs4_string r_netid;
+ * struct nfs4_string r_addr;
+ * };
+ */
+static __be32 *
+__read_net_addr(__be32 *p, struct pnfs_osd_net_addr *netaddr)
+{
+ p = __read_u8_opaque(p, &netaddr->r_netid);
+ p = __read_u8_opaque(p, &netaddr->r_addr);
+
+ return p;
+}
+
+/*
+ * struct pnfs_osd_targetaddr {
+ * u32 ota_available;
+ * struct pnfs_osd_net_addr ota_netaddr;
+ * };
+ */
+static __be32 *
+__read_targetaddr(__be32 *p, struct pnfs_osd_targetaddr *targetaddr)
+{
+ u32 ota_available;
+
+ ota_available = be32_to_cpup(p++);
+ targetaddr->ota_available = ota_available;
+
+ if (ota_available)
+ p = __read_net_addr(p, &targetaddr->ota_netaddr);
+
+ return p;
+}
+
+/*
+ * struct pnfs_osd_deviceaddr {
+ * struct pnfs_osd_targetid oda_targetid;
+ * struct pnfs_osd_targetaddr oda_targetaddr;
+ * u8 oda_lun[8];
+ * struct nfs4_string oda_systemid;
+ * struct pnfs_osd_object_cred oda_root_obj_cred;
+ * struct nfs4_string oda_osdname;
+ * };
+ */
+
+/* We need this version for the pnfs_osd_xdr_decode_deviceaddr which does
+ * not have an xdr_stream
+ */
+static __be32 *
+__read_opaque_cred(__be32 *p,
+ struct pnfs_osd_opaque_cred *opaque_cred)
+{
+ opaque_cred->cred_len = be32_to_cpu(*p++);
+ opaque_cred->cred = p;
+ return p + XDR_QUADLEN(opaque_cred->cred_len);
+}
+
+static __be32 *
+__read_object_cred(__be32 *p, struct pnfs_osd_object_cred *comp)
+{
+ p = _osd_xdr_decode_objid(p, &comp->oc_object_id);
+ comp->oc_osd_version = be32_to_cpup(p++);
+ comp->oc_cap_key_sec = be32_to_cpup(p++);
+
+ p = __read_opaque_cred(p, &comp->oc_cap_key);
+ p = __read_opaque_cred(p, &comp->oc_cap);
+ return p;
+}
+
+void pnfs_osd_xdr_decode_deviceaddr(
+ struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p)
+{
+ p = __read_targetid(p, &deviceaddr->oda_targetid);
+
+ p = __read_targetaddr(p, &deviceaddr->oda_targetaddr);
+
+ p = xdr_decode_opaque_fixed(p, deviceaddr->oda_lun,
+ sizeof(deviceaddr->oda_lun));
+
+ p = __read_u8_opaque(p, &deviceaddr->oda_systemid);
+
+ p = __read_object_cred(p, &deviceaddr->oda_root_obj_cred);
+
+ p = __read_u8_opaque(p, &deviceaddr->oda_osdname);
+
+ /* libosd likes this NUL-terminated for debugging. It's the last field, so no problems */
+ deviceaddr->oda_osdname.data[deviceaddr->oda_osdname.len] = 0;
+}
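+
+/* Usage sketch (illustrative only, not part of the original code):
+ * per the note above, the decoded nfs4_string members alias the XDR
+ * reply buffer, so that buffer must outlive the deviceaddr:
+ *
+ *   struct pnfs_osd_deviceaddr da;
+ *
+ *   pnfs_osd_xdr_decode_deviceaddr(&da, p);
+ *   dprintk("osd %.*s\n", da.oda_osdname.len, da.oda_osdname.data);
+ *
+ * da.oda_osdname.data points into the buffer behind p, so p must not
+ * be freed until da is no longer needed.
+ */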
+
+/*
+ * struct pnfs_osd_layoutupdate {
+ * u32 dsu_valid;
+ * s64 dsu_delta;
+ * u32 olu_ioerr_flag;
+ * }; xdr size 4 + 8 + 4
+ */
+int
+pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
+ struct pnfs_osd_layoutupdate *lou)
+{
+ __be32 *p = xdr_reserve_space(xdr, 4 + 8 + 4);
+
+ if (!p)
+ return -E2BIG;
+
+ *p++ = cpu_to_be32(lou->dsu_valid);
+ if (lou->dsu_valid)
+ p = xdr_encode_hyper(p, lou->dsu_delta);
+ *p++ = cpu_to_be32(lou->olu_ioerr_flag);
+ return 0;
+}
+
+/*
+ * struct pnfs_osd_objid {
+ * struct nfs4_deviceid oid_device_id;
+ * u64 oid_partition_id;
+ * u64 oid_object_id;
+ * }; // xdr size 32 bytes
+ */
+static inline __be32 *
+pnfs_osd_xdr_encode_objid(__be32 *p, struct pnfs_osd_objid *object_id)
+{
+ p = xdr_encode_opaque_fixed(p, &object_id->oid_device_id.data,
+ sizeof(object_id->oid_device_id.data));
+ p = xdr_encode_hyper(p, object_id->oid_partition_id);
+ p = xdr_encode_hyper(p, object_id->oid_object_id);
+
+ return p;
+}
+
+/*
+ * struct pnfs_osd_ioerr {
+ * struct pnfs_osd_objid oer_component;
+ * u64 oer_comp_offset;
+ * u64 oer_comp_length;
+ * u32 oer_iswrite;
+ * u32 oer_errno;
+ * }; // xdr size 32 + 24 bytes
+ */
+void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr)
+{
+ p = pnfs_osd_xdr_encode_objid(p, &ioerr->oer_component);
+ p = xdr_encode_hyper(p, ioerr->oer_comp_offset);
+ p = xdr_encode_hyper(p, ioerr->oer_comp_length);
+ *p++ = cpu_to_be32(ioerr->oer_iswrite);
+ *p = cpu_to_be32(ioerr->oer_errno);
+}
+
+__be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr)
+{
+ __be32 *p;
+
+ p = xdr_reserve_space(xdr, 32 + 24);
+ if (unlikely(!p))
+ dprintk("%s: out of xdr space\n", __func__);
+
+ return p;
+}
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
new file mode 100644
index 00000000..d21fceaa
--- /dev/null
+++ b/fs/nfs/pagelist.c
@@ -0,0 +1,415 @@
+/*
+ * linux/fs/nfs/pagelist.c
+ *
+ * A set of helper functions for managing NFS read and write requests.
+ * The main purpose of these routines is to provide support for the
+ * coalescing of several requests into a single RPC call.
+ *
+ * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs3.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_page.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/export.h>
+
+#include "internal.h"
+#include "pnfs.h"
+
+static struct kmem_cache *nfs_page_cachep;
+
+static inline struct nfs_page *
+nfs_page_alloc(void)
+{
+ struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
+ if (p)
+ INIT_LIST_HEAD(&p->wb_list);
+ return p;
+}
+
+static inline void
+nfs_page_free(struct nfs_page *p)
+{
+ kmem_cache_free(nfs_page_cachep, p);
+}
+
+/**
+ * nfs_create_request - Create an NFS read/write request.
+ * @ctx: open context to use
+ * @inode: inode to which the request is attached
+ * @page: page to be read from or written to
+ * @offset: starting offset of the request within the page
+ * @count: number of bytes to read/write
+ *
+ * The page must be locked by the caller. This makes sure we never
+ * create two different requests for the same page.
+ * User should ensure it is safe to sleep in this function.
+ */
+struct nfs_page *
+nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
+ struct page *page,
+ unsigned int offset, unsigned int count)
+{
+ struct nfs_page *req;
+
+ /* try to allocate the request struct */
+ req = nfs_page_alloc();
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ /* get lock context early so we can deal with alloc failures */
+ req->wb_lock_context = nfs_get_lock_context(ctx);
+ if (req->wb_lock_context == NULL) {
+ nfs_page_free(req);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Initialize the request struct. Initially, we assume a
+ * long write-back delay. This will be adjusted in
+ * update_nfs_request below if the region is not locked. */
+ req->wb_page = page;
+ atomic_set(&req->wb_complete, 0);
+ req->wb_index = page->index;
+ page_cache_get(page);
+ BUG_ON(PagePrivate(page));
+ BUG_ON(!PageLocked(page));
+ BUG_ON(page->mapping->host != inode);
+ req->wb_offset = offset;
+ req->wb_pgbase = offset;
+ req->wb_bytes = count;
+ req->wb_context = get_nfs_open_context(ctx);
+ kref_init(&req->wb_kref);
+ return req;
+}
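+
+/* Caller sketch (hypothetical, for illustration only): the page must
+ * already be locked, and failures come back as an ERR_PTR:
+ *
+ *   lock_page(page);
+ *   req = nfs_create_request(ctx, inode, page, 0, PAGE_CACHE_SIZE);
+ *   if (IS_ERR(req))
+ *           return PTR_ERR(req);
+ */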
+
+/**
+ * nfs_unlock_request - Unlock request and wake up sleepers.
+ * @req:
+ */
+void nfs_unlock_request(struct nfs_page *req)
+{
+ if (!NFS_WBACK_BUSY(req)) {
+ printk(KERN_ERR "NFS: Invalid unlock attempted\n");
+ BUG();
+ }
+ smp_mb__before_clear_bit();
+ clear_bit(PG_BUSY, &req->wb_flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&req->wb_flags, PG_BUSY);
+ nfs_release_request(req);
+}
+
+/*
+ * nfs_clear_request - Free up all resources allocated to the request
+ * @req: request to clear
+ *
+ * Release page and open context resources associated with a read/write
+ * request after it has completed.
+ */
+static void nfs_clear_request(struct nfs_page *req)
+{
+ struct page *page = req->wb_page;
+ struct nfs_open_context *ctx = req->wb_context;
+ struct nfs_lock_context *l_ctx = req->wb_lock_context;
+
+ if (page != NULL) {
+ page_cache_release(page);
+ req->wb_page = NULL;
+ }
+ if (l_ctx != NULL) {
+ nfs_put_lock_context(l_ctx);
+ req->wb_lock_context = NULL;
+ }
+ if (ctx != NULL) {
+ put_nfs_open_context(ctx);
+ req->wb_context = NULL;
+ }
+}
+
+
+/**
+ * nfs_release_request - Release the count on an NFS read/write request
+ * @req: request to release
+ *
+ * Note: Should never be called with the spinlock held!
+ */
+static void nfs_free_request(struct kref *kref)
+{
+ struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
+
+ /* Release struct file and open context */
+ nfs_clear_request(req);
+ nfs_page_free(req);
+}
+
+void nfs_release_request(struct nfs_page *req)
+{
+ kref_put(&req->wb_kref, nfs_free_request);
+}
+
+static int nfs_wait_bit_uninterruptible(void *word)
+{
+ io_schedule();
+ return 0;
+}
+
+/**
+ * nfs_wait_on_request - Wait for a request to complete.
+ * @req: request to wait upon.
+ *
+ * Interruptible by fatal signals only.
+ * The user is responsible for holding a count on the request.
+ */
+int
+nfs_wait_on_request(struct nfs_page *req)
+{
+ return wait_on_bit(&req->wb_flags, PG_BUSY,
+ nfs_wait_bit_uninterruptible,
+ TASK_UNINTERRUPTIBLE);
+}
+
+bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
+{
+ /*
+ * FIXME: ideally we should be able to coalesce all requests
+ * that are not block boundary aligned, but currently this
+ * is problematic for the case of bsize < PAGE_CACHE_SIZE,
+ * since nfs_flush_multi and nfs_pagein_multi assume you
+ * can have only one struct nfs_page.
+ */
+ if (desc->pg_bsize < PAGE_SIZE)
+ return false;
+
+ return desc->pg_count + req->wb_bytes <= desc->pg_bsize;
+}
+EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
+
+/**
+ * nfs_pageio_init - initialise a page io descriptor
+ * @desc: pointer to descriptor
+ * @inode: pointer to inode
+ * @pg_ops: pointer to pageio operations
+ * @bsize: io block size
+ * @io_flags: extra parameters for the io function
+ */
+void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
+ struct inode *inode,
+ const struct nfs_pageio_ops *pg_ops,
+ size_t bsize,
+ int io_flags)
+{
+ INIT_LIST_HEAD(&desc->pg_list);
+ desc->pg_bytes_written = 0;
+ desc->pg_count = 0;
+ desc->pg_bsize = bsize;
+ desc->pg_base = 0;
+ desc->pg_moreio = 0;
+ desc->pg_recoalesce = 0;
+ desc->pg_inode = inode;
+ desc->pg_ops = pg_ops;
+ desc->pg_ioflags = io_flags;
+ desc->pg_error = 0;
+ desc->pg_lseg = NULL;
+}
+
+/**
+ * nfs_can_coalesce_requests - test two requests for compatibility
+ * @prev: pointer to nfs_page
+ * @req: pointer to nfs_page
+ * @pgio: pointer to the pageio descriptor performing the coalescing
+ *
+ * The nfs_page structures 'prev' and 'req' are compared to ensure that the
+ * page data area they describe is contiguous, and that their RPC
+ * credentials, NFSv4 open state, and lockowners are the same.
+ *
+ * Return 'true' if this is the case, else return 'false'.
+ */
+static bool nfs_can_coalesce_requests(struct nfs_page *prev,
+ struct nfs_page *req,
+ struct nfs_pageio_descriptor *pgio)
+{
+ if (req->wb_context->cred != prev->wb_context->cred)
+ return false;
+ if (req->wb_lock_context->lockowner != prev->wb_lock_context->lockowner)
+ return false;
+ if (req->wb_context->state != prev->wb_context->state)
+ return false;
+ if (req->wb_index != (prev->wb_index + 1))
+ return false;
+ if (req->wb_pgbase != 0)
+ return false;
+ if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
+ return false;
+ return pgio->pg_ops->pg_test(pgio, prev, req);
+}
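+
+/* Worked example (illustrative values): prev = {wb_index 7, wb_pgbase
+ * 0, wb_bytes PAGE_CACHE_SIZE} coalesces with req = {wb_index 8,
+ * wb_pgbase 0} under the same cred, open state and lockowner, since
+ * prev ends exactly on a page boundary and req starts the next page.
+ * Any partial page (req->wb_pgbase != 0, or prev not ending at
+ * PAGE_CACHE_SIZE) ends the run.
+ */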
+
+/**
+ * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
+ * @desc: destination io descriptor
+ * @req: request
+ *
+ * Returns true if the request 'req' was successfully coalesced into the
+ * existing list of pages 'desc'.
+ */
+static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *req)
+{
+ if (desc->pg_count != 0) {
+ struct nfs_page *prev;
+
+ prev = nfs_list_entry(desc->pg_list.prev);
+ if (!nfs_can_coalesce_requests(prev, req, desc))
+ return 0;
+ } else {
+ if (desc->pg_ops->pg_init)
+ desc->pg_ops->pg_init(desc, req);
+ desc->pg_base = req->wb_pgbase;
+ }
+ nfs_list_remove_request(req);
+ nfs_list_add_request(req, &desc->pg_list);
+ desc->pg_count += req->wb_bytes;
+ return 1;
+}
+
+/*
+ * Helper for nfs_pageio_add_request and nfs_pageio_complete
+ */
+static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
+{
+ if (!list_empty(&desc->pg_list)) {
+ int error = desc->pg_ops->pg_doio(desc);
+ if (error < 0)
+ desc->pg_error = error;
+ else
+ desc->pg_bytes_written += desc->pg_count;
+ }
+ if (list_empty(&desc->pg_list)) {
+ desc->pg_count = 0;
+ desc->pg_base = 0;
+ }
+}
+
+/**
+ * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
+ * @desc: destination io descriptor
+ * @req: request
+ *
+ * Returns true if the request 'req' was successfully coalesced into the
+ * existing list of pages 'desc'.
+ */
+static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *req)
+{
+ while (!nfs_pageio_do_add_request(desc, req)) {
+ desc->pg_moreio = 1;
+ nfs_pageio_doio(desc);
+ if (desc->pg_error < 0)
+ return 0;
+ desc->pg_moreio = 0;
+ if (desc->pg_recoalesce)
+ return 0;
+ }
+ return 1;
+}
+
+static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
+{
+ LIST_HEAD(head);
+
+ do {
+ list_splice_init(&desc->pg_list, &head);
+ desc->pg_bytes_written -= desc->pg_count;
+ desc->pg_count = 0;
+ desc->pg_base = 0;
+ desc->pg_recoalesce = 0;
+
+ while (!list_empty(&head)) {
+ struct nfs_page *req;
+
+ req = list_first_entry(&head, struct nfs_page, wb_list);
+ nfs_list_remove_request(req);
+ if (__nfs_pageio_add_request(desc, req))
+ continue;
+ if (desc->pg_error < 0)
+ return 0;
+ break;
+ }
+ } while (desc->pg_recoalesce);
+ return 1;
+}
+
+int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
+ struct nfs_page *req)
+{
+ int ret;
+
+ do {
+ ret = __nfs_pageio_add_request(desc, req);
+ if (ret)
+ break;
+ if (desc->pg_error < 0)
+ break;
+ ret = nfs_do_recoalesce(desc);
+ } while (ret);
+ return ret;
+}
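+
+/* Descriptor lifecycle sketch (hypothetical caller, illustration
+ * only): initialise, add requests until one fails to coalesce, then
+ * flush whatever remains:
+ *
+ *   nfs_pageio_init(&desc, inode, pg_ops, wsize, FLUSH_STABLE);
+ *   while (!list_empty(&pages)) {
+ *           req = nfs_list_entry(pages.next);
+ *           nfs_list_remove_request(req);
+ *           if (!nfs_pageio_add_request(&desc, req))
+ *                   break;   (the error is left in desc.pg_error)
+ *   }
+ *   nfs_pageio_complete(&desc);
+ */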
+
+/**
+ * nfs_pageio_complete - Complete I/O on an nfs_pageio_descriptor
+ * @desc: pointer to io descriptor
+ */
+void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
+{
+ for (;;) {
+ nfs_pageio_doio(desc);
+ if (!desc->pg_recoalesce)
+ break;
+ if (!nfs_do_recoalesce(desc))
+ break;
+ }
+}
+
+/**
+ * nfs_pageio_cond_complete - Conditional I/O completion
+ * @desc: pointer to io descriptor
+ * @index: page index
+ *
+ * It is important to ensure that processes don't try to take locks
+ * on non-contiguous ranges of pages as that might deadlock. This
+ * function should be called before attempting to wait on a locked
+ * nfs_page. It will complete the I/O if the page index 'index'
+ * is not contiguous with the existing list of pages in 'desc'.
+ */
+void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
+{
+ if (!list_empty(&desc->pg_list)) {
+ struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
+ if (index != prev->wb_index + 1)
+ nfs_pageio_complete(desc);
+ }
+}
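+
+/* Usage sketch (hypothetical, illustration only): call this before
+ * blocking on a locked request so that previously coalesced,
+ * non-contiguous I/O is flushed first and cannot deadlock against us:
+ *
+ *   nfs_pageio_cond_complete(desc, page->index);
+ *   ret = nfs_wait_on_request(req);
+ */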
+
+int __init nfs_init_nfspagecache(void)
+{
+ nfs_page_cachep = kmem_cache_create("nfs_page",
+ sizeof(struct nfs_page),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (nfs_page_cachep == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void nfs_destroy_nfspagecache(void)
+{
+ kmem_cache_destroy(nfs_page_cachep);
+}
+
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
new file mode 100644
index 00000000..38512bcd
--- /dev/null
+++ b/fs/nfs/pnfs.c
@@ -0,0 +1,1552 @@
+/*
+ * pNFS functions to call and manage layout drivers.
+ *
+ * Copyright (c) 2002 [year of first publication]
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/module.h>
+#include "internal.h"
+#include "pnfs.h"
+#include "iostat.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS
+
+/* Locking:
+ *
+ * pnfs_spinlock:
+ * protects pnfs_modules_tbl.
+ */
+static DEFINE_SPINLOCK(pnfs_spinlock);
+
+/*
+ * pnfs_modules_tbl holds all pnfs modules
+ */
+static LIST_HEAD(pnfs_modules_tbl);
+
+/* Return the registered pnfs layout driver module matching given id */
+static struct pnfs_layoutdriver_type *
+find_pnfs_driver_locked(u32 id)
+{
+ struct pnfs_layoutdriver_type *local;
+
+ list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
+ if (local->id == id)
+ goto out;
+ local = NULL;
+out:
+ dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
+ return local;
+}
+
+static struct pnfs_layoutdriver_type *
+find_pnfs_driver(u32 id)
+{
+ struct pnfs_layoutdriver_type *local;
+
+ spin_lock(&pnfs_spinlock);
+ local = find_pnfs_driver_locked(id);
+ spin_unlock(&pnfs_spinlock);
+ return local;
+}
+
+void
+unset_pnfs_layoutdriver(struct nfs_server *nfss)
+{
+ if (nfss->pnfs_curr_ld) {
+ if (nfss->pnfs_curr_ld->clear_layoutdriver)
+ nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
+ module_put(nfss->pnfs_curr_ld->owner);
+ }
+ nfss->pnfs_curr_ld = NULL;
+}
+
+/*
+ * Try to set the server's pnfs module to the pnfs layout type specified by id.
+ * Currently only one pNFS layout driver per filesystem is supported.
+ *
+ * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
+ */
+void
+set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
+ u32 id)
+{
+ struct pnfs_layoutdriver_type *ld_type = NULL;
+
+ if (id == 0)
+ goto out_no_driver;
+ if (!(server->nfs_client->cl_exchange_flags &
+ (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
+ printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
+ __func__, id, server->nfs_client->cl_exchange_flags);
+ goto out_no_driver;
+ }
+ ld_type = find_pnfs_driver(id);
+ if (!ld_type) {
+ request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
+ ld_type = find_pnfs_driver(id);
+ if (!ld_type) {
+ dprintk("%s: No pNFS module found for %u.\n",
+ __func__, id);
+ goto out_no_driver;
+ }
+ }
+ if (!try_module_get(ld_type->owner)) {
+ dprintk("%s: Could not grab reference on module\n", __func__);
+ goto out_no_driver;
+ }
+ server->pnfs_curr_ld = ld_type;
+ if (ld_type->set_layoutdriver
+ && ld_type->set_layoutdriver(server, mntfh)) {
+ printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
+ "driver %u.\n", __func__, id);
+ module_put(ld_type->owner);
+ goto out_no_driver;
+ }
+
+ dprintk("%s: pNFS module for %u set\n", __func__, id);
+ return;
+
+out_no_driver:
+ dprintk("%s: Using NFSv4 I/O\n", __func__);
+ server->pnfs_curr_ld = NULL;
+}
+
+int
+pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
+{
+ int status = -EINVAL;
+ struct pnfs_layoutdriver_type *tmp;
+
+ if (ld_type->id == 0) {
+ printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
+ return status;
+ }
+ if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
+ printk(KERN_ERR "NFS: %s Layout driver must provide "
+ "alloc_lseg and free_lseg.\n", __func__);
+ return status;
+ }
+
+ spin_lock(&pnfs_spinlock);
+ tmp = find_pnfs_driver_locked(ld_type->id);
+ if (!tmp) {
+ list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
+ status = 0;
+ dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
+ ld_type->name);
+ } else {
+ printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
+ __func__, ld_type->id);
+ }
+ spin_unlock(&pnfs_spinlock);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
+
+void
+pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
+{
+ dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
+ spin_lock(&pnfs_spinlock);
+ list_del(&ld_type->pnfs_tblid);
+ spin_unlock(&pnfs_spinlock);
+}
+EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
+
+/*
+ * pNFS client layout cache
+ */
+
+/* Need to hold i_lock if caller does not already hold reference */
+void
+get_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ atomic_inc(&lo->plh_refcount);
+}
+
+static struct pnfs_layout_hdr *
+pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
+{
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
+ return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
+ kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
+}
+
+static void
+pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
+ put_rpccred(lo->plh_lc_cred);
+ return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
+}
+
+static void
+destroy_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ dprintk("%s: freeing layout cache %p\n", __func__, lo);
+ BUG_ON(!list_empty(&lo->plh_layouts));
+ NFS_I(lo->plh_inode)->layout = NULL;
+ pnfs_free_layout_hdr(lo);
+}
+
+static void
+put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+{
+ if (atomic_dec_and_test(&lo->plh_refcount))
+ destroy_layout_hdr(lo);
+}
+
+void
+put_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+ struct inode *inode = lo->plh_inode;
+
+ if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+ destroy_layout_hdr(lo);
+ spin_unlock(&inode->i_lock);
+ }
+}
+
+static void
+init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
+{
+ INIT_LIST_HEAD(&lseg->pls_list);
+ INIT_LIST_HEAD(&lseg->pls_lc_list);
+ atomic_set(&lseg->pls_refcount, 1);
+ smp_mb();
+ set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
+ lseg->pls_layout = lo;
+}
+
+static void free_lseg(struct pnfs_layout_segment *lseg)
+{
+ struct inode *ino = lseg->pls_layout->plh_inode;
+
+ NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+ /* Matched by get_layout_hdr in pnfs_insert_layout */
+ put_layout_hdr(NFS_I(ino)->layout);
+}
+
+static void
+put_lseg_common(struct pnfs_layout_segment *lseg)
+{
+ struct inode *inode = lseg->pls_layout->plh_inode;
+
+ WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+ list_del_init(&lseg->pls_list);
+ if (list_empty(&lseg->pls_layout->plh_segs)) {
+ set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
+ /* Matched by initial refcount set in alloc_init_layout_hdr */
+ put_layout_hdr_locked(lseg->pls_layout);
+ }
+ rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
+}
+
+void
+put_lseg(struct pnfs_layout_segment *lseg)
+{
+ struct inode *inode;
+
+ if (!lseg)
+ return;
+
+ dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
+ atomic_read(&lseg->pls_refcount),
+ test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+ inode = lseg->pls_layout->plh_inode;
+ if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
+ LIST_HEAD(free_me);
+
+ put_lseg_common(lseg);
+ list_add(&lseg->pls_list, &free_me);
+ spin_unlock(&inode->i_lock);
+ pnfs_free_lseg_list(&free_me);
+ }
+}
+EXPORT_SYMBOL_GPL(put_lseg);
+
+static inline u64
+end_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ end = start + len;
+ return end >= start ? end : NFS4_MAX_UINT64;
+}
+
+/* last octet in a range */
+static inline u64
+last_byte_offset(u64 start, u64 len)
+{
+ u64 end;
+
+ BUG_ON(!len);
+ end = start + len;
+ return end > start ? end - 1 : NFS4_MAX_UINT64;
+}
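+
+/* Worked examples (illustrative values): both helpers saturate to
+ * NFS4_MAX_UINT64 on overflow, so an "all ones" length always denotes
+ * an open-ended range:
+ *
+ *   end_offset(4096, 8192)               == 12288 (first byte outside)
+ *   last_byte_offset(4096, 8192)         == 12287 (last byte inside)
+ *   end_offset(4096, NFS4_MAX_UINT64)    == NFS4_MAX_UINT64 (wrapped)
+ */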
+
+/*
+ * is l2 fully contained in l1?
+ * start1 end1
+ * [----------------------------------)
+ * start2 end2
+ * [----------------)
+ */
+static inline int
+lo_seg_contained(struct pnfs_layout_range *l1,
+ struct pnfs_layout_range *l2)
+{
+ u64 start1 = l1->offset;
+ u64 end1 = end_offset(start1, l1->length);
+ u64 start2 = l2->offset;
+ u64 end2 = end_offset(start2, l2->length);
+
+ return (start1 <= start2) && (end1 >= end2);
+}
+
+/*
+ * do l1 and l2 intersect?
+ * start1 end1
+ * [----------------------------------)
+ * start2 end2
+ * [----------------)
+ */
+static inline int
+lo_seg_intersecting(struct pnfs_layout_range *l1,
+ struct pnfs_layout_range *l2)
+{
+ u64 start1 = l1->offset;
+ u64 end1 = end_offset(start1, l1->length);
+ u64 start2 = l2->offset;
+ u64 end2 = end_offset(start2, l2->length);
+
+ return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
+ (end2 == NFS4_MAX_UINT64 || end2 > start1);
+}
+
+static bool
+should_free_lseg(struct pnfs_layout_range *lseg_range,
+ struct pnfs_layout_range *recall_range)
+{
+ return (recall_range->iomode == IOMODE_ANY ||
+ lseg_range->iomode == recall_range->iomode) &&
+ lo_seg_intersecting(lseg_range, recall_range);
+}
+
+/* Returns 1 if lseg is removed from list, 0 otherwise */
+static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
+ struct list_head *tmp_list)
+{
+ int rv = 0;
+
+ if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+ /* Remove the reference keeping the lseg in the
+ * list. It will now be removed when all
+ * outstanding io is finished.
+ */
+ dprintk("%s: lseg %p ref %d\n", __func__, lseg,
+ atomic_read(&lseg->pls_refcount));
+ if (atomic_dec_and_test(&lseg->pls_refcount)) {
+ put_lseg_common(lseg);
+ list_add(&lseg->pls_list, tmp_list);
+ rv = 1;
+ }
+ }
+ return rv;
+}
+
+/* Returns the number of matching invalid lsegs remaining in the list
+ * after the call.
+ */
+int
+mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+ struct list_head *tmp_list,
+ struct pnfs_layout_range *recall_range)
+{
+ struct pnfs_layout_segment *lseg, *next;
+ int invalid = 0, removed = 0;
+
+ dprintk("%s:Begin lo %p\n", __func__, lo);
+
+ if (list_empty(&lo->plh_segs)) {
+ if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
+ put_layout_hdr_locked(lo);
+ return 0;
+ }
+ list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+ if (!recall_range ||
+ should_free_lseg(&lseg->pls_range, recall_range)) {
+ dprintk("%s: freeing lseg %p iomode %d "
+ "offset %llu length %llu\n", __func__,
+ lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
+ lseg->pls_range.length);
+ invalid++;
+ removed += mark_lseg_invalid(lseg, tmp_list);
+ }
+ dprintk("%s:Return %i\n", __func__, invalid - removed);
+ return invalid - removed;
+}
+
+/* note free_me must contain lsegs from a single layout_hdr */
+void
+pnfs_free_lseg_list(struct list_head *free_me)
+{
+ struct pnfs_layout_segment *lseg, *tmp;
+ struct pnfs_layout_hdr *lo;
+
+ if (list_empty(free_me))
+ return;
+
+ lo = list_first_entry(free_me, struct pnfs_layout_segment,
+ pls_list)->pls_layout;
+
+ if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
+ struct nfs_client *clp;
+
+ clp = NFS_SERVER(lo->plh_inode)->nfs_client;
+ spin_lock(&clp->cl_lock);
+ list_del_init(&lo->plh_layouts);
+ spin_unlock(&clp->cl_lock);
+ }
+ list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
+ list_del(&lseg->pls_list);
+ free_lseg(lseg);
+ }
+}
+
+void
+pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+ struct pnfs_layout_hdr *lo;
+ LIST_HEAD(tmp_list);
+
+ spin_lock(&nfsi->vfs_inode.i_lock);
+ lo = nfsi->layout;
+ if (lo) {
+ lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
+ mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+ }
+ spin_unlock(&nfsi->vfs_inode.i_lock);
+ pnfs_free_lseg_list(&tmp_list);
+}
+
+/*
+ * Called by the state manager to remove all layouts established under an
+ * expired lease.
+ */
+void
+pnfs_destroy_all_layouts(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ struct pnfs_layout_hdr *lo;
+ LIST_HEAD(tmp_list);
+
+ nfs4_deviceid_mark_client_invalid(clp);
+ nfs4_deviceid_purge_client(clp);
+
+ spin_lock(&clp->cl_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ if (!list_empty(&server->layouts))
+ list_splice_init(&server->layouts, &tmp_list);
+ }
+ rcu_read_unlock();
+ spin_unlock(&clp->cl_lock);
+
+ while (!list_empty(&tmp_list)) {
+ lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
+ plh_layouts);
+ dprintk("%s freeing layout for inode %lu\n", __func__,
+ lo->plh_inode->i_ino);
+ list_del_init(&lo->plh_layouts);
+ pnfs_destroy_layout(NFS_I(lo->plh_inode));
+ }
+}
+
+/* update lo->plh_stateid with new if is more recent */
+void
+pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
+ bool update_barrier)
+{
+ u32 oldseq, newseq;
+
+ oldseq = be32_to_cpu(lo->plh_stateid.seqid);
+ newseq = be32_to_cpu(new->seqid);
+ if ((int)(newseq - oldseq) > 0) {
+ nfs4_stateid_copy(&lo->plh_stateid, new);
+ if (update_barrier) {
+ u32 new_barrier = be32_to_cpu(new->seqid);
+
+ if ((int)(new_barrier - lo->plh_barrier))
+ lo->plh_barrier = new_barrier;
+ } else {
+ /* Because of wraparound, we want to keep the barrier
+ * "close" to the current seqids. It needs to be
+ * within 2**31 to count as "behind", so if it
+ * gets too near that limit, give us a little leeway
+ * and bring it to within 2**30.
+ * NOTE - and yes, this is all unsigned arithmetic.
+ */
+ if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
+ lo->plh_barrier = newseq - (1 << 30);
+ }
+ }
+}
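+
+/* Worked example of the serial-number test above (illustrative
+ * values): unsigned subtraction followed by a cast to int stays
+ * correct across seqid wraparound, as long as the two seqids are
+ * within 2**31 of each other:
+ *
+ *   oldseq = 0xfffffffe, newseq = 0x00000001
+ *     (int)(newseq - oldseq) == 3, > 0  => new stateid accepted
+ *   oldseq = 0x00000005, newseq = 0x00000002
+ *     (int)(newseq - oldseq) == -3      => new stateid is stale
+ */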
+
+/* lget is set to 1 if called from inside send_layoutget call chain */
+static bool
+pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
+ int lget)
+{
+ if ((stateid) &&
+ (int)(lo->plh_barrier - be32_to_cpu(stateid->seqid)) >= 0)
+ return true;
+ return lo->plh_block_lgets ||
+ test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
+ test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+ (list_empty(&lo->plh_segs) &&
+ (atomic_read(&lo->plh_outstanding) > lget));
+}
+
+int
+pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+ struct nfs4_state *open_state)
+{
+ int status = 0;
+
+ dprintk("--> %s\n", __func__);
+ spin_lock(&lo->plh_inode->i_lock);
+ if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
+ status = -EAGAIN;
+ } else if (list_empty(&lo->plh_segs)) {
+ int seq;
+
+ do {
+ seq = read_seqbegin(&open_state->seqlock);
+ nfs4_stateid_copy(dst, &open_state->stateid);
+ } while (read_seqretry(&open_state->seqlock, seq));
+ } else
+ nfs4_stateid_copy(dst, &lo->plh_stateid);
+ spin_unlock(&lo->plh_inode->i_lock);
+ dprintk("<-- %s\n", __func__);
+ return status;
+}
+
+/*
+ * Get layout from server.
+ * for now, assume that whole file layouts are requested.
+ * arg->offset: 0
+ * arg->length: all ones
+ */
+static struct pnfs_layout_segment *
+send_layoutget(struct pnfs_layout_hdr *lo,
+ struct nfs_open_context *ctx,
+ struct pnfs_layout_range *range,
+ gfp_t gfp_flags)
+{
+ struct inode *ino = lo->plh_inode;
+ struct nfs_server *server = NFS_SERVER(ino);
+ struct nfs4_layoutget *lgp;
+ struct pnfs_layout_segment *lseg = NULL;
+ struct page **pages = NULL;
+ int i;
+ u32 max_resp_sz, max_pages;
+
+ dprintk("--> %s\n", __func__);
+
+ BUG_ON(ctx == NULL);
+ lgp = kzalloc(sizeof(*lgp), gfp_flags);
+ if (lgp == NULL)
+ return NULL;
+
+ /* allocate pages for xdr post processing */
+ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
+ max_pages = nfs_page_array_len(0, max_resp_sz);
+
+ pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
+ if (!pages)
+ goto out_err_free;
+
+ for (i = 0; i < max_pages; i++) {
+ pages[i] = alloc_page(gfp_flags);
+ if (!pages[i])
+ goto out_err_free;
+ }
+
+ lgp->args.minlength = PAGE_CACHE_SIZE;
+ if (lgp->args.minlength > range->length)
+ lgp->args.minlength = range->length;
+ lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
+ lgp->args.range = *range;
+ lgp->args.type = server->pnfs_curr_ld->id;
+ lgp->args.inode = ino;
+ lgp->args.ctx = get_nfs_open_context(ctx);
+ lgp->args.layout.pages = pages;
+ lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+ lgp->lsegpp = &lseg;
+ lgp->gfp_flags = gfp_flags;
+
+ /* Synchronously retrieve layout information from server and
+ * store in lseg.
+ */
+ nfs4_proc_layoutget(lgp);
+ if (!lseg) {
+ /* remember that LAYOUTGET failed and suspend trying */
+ set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
+ }
+
+ /* free xdr pages */
+ for (i = 0; i < max_pages; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+
+ return lseg;
+
+out_err_free:
+ /* free any allocated xdr pages and lgp, as it's not used */
+ if (pages) {
+ for (i = 0; i < max_pages; i++) {
+ if (!pages[i])
+ break;
+ __free_page(pages[i]);
+ }
+ kfree(pages);
+ }
+ kfree(lgp);
+ return NULL;
+}
+
+/* Initiates a LAYOUTRETURN(FILE) */
+int
+_pnfs_return_layout(struct inode *ino)
+{
+ struct pnfs_layout_hdr *lo = NULL;
+ struct nfs_inode *nfsi = NFS_I(ino);
+ LIST_HEAD(tmp_list);
+ struct nfs4_layoutreturn *lrp;
+ nfs4_stateid stateid;
+ int status = 0;
+
+ dprintk("--> %s\n", __func__);
+
+ spin_lock(&ino->i_lock);
+ lo = nfsi->layout;
+ if (!lo) {
+ spin_unlock(&ino->i_lock);
+ dprintk("%s: no layout to return\n", __func__);
+ return status;
+ }
+ stateid = nfsi->layout->plh_stateid;
+ /* Reference matched in nfs4_layoutreturn_release */
+ get_layout_hdr(lo);
+ mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+ lo->plh_block_lgets++;
+ spin_unlock(&ino->i_lock);
+ pnfs_free_lseg_list(&tmp_list);
+
+ WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));
+
+ lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
+ if (unlikely(lrp == NULL)) {
+ status = -ENOMEM;
+ set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
+ set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
+ put_layout_hdr(lo);
+ goto out;
+ }
+
+ lrp->args.stateid = stateid;
+ lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
+ lrp->args.inode = ino;
+ lrp->args.layout = lo;
+ lrp->clp = NFS_SERVER(ino)->nfs_client;
+
+ status = nfs4_proc_layoutreturn(lrp);
+out:
+ dprintk("<-- %s status: %d\n", __func__, status);
+ return status;
+}
+
+bool pnfs_roc(struct inode *ino)
+{
+ struct pnfs_layout_hdr *lo;
+ struct pnfs_layout_segment *lseg, *tmp;
+ LIST_HEAD(tmp_list);
+ bool found = false;
+
+ spin_lock(&ino->i_lock);
+ lo = NFS_I(ino)->layout;
+ if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
+ test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
+ goto out_nolayout;
+ list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
+ if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+ mark_lseg_invalid(lseg, &tmp_list);
+ found = true;
+ }
+ if (!found)
+ goto out_nolayout;
+ lo->plh_block_lgets++;
+ get_layout_hdr(lo); /* matched in pnfs_roc_release */
+ spin_unlock(&ino->i_lock);
+ pnfs_free_lseg_list(&tmp_list);
+ return true;
+
+out_nolayout:
+ spin_unlock(&ino->i_lock);
+ return false;
+}
+
+void pnfs_roc_release(struct inode *ino)
+{
+ struct pnfs_layout_hdr *lo;
+
+ spin_lock(&ino->i_lock);
+ lo = NFS_I(ino)->layout;
+ lo->plh_block_lgets--;
+ put_layout_hdr_locked(lo);
+ spin_unlock(&ino->i_lock);
+}
+
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+ struct pnfs_layout_hdr *lo;
+
+ spin_lock(&ino->i_lock);
+ lo = NFS_I(ino)->layout;
+ if ((int)(barrier - lo->plh_barrier) > 0)
+ lo->plh_barrier = barrier;
+ spin_unlock(&ino->i_lock);
+}
+
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_segment *lseg;
+ bool found = false;
+
+ spin_lock(&ino->i_lock);
+ list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
+ if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+ found = true;
+ break;
+ }
+ if (!found) {
+ struct pnfs_layout_hdr *lo = nfsi->layout;
+ u32 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
+
+ /* Since close does not return a layout stateid for use as
+ * a barrier, we choose the worst-case barrier.
+ */
+ *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
+ }
+ spin_unlock(&ino->i_lock);
+ return found;
+}
+
+/*
+ * Compare two layout segments for sorting into layout cache.
+ * We want to preferentially return RW over RO layouts, so ensure those
+ * are seen first.
+ */
+static s64
+cmp_layout(struct pnfs_layout_range *l1,
+ struct pnfs_layout_range *l2)
+{
+ s64 d;
+
+ /* high offset > low offset */
+ d = l1->offset - l2->offset;
+ if (d)
+ return d;
+
+ /* short length > long length */
+ d = l2->length - l1->length;
+ if (d)
+ return d;
+
+ /* read > read/write */
+ return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
+}
+
+static void
+pnfs_insert_layout(struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_segment *lseg)
+{
+ struct pnfs_layout_segment *lp;
+
+ dprintk("%s:Begin\n", __func__);
+
+ assert_spin_locked(&lo->plh_inode->i_lock);
+ list_for_each_entry(lp, &lo->plh_segs, pls_list) {
+ if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
+ continue;
+ list_add_tail(&lseg->pls_list, &lp->pls_list);
+ dprintk("%s: inserted lseg %p "
+ "iomode %d offset %llu length %llu before "
+ "lp %p iomode %d offset %llu length %llu\n",
+ __func__, lseg, lseg->pls_range.iomode,
+ lseg->pls_range.offset, lseg->pls_range.length,
+ lp, lp->pls_range.iomode, lp->pls_range.offset,
+ lp->pls_range.length);
+ goto out;
+ }
+ list_add_tail(&lseg->pls_list, &lo->plh_segs);
+ dprintk("%s: inserted lseg %p "
+ "iomode %d offset %llu length %llu at tail\n",
+ __func__, lseg, lseg->pls_range.iomode,
+ lseg->pls_range.offset, lseg->pls_range.length);
+out:
+ get_layout_hdr(lo);
+
+ dprintk("%s:Return\n", __func__);
+}
+
+static struct pnfs_layout_hdr *
+alloc_init_layout_hdr(struct inode *ino,
+ struct nfs_open_context *ctx,
+ gfp_t gfp_flags)
+{
+ struct pnfs_layout_hdr *lo;
+
+ lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
+ if (!lo)
+ return NULL;
+ atomic_set(&lo->plh_refcount, 1);
+ INIT_LIST_HEAD(&lo->plh_layouts);
+ INIT_LIST_HEAD(&lo->plh_segs);
+ INIT_LIST_HEAD(&lo->plh_bulk_recall);
+ lo->plh_inode = ino;
+ lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
+ return lo;
+}
+
+static struct pnfs_layout_hdr *
+pnfs_find_alloc_layout(struct inode *ino,
+ struct nfs_open_context *ctx,
+ gfp_t gfp_flags)
+{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct pnfs_layout_hdr *new = NULL;
+
+ dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
+
+ assert_spin_locked(&ino->i_lock);
+ if (nfsi->layout) {
+ if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
+ return NULL;
+ else
+ return nfsi->layout;
+ }
+ spin_unlock(&ino->i_lock);
+ new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
+ spin_lock(&ino->i_lock);
+
+ if (likely(nfsi->layout == NULL)) /* Won the race? */
+ nfsi->layout = new;
+ else
+ pnfs_free_layout_hdr(new);
+ return nfsi->layout;
+}
+
+/*
+ * iomode matching rules:
+ * iomode lseg match
+ * ----- ----- -----
+ * ANY READ true
+ * ANY RW true
+ * RW READ false
+ * RW RW true
+ * READ READ true
+ * READ RW true
+ */
+static int
+is_matching_lseg(struct pnfs_layout_range *ls_range,
+ struct pnfs_layout_range *range)
+{
+ struct pnfs_layout_range range1;
+
+ if ((range->iomode == IOMODE_RW &&
+ ls_range->iomode != IOMODE_RW) ||
+ !lo_seg_intersecting(ls_range, range))
+ return 0;
+
+ /* range1 covers only the first byte in the range */
+ range1 = *range;
+ range1.length = 1;
+ return lo_seg_contained(ls_range, &range1);
+}
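+
+/* Worked example (illustrative): per the table above, an IOMODE_RW
+ * request is never satisfied by a cached IOMODE_READ lseg, while an
+ * IOMODE_READ request accepts either iomode. The range check only
+ * demands that the first byte of the request lie inside the lseg,
+ * provided the two ranges intersect at all; that is what the
+ * length-1 range1 probe implements.
+ */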
+
+/*
+ * lookup range in layout
+ */
+static struct pnfs_layout_segment *
+pnfs_find_lseg(struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_range *range)
+{
+ struct pnfs_layout_segment *lseg, *ret = NULL;
+
+ dprintk("%s:Begin\n", __func__);
+
+ assert_spin_locked(&lo->plh_inode->i_lock);
+ list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
+ if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
+ is_matching_lseg(&lseg->pls_range, range)) {
+ ret = get_lseg(lseg);
+ break;
+ }
+ if (lseg->pls_range.offset > range->offset)
+ break;
+ }
+
+ dprintk("%s:Return lseg %p ref %d\n",
+ __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
+ return ret;
+}
+
+/*
+ * Layout segment is retrieved from the server if not cached.
+ * The appropriate layout segment is referenced and returned to the caller.
+ */
+struct pnfs_layout_segment *
+pnfs_update_layout(struct inode *ino,
+ struct nfs_open_context *ctx,
+ loff_t pos,
+ u64 count,
+ enum pnfs_iomode iomode,
+ gfp_t gfp_flags)
+{
+ struct pnfs_layout_range arg = {
+ .iomode = iomode,
+ .offset = pos,
+ .length = count,
+ };
+ unsigned pg_offset;
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct nfs_server *server = NFS_SERVER(ino);
+ struct nfs_client *clp = server->nfs_client;
+ struct pnfs_layout_hdr *lo;
+ struct pnfs_layout_segment *lseg = NULL;
+ bool first = false;
+
+ if (!pnfs_enabled_sb(NFS_SERVER(ino)))
+ return NULL;
+ spin_lock(&ino->i_lock);
+ lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
+ if (lo == NULL) {
+ dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
+ goto out_unlock;
+ }
+
+ /* Do we even need to bother with this? */
+ if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+ dprintk("%s matches recall, use MDS\n", __func__);
+ goto out_unlock;
+ }
+
+ /* if LAYOUTGET already failed once we don't try again */
+ if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
+ goto out_unlock;
+
+ /* Check to see if the layout for the given range already exists */
+ lseg = pnfs_find_lseg(lo, &arg);
+ if (lseg)
+ goto out_unlock;
+
+ if (pnfs_layoutgets_blocked(lo, NULL, 0))
+ goto out_unlock;
+ atomic_inc(&lo->plh_outstanding);
+
+ get_layout_hdr(lo);
+ if (list_empty(&lo->plh_segs))
+ first = true;
+ spin_unlock(&ino->i_lock);
+ if (first) {
+ /* The lo must be on the clp list if there is any
+ * chance of a CB_LAYOUTRECALL(FILE) coming in.
+ */
+ spin_lock(&clp->cl_lock);
+ BUG_ON(!list_empty(&lo->plh_layouts));
+ list_add_tail(&lo->plh_layouts, &server->layouts);
+ spin_unlock(&clp->cl_lock);
+ }
+
+ pg_offset = arg.offset & ~PAGE_CACHE_MASK;
+ if (pg_offset) {
+ arg.offset -= pg_offset;
+ arg.length += pg_offset;
+ }
+ if (arg.length != NFS4_MAX_UINT64)
+ arg.length = PAGE_CACHE_ALIGN(arg.length);
+
+ lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
+ if (!lseg && first) {
+ spin_lock(&clp->cl_lock);
+ list_del_init(&lo->plh_layouts);
+ spin_unlock(&clp->cl_lock);
+ }
+ atomic_dec(&lo->plh_outstanding);
+ put_layout_hdr(lo);
+out:
+ dprintk("%s end, state 0x%lx lseg %p\n", __func__,
+ nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
+ return lseg;
+out_unlock:
+ spin_unlock(&ino->i_lock);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(pnfs_update_layout);
+
+int
+pnfs_layout_process(struct nfs4_layoutget *lgp)
+{
+ struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
+ struct nfs4_layoutget_res *res = &lgp->res;
+ struct pnfs_layout_segment *lseg;
+ struct inode *ino = lo->plh_inode;
+ int status = 0;
+
+ /* Inject layout blob into I/O device driver */
+ lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
+ if (!lseg || IS_ERR(lseg)) {
+ if (!lseg)
+ status = -ENOMEM;
+ else
+ status = PTR_ERR(lseg);
+ dprintk("%s: Could not allocate layout: error %d\n",
+ __func__, status);
+ goto out;
+ }
+
+ spin_lock(&ino->i_lock);
+ if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+ dprintk("%s forget reply due to recall\n", __func__);
+ goto out_forget_reply;
+ }
+
+ if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
+ dprintk("%s forget reply due to state\n", __func__);
+ goto out_forget_reply;
+ }
+ init_lseg(lo, lseg);
+ lseg->pls_range = res->range;
+ *lgp->lsegpp = get_lseg(lseg);
+ pnfs_insert_layout(lo, lseg);
+
+ if (res->return_on_close) {
+ set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
+ set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
+ }
+
+ /* Done processing layoutget. Set the layout stateid */
+ pnfs_set_layout_stateid(lo, &res->stateid, false);
+ spin_unlock(&ino->i_lock);
+out:
+ return status;
+
+out_forget_reply:
+ spin_unlock(&ino->i_lock);
+ lseg->pls_layout = lo;
+ NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+ goto out;
+}
+
+void
+pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+ BUG_ON(pgio->pg_lseg != NULL);
+
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ req_offset(req),
+ req->wb_bytes,
+ IOMODE_READ,
+ GFP_KERNEL);
+ /* If no lseg, fall back to read through mds */
+ if (pgio->pg_lseg == NULL)
+ nfs_pageio_reset_read_mds(pgio);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);
+
+void
+pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
+{
+ BUG_ON(pgio->pg_lseg != NULL);
+
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ req_offset(req),
+ req->wb_bytes,
+ IOMODE_RW,
+ GFP_NOFS);
+ /* If no lseg, fall back to write through mds */
+ if (pgio->pg_lseg == NULL)
+ nfs_pageio_reset_write_mds(pgio);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
+
+bool
+pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
+
+ if (ld == NULL)
+ return false;
+ nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0);
+ return true;
+}
+
+bool
+pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
+
+ if (ld == NULL)
+ return false;
+ nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags);
+ return true;
+}
+
+bool
+pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
+ struct nfs_page *req)
+{
+ if (pgio->pg_lseg == NULL)
+ return nfs_generic_pg_test(pgio, prev, req);
+
+ /*
+ * Test if a nfs_page is fully contained in the pnfs_layout_range.
+ * Note that this test makes several assumptions:
+ * - that the previous nfs_page in the struct nfs_pageio_descriptor
+ * is known to lie within the range.
+ * - that the nfs_page being tested is known to be contiguous with the
+ * previous nfs_page.
+ * - Layout ranges are page aligned, so we only have to test the
+ * start offset of the request.
+ *
+ * Please also note that 'end_offset' is actually the offset of the
+ * first byte that lies outside the pnfs_layout_range. FIXME?
+ *
+ */
+ return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
+ pgio->pg_lseg->pls_range.length);
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
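+
+/* Worked example (illustrative values): for a layout range of
+ * {offset 0, length 16384}, end_offset() is 16384, i.e. the first
+ * byte outside the range. A request at offset 12288 is accepted
+ * (12288 < 16384); one at offset 16384 is rejected and will be sent
+ * through the MDS instead.
+ */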
+
+static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head)
+{
+ struct nfs_pageio_descriptor pgio;
+ LIST_HEAD(failed);
+
+ /* Resend all requests through the MDS */
+ nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE);
+ while (!list_empty(head)) {
+ struct nfs_page *req = nfs_list_entry(head->next);
+
+ nfs_list_remove_request(req);
+ if (!nfs_pageio_add_request(&pgio, req))
+ nfs_list_add_request(req, &failed);
+ }
+ nfs_pageio_complete(&pgio);
+
+ if (!list_empty(&failed)) {
+ /* For some reason our attempt to resend pages through the MDS
+ * has failed. Mark the overall send request as having failed,
+ * and let nfs_writeback_release_full deal with the error.
+ */
+ list_move(&failed, head);
+ return -EIO;
+ }
+ return 0;
+}
+
+/*
+ * Called by non rpc-based layout drivers
+ */
+void pnfs_ld_write_done(struct nfs_write_data *data)
+{
+ if (likely(!data->pnfs_error)) {
+ pnfs_set_layoutcommit(data);
+ data->mds_ops->rpc_call_done(&data->task, data);
+ } else {
+ dprintk("pnfs write error = %d\n", data->pnfs_error);
+ if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_ERROR) {
+ /* Don't layoutcommit on error; the server will need to
+ * perform file recovery.
+ */
+ clear_bit(NFS_INO_LAYOUTCOMMIT,
+ &NFS_I(data->inode)->flags);
+ pnfs_return_layout(data->inode);
+ }
+ data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
+ }
+ put_lseg(data->lseg);
+ data->mds_ops->rpc_release(data);
+}
+EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
+
+static void
+pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
+ struct nfs_write_data *data)
+{
+ list_splice_tail_init(&data->pages, &desc->pg_list);
+ if (data->req && list_empty(&data->req->wb_list))
+ nfs_list_add_request(data->req, &desc->pg_list);
+ nfs_pageio_reset_write_mds(desc);
+ desc->pg_recoalesce = 1;
+ put_lseg(data->lseg);
+ nfs_writedata_release(data);
+}
+
+static enum pnfs_try_status
+pnfs_try_to_write_data(struct nfs_write_data *wdata,
+ const struct rpc_call_ops *call_ops,
+ struct pnfs_layout_segment *lseg,
+ int how)
+{
+ struct inode *inode = wdata->inode;
+ enum pnfs_try_status trypnfs;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+
+ wdata->mds_ops = call_ops;
+ wdata->lseg = get_lseg(lseg);
+
+ dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
+ inode->i_ino, wdata->args.count, wdata->args.offset, how);
+
+ trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
+ if (trypnfs == PNFS_NOT_ATTEMPTED) {
+ put_lseg(wdata->lseg);
+ wdata->lseg = NULL;
+ } else
+ nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
+
+ dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
+ return trypnfs;
+}
+
+static void
+pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
+{
+ struct nfs_write_data *data;
+ const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
+ struct pnfs_layout_segment *lseg = desc->pg_lseg;
+
+ desc->pg_lseg = NULL;
+ while (!list_empty(head)) {
+ enum pnfs_try_status trypnfs;
+
+ data = list_entry(head->next, struct nfs_write_data, list);
+ list_del_init(&data->list);
+
+ trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
+ if (trypnfs == PNFS_NOT_ATTEMPTED)
+ pnfs_write_through_mds(desc, data);
+ }
+ put_lseg(lseg);
+}
+
+int
+pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
+{
+ LIST_HEAD(head);
+ int ret;
+
+ ret = nfs_generic_flush(desc, &head);
+ if (ret != 0) {
+ put_lseg(desc->pg_lseg);
+ desc->pg_lseg = NULL;
+ return ret;
+ }
+ pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
+
+static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
+{
+ struct nfs_pageio_descriptor pgio;
+
+ put_lseg(data->lseg);
+ data->lseg = NULL;
+ dprintk("pnfs write error = %d\n", data->pnfs_error);
+ if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_ERROR)
+ pnfs_return_layout(data->inode);
+
+ nfs_pageio_init_read_mds(&pgio, data->inode);
+
+ while (!list_empty(&data->pages)) {
+ struct nfs_page *req = nfs_list_entry(data->pages.next);
+
+ nfs_list_remove_request(req);
+ nfs_pageio_add_request(&pgio, req);
+ }
+ nfs_pageio_complete(&pgio);
+}
+
+/*
+ * Called by non rpc-based layout drivers
+ */
+void pnfs_ld_read_done(struct nfs_read_data *data)
+{
+ if (likely(!data->pnfs_error)) {
+ __nfs4_read_done_cb(data);
+ data->mds_ops->rpc_call_done(&data->task, data);
+ } else
+ pnfs_ld_handle_read_error(data);
+ put_lseg(data->lseg);
+ data->mds_ops->rpc_release(data);
+}
+EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
+
+static void
+pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
+ struct nfs_read_data *data)
+{
+ list_splice_tail_init(&data->pages, &desc->pg_list);
+ if (data->req && list_empty(&data->req->wb_list))
+ nfs_list_add_request(data->req, &desc->pg_list);
+ nfs_pageio_reset_read_mds(desc);
+ desc->pg_recoalesce = 1;
+ nfs_readdata_release(data);
+}
+
+/*
+ * Call the appropriate parallel I/O subsystem read function.
+ */
+static enum pnfs_try_status
+pnfs_try_to_read_data(struct nfs_read_data *rdata,
+ const struct rpc_call_ops *call_ops,
+ struct pnfs_layout_segment *lseg)
+{
+ struct inode *inode = rdata->inode;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+ enum pnfs_try_status trypnfs;
+
+ rdata->mds_ops = call_ops;
+ rdata->lseg = get_lseg(lseg);
+
+ dprintk("%s: Reading ino:%lu %u@%llu\n",
+ __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
+
+ trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
+ if (trypnfs == PNFS_NOT_ATTEMPTED) {
+ put_lseg(rdata->lseg);
+ rdata->lseg = NULL;
+ } else {
+ nfs_inc_stats(inode, NFSIOS_PNFS_READ);
+ }
+ dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
+ return trypnfs;
+}
+
+static void
+pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
+{
+ struct nfs_read_data *data;
+ const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
+ struct pnfs_layout_segment *lseg = desc->pg_lseg;
+
+ desc->pg_lseg = NULL;
+ while (!list_empty(head)) {
+ enum pnfs_try_status trypnfs;
+
+ data = list_entry(head->next, struct nfs_read_data, list);
+ list_del_init(&data->list);
+
+ trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
+ if (trypnfs == PNFS_NOT_ATTEMPTED)
+ pnfs_read_through_mds(desc, data);
+ }
+ put_lseg(lseg);
+}
+
+int
+pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
+{
+ LIST_HEAD(head);
+ int ret;
+
+ ret = nfs_generic_pagein(desc, &head);
+ if (ret != 0) {
+ put_lseg(desc->pg_lseg);
+ desc->pg_lseg = NULL;
+ return ret;
+ }
+ pnfs_do_multiple_reads(desc, &head);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
+
+/*
+ * There can be multiple RW segments.
+ */
+static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
+{
+ struct pnfs_layout_segment *lseg;
+
+ list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
+ if (lseg->pls_range.iomode == IOMODE_RW &&
+ test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
+ list_add(&lseg->pls_lc_list, listp);
+ }
+}
+
+void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
+{
+ if (lseg->pls_range.iomode == IOMODE_RW) {
+ dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
+ } else {
+ dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
+ set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
+ }
+}
+EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
+
+void
+pnfs_set_layoutcommit(struct nfs_write_data *wdata)
+{
+ struct nfs_inode *nfsi = NFS_I(wdata->inode);
+ loff_t end_pos = wdata->mds_offset + wdata->res.count;
+ bool mark_as_dirty = false;
+
+ spin_lock(&nfsi->vfs_inode.i_lock);
+ if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
+ mark_as_dirty = true;
+ dprintk("%s: Set layoutcommit for inode %lu ",
+ __func__, wdata->inode->i_ino);
+ }
+ if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) {
+ /* references matched in nfs4_layoutcommit_release */
+ get_lseg(wdata->lseg);
+ }
+ if (end_pos > nfsi->layout->plh_lwb)
+ nfsi->layout->plh_lwb = end_pos;
+ spin_unlock(&nfsi->vfs_inode.i_lock);
+ dprintk("%s: lseg %p end_pos %llu\n",
+ __func__, wdata->lseg, nfsi->layout->plh_lwb);
+
+ /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
+ * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
+ if (mark_as_dirty)
+ mark_inode_dirty_sync(wdata->inode);
+}
+EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
+
+void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
+{
+ struct nfs_server *nfss = NFS_SERVER(data->args.inode);
+
+ if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
+ nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
+}
+
+/*
+ * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
+ * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
+ * data to disk to allow the server to recover the data if it crashes.
+ * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
+ * is off, and a COMMIT is sent to a data server, or
+ * if WRITEs to a data server return NFS_DATA_SYNC.
+ */
+int
+pnfs_layoutcommit_inode(struct inode *inode, bool sync)
+{
+ struct nfs4_layoutcommit_data *data;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ loff_t end_pos;
+ int status = 0;
+
+ dprintk("--> %s inode %lu\n", __func__, inode->i_ino);
+
+ if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
+ return 0;
+
+ /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
+ data = kzalloc(sizeof(*data), GFP_NOFS);
+ if (!data) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
+ goto out_free;
+
+ if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
+ if (!sync) {
+ status = -EAGAIN;
+ goto out_free;
+ }
+ status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
+ nfs_wait_bit_killable, TASK_KILLABLE);
+ if (status)
+ goto out_free;
+ }
+
+ INIT_LIST_HEAD(&data->lseg_list);
+ spin_lock(&inode->i_lock);
+ if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
+ clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
+ spin_unlock(&inode->i_lock);
+ wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
+ goto out_free;
+ }
+
+ pnfs_list_write_lseg(inode, &data->lseg_list);
+
+ end_pos = nfsi->layout->plh_lwb;
+ nfsi->layout->plh_lwb = 0;
+
+ nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
+ spin_unlock(&inode->i_lock);
+
+ data->args.inode = inode;
+ data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
+ nfs_fattr_init(&data->fattr);
+ data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
+ data->res.fattr = &data->fattr;
+ data->args.lastbytewritten = end_pos - 1;
+ data->res.server = NFS_SERVER(inode);
+
+ status = nfs4_proc_layoutcommit(data, sync);
+out:
+ if (status)
+ mark_inode_dirty_sync(inode);
+ dprintk("<-- %s status %d\n", __func__, status);
+ return status;
+out_free:
+ kfree(data);
+ goto out;
+}
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
new file mode 100644
index 00000000..442ebf68
--- /dev/null
+++ b/fs/nfs/pnfs.h
@@ -0,0 +1,440 @@
+/*
+ * pNFS client data structures.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#ifndef FS_NFS_PNFS_H
+#define FS_NFS_PNFS_H
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+
+enum {
+ NFS_LSEG_VALID = 0, /* cleared when lseg is recalled/returned */
+ NFS_LSEG_ROC, /* roc bit received from server */
+ NFS_LSEG_LAYOUTCOMMIT, /* layoutcommit bit set for layoutcommit */
+};
+
+struct pnfs_layout_segment {
+ struct list_head pls_list;
+ struct list_head pls_lc_list;
+ struct pnfs_layout_range pls_range;
+ atomic_t pls_refcount;
+ unsigned long pls_flags;
+ struct pnfs_layout_hdr *pls_layout;
+};
+
+enum pnfs_try_status {
+ PNFS_ATTEMPTED = 0,
+ PNFS_NOT_ATTEMPTED = 1,
+};
+
+#ifdef CONFIG_NFS_V4_1
+
+#define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4"
+
+enum {
+ NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed; stop trying */
+ NFS_LAYOUT_RW_FAILED, /* get rw layout failed; stop trying */
+ NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
+ NFS_LAYOUT_ROC, /* some lseg had roc bit set */
+ NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */
+};
+
+enum layoutdriver_policy_flags {
+ /* Should the pNFS client commit and return the layout upon a setattr */
+ PNFS_LAYOUTRET_ON_SETATTR = 1 << 0,
+ PNFS_LAYOUTRET_ON_ERROR = 1 << 1,
+};
+
+struct nfs4_deviceid_node;
+
+/* Per-layout driver specific registration structure */
+struct pnfs_layoutdriver_type {
+ struct list_head pnfs_tblid;
+ const u32 id;
+ const char *name;
+ struct module *owner;
+ unsigned flags;
+
+ int (*set_layoutdriver) (struct nfs_server *, const struct nfs_fh *);
+ int (*clear_layoutdriver) (struct nfs_server *);
+
+ struct pnfs_layout_hdr * (*alloc_layout_hdr) (struct inode *inode, gfp_t gfp_flags);
+ void (*free_layout_hdr) (struct pnfs_layout_hdr *);
+
+ struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
+ void (*free_lseg) (struct pnfs_layout_segment *lseg);
+
+ /* test for nfs page cache coalescing */
+ const struct nfs_pageio_ops *pg_read_ops;
+ const struct nfs_pageio_ops *pg_write_ops;
+
+ void (*mark_request_commit) (struct nfs_page *req,
+ struct pnfs_layout_segment *lseg);
+ void (*clear_request_commit) (struct nfs_page *req);
+ int (*scan_commit_lists) (struct inode *inode, int max, spinlock_t *lock);
+ int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how);
+
+ /*
+ * Return PNFS_ATTEMPTED to indicate the layout code has attempted
+ * I/O, else return PNFS_NOT_ATTEMPTED to fall back to normal NFS.
+ */
+ enum pnfs_try_status (*read_pagelist) (struct nfs_read_data *nfs_data);
+ enum pnfs_try_status (*write_pagelist) (struct nfs_write_data *nfs_data, int how);
+
+ void (*free_deviceid_node) (struct nfs4_deviceid_node *);
+
+ void (*encode_layoutreturn) (struct pnfs_layout_hdr *layoutid,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutreturn_args *args);
+
+ void (*cleanup_layoutcommit) (struct nfs4_layoutcommit_data *data);
+
+ void (*encode_layoutcommit) (struct pnfs_layout_hdr *layoutid,
+ struct xdr_stream *xdr,
+ const struct nfs4_layoutcommit_args *args);
+};
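+
+/*
+ * A minimal registration sketch (hypothetical; the names below are
+ * illustrative and not taken from any in-tree driver). A layout driver
+ * fills in its identity fields and the mandatory lseg constructor and
+ * destructor, then registers itself from its module init function; the
+ * .id shown is the standard files-layout type number, used here purely
+ * as an example:
+ *
+ *   static struct pnfs_layoutdriver_type example_layout_type = {
+ *     .id         = LAYOUT_NFSV4_1_FILES,
+ *     .name       = "example",
+ *     .owner      = THIS_MODULE,
+ *     .alloc_lseg = example_alloc_lseg,
+ *     .free_lseg  = example_free_lseg,
+ *   };
+ *
+ *   static int __init example_init(void)
+ *   {
+ *     return pnfs_register_layoutdriver(&example_layout_type);
+ *   }
+ */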
+
+struct pnfs_layout_hdr {
+ atomic_t plh_refcount;
+ struct list_head plh_layouts; /* other client layouts */
+ struct list_head plh_bulk_recall; /* clnt list of bulk recalls */
+ struct list_head plh_segs; /* layout segments list */
+ nfs4_stateid plh_stateid;
+ atomic_t plh_outstanding; /* number of RPCs out */
+ unsigned long plh_block_lgets; /* block LAYOUTGET if >0 */
+ u32 plh_barrier; /* ignore lower seqids */
+ unsigned long plh_flags;
+ loff_t plh_lwb; /* last write byte for layoutcommit */
+ struct rpc_cred *plh_lc_cred; /* layoutcommit cred */
+ struct inode *plh_inode;
+};
+
+struct pnfs_device {
+ struct nfs4_deviceid dev_id;
+ unsigned int layout_type;
+ unsigned int mincount;
+ struct page **pages;
+ unsigned int pgbase;
+ unsigned int pglen;
+};
+
+#define NFS4_PNFS_GETDEVLIST_MAXNUM 16
+
+struct pnfs_devicelist {
+ unsigned int eof;
+ unsigned int num_devs;
+ struct nfs4_deviceid dev_id[NFS4_PNFS_GETDEVLIST_MAXNUM];
+};
+
+extern int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *);
+extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
+
+/* nfs4proc.c */
+extern int nfs4_proc_getdevicelist(struct nfs_server *server,
+ const struct nfs_fh *fh,
+ struct pnfs_devicelist *devlist);
+extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
+ struct pnfs_device *dev);
+extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
+extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
+
+/* pnfs.c */
+void get_layout_hdr(struct pnfs_layout_hdr *lo);
+void put_lseg(struct pnfs_layout_segment *lseg);
+
+bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *);
+bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int);
+
+void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
+void unset_pnfs_layoutdriver(struct nfs_server *);
+void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *, struct nfs_page *);
+int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc);
+void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *, struct nfs_page *);
+int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc);
+bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev, struct nfs_page *req);
+void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg);
+int pnfs_layout_process(struct nfs4_layoutget *lgp);
+void pnfs_free_lseg_list(struct list_head *tmp_list);
+void pnfs_destroy_layout(struct nfs_inode *);
+void pnfs_destroy_all_layouts(struct nfs_client *);
+void put_layout_hdr(struct pnfs_layout_hdr *lo);
+void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
+ const nfs4_stateid *new,
+ bool update_barrier);
+int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
+ struct pnfs_layout_hdr *lo,
+ struct nfs4_state *open_state);
+int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+ struct list_head *tmp_list,
+ struct pnfs_layout_range *recall_range);
+bool pnfs_roc(struct inode *ino);
+void pnfs_roc_release(struct inode *ino);
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
+void pnfs_set_layoutcommit(struct nfs_write_data *wdata);
+void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data);
+int pnfs_layoutcommit_inode(struct inode *inode, bool sync);
+int _pnfs_return_layout(struct inode *);
+void pnfs_ld_write_done(struct nfs_write_data *);
+void pnfs_ld_read_done(struct nfs_read_data *);
+struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
+ struct nfs_open_context *ctx,
+ loff_t pos,
+ u64 count,
+ enum pnfs_iomode iomode,
+ gfp_t gfp_flags);
+
+void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
+
+/* nfs4_deviceid_flags */
+enum {
+ NFS_DEVICEID_INVALID = 0, /* set when MDS clientid recalled */
+};
+
+/* pnfs_dev.c */
+struct nfs4_deviceid_node {
+ struct hlist_node node;
+ struct hlist_node tmpnode;
+ const struct pnfs_layoutdriver_type *ld;
+ const struct nfs_client *nfs_client;
+ unsigned long flags;
+ struct nfs4_deviceid deviceid;
+ atomic_t ref;
+};
+
+struct nfs4_deviceid_node *nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *);
+void nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *, const struct nfs_client *, const struct nfs4_deviceid *);
+void nfs4_init_deviceid_node(struct nfs4_deviceid_node *,
+ const struct pnfs_layoutdriver_type *,
+ const struct nfs_client *,
+ const struct nfs4_deviceid *);
+struct nfs4_deviceid_node *nfs4_insert_deviceid_node(struct nfs4_deviceid_node *);
+bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *);
+void nfs4_deviceid_purge_client(const struct nfs_client *);
+
+static inline int lo_fail_bit(u32 iomode)
+{
+ return iomode == IOMODE_RW ?
+ NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
+}
+
+static inline struct pnfs_layout_segment *
+get_lseg(struct pnfs_layout_segment *lseg)
+{
+ if (lseg) {
+ atomic_inc(&lseg->pls_refcount);
+ smp_mb__after_atomic_inc();
+ }
+ return lseg;
+}
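+
+/*
+ * A typical (hypothetical) caller pattern: pnfs_update_layout() already
+ * returns a referenced segment, so it pairs directly with put_lseg();
+ * get_lseg() is only for taking an extra reference on a segment that
+ * some other holder keeps alive:
+ *
+ *   lseg = pnfs_update_layout(ino, ctx, pos, count, IOMODE_READ,
+ *                             GFP_KERNEL);
+ *   if (lseg) {
+ *     ... issue the I/O described by lseg ...
+ *     put_lseg(lseg);
+ *   }
+ */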
+
+/* Return true if a layout driver is being used for this mountpoint */
+static inline int pnfs_enabled_sb(struct nfs_server *nfss)
+{
+ return nfss->pnfs_curr_ld != NULL;
+}
+
+static inline int
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+{
+ if (!test_and_clear_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags))
+ return PNFS_NOT_ATTEMPTED;
+ return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how);
+}
+
+static inline bool
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+ struct inode *inode = req->wb_context->dentry->d_inode;
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+
+ if (lseg == NULL || ld->mark_request_commit == NULL)
+ return false;
+ ld->mark_request_commit(req, lseg);
+ return true;
+}
+
+static inline bool
+pnfs_clear_request_commit(struct nfs_page *req)
+{
+ struct inode *inode = req->wb_context->dentry->d_inode;
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+
+ if (ld == NULL || ld->clear_request_commit == NULL)
+ return false;
+ ld->clear_request_commit(req);
+ return true;
+}
+
+static inline int
+pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+{
+ struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
+ int ret;
+
+ if (ld == NULL || ld->scan_commit_lists == NULL)
+ return 0;
+ ret = ld->scan_commit_lists(inode, max, lock);
+ if (ret != 0)
+ set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags);
+ return ret;
+}
+
+/* Should the pNFS client commit and return the layout upon a setattr */
+static inline bool
+pnfs_ld_layoutret_on_setattr(struct inode *inode)
+{
+ if (!pnfs_enabled_sb(NFS_SERVER(inode)))
+ return false;
+ return NFS_SERVER(inode)->pnfs_curr_ld->flags &
+ PNFS_LAYOUTRET_ON_SETATTR;
+}
+
+static inline int pnfs_return_layout(struct inode *ino)
+{
+ struct nfs_inode *nfsi = NFS_I(ino);
+ struct nfs_server *nfss = NFS_SERVER(ino);
+
+ if (pnfs_enabled_sb(nfss) && nfsi->layout)
+ return _pnfs_return_layout(ino);
+
+ return 0;
+}
+
+#ifdef NFS_DEBUG
+void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id);
+#else
+static inline void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id)
+{
+}
+#endif /* NFS_DEBUG */
+#else /* CONFIG_NFS_V4_1 */
+
+static inline void pnfs_destroy_all_layouts(struct nfs_client *clp)
+{
+}
+
+static inline void pnfs_destroy_layout(struct nfs_inode *nfsi)
+{
+}
+
+static inline struct pnfs_layout_segment *
+get_lseg(struct pnfs_layout_segment *lseg)
+{
+ return NULL;
+}
+
+static inline void put_lseg(struct pnfs_layout_segment *lseg)
+{
+}
+
+static inline int pnfs_return_layout(struct inode *ino)
+{
+ return 0;
+}
+
+static inline bool
+pnfs_ld_layoutret_on_setattr(struct inode *inode)
+{
+ return false;
+}
+
+static inline bool
+pnfs_roc(struct inode *ino)
+{
+ return false;
+}
+
+static inline void
+pnfs_roc_release(struct inode *ino)
+{
+}
+
+static inline void
+pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+}
+
+static inline bool
+pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+ return false;
+}
+
+static inline void set_pnfs_layoutdriver(struct nfs_server *s,
+ const struct nfs_fh *mntfh, u32 id)
+{
+}
+
+static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
+{
+}
+
+static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
+{
+ return false;
+}
+
+static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags)
+{
+ return false;
+}
+
+static inline int
+pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how)
+{
+ return PNFS_NOT_ATTEMPTED;
+}
+
+static inline bool
+pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+ return false;
+}
+
+static inline bool
+pnfs_clear_request_commit(struct nfs_page *req)
+{
+ return false;
+}
+
+static inline int
+pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock)
+{
+ return 0;
+}
+
+static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
+{
+ return 0;
+}
+
+#endif /* CONFIG_NFS_V4_1 */
+
+#endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/pnfs_dev.c b/fs/nfs/pnfs_dev.c
new file mode 100644
index 00000000..73f701f1
--- /dev/null
+++ b/fs/nfs/pnfs_dev.c
@@ -0,0 +1,278 @@
+/*
+ * Device operations for the pnfs client.
+ *
+ * Copyright (c) 2002
+ * The Regents of the University of Michigan
+ * All Rights Reserved
+ *
+ * Dean Hildebrand <dhildebz@umich.edu>
+ * Garth Goodson <Garth.Goodson@netapp.com>
+ *
+ * Permission is granted to use, copy, create derivative works, and
+ * redistribute this software and such derivative works for any purpose,
+ * so long as the name of the University of Michigan is not used in
+ * any advertising or publicity pertaining to the use or distribution
+ * of this software without specific, written prior authorization. If
+ * the above copyright notice or any other identification of the
+ * University of Michigan is included in any copy of any portion of
+ * this software, then the disclaimer below must also be included.
+ *
+ * This software is provided as is, without representation or warranty
+ * of any kind either express or implied, including without limitation
+ * the implied warranties of merchantability, fitness for a particular
+ * purpose, or noninfringement. The Regents of the University of
+ * Michigan shall not be liable for any damages, including special,
+ * indirect, incidental, or consequential damages, with respect to any
+ * claim arising out of or in connection with the use of the software,
+ * even if it has been or is hereafter advised of the possibility of
+ * such damages.
+ */
+
+#include <linux/export.h>
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PNFS
+
+/*
+ * Device ID RCU cache. A device ID is unique per server and layout type.
+ */
+#define NFS4_DEVICE_ID_HASH_BITS 5
+#define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
+#define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
+
+static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
+static DEFINE_SPINLOCK(nfs4_deviceid_lock);
+
+#ifdef NFS_DEBUG
+void
+nfs4_print_deviceid(const struct nfs4_deviceid *id)
+{
+ u32 *p = (u32 *)id;
+
+ dprintk("%s: device id= [%x%x%x%x]\n", __func__,
+ p[0], p[1], p[2], p[3]);
+}
+EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
+#endif
+
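+/*
+ * Simple multiplicative string hash (x = x * 37 + byte) over the raw
+ * NFS4_DEVICEID4_SIZE bytes of the device id, reduced to
+ * NFS4_DEVICE_ID_HASH_BITS bits by the mask; collisions are handled by
+ * the per-bucket hlist chains above.
+ */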
+static inline u32
+nfs4_deviceid_hash(const struct nfs4_deviceid *id)
+{
+ unsigned char *cptr = (unsigned char *)id->data;
+ unsigned int nbytes = NFS4_DEVICEID4_SIZE;
+ u32 x = 0;
+
+ while (nbytes--) {
+ x *= 37;
+ x += *cptr++;
+ }
+ return x & NFS4_DEVICE_ID_HASH_MASK;
+}
+
+static struct nfs4_deviceid_node *
+_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *clp, const struct nfs4_deviceid *id,
+ long hash)
+{
+ struct nfs4_deviceid_node *d;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+ if (d->ld == ld && d->nfs_client == clp &&
+ !memcmp(&d->deviceid, id, sizeof(*id))) {
+ if (atomic_read(&d->ref))
+ return d;
+ else
+ continue;
+ }
+ return NULL;
+}
+
+/*
+ * Lookup a deviceid in cache and get a reference count on it if found
+ *
+ * @clp nfs_client associated with deviceid
+ * @id deviceid to look up
+ */
+static struct nfs4_deviceid_node *
+_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *clp, const struct nfs4_deviceid *id,
+ long hash)
+{
+ struct nfs4_deviceid_node *d;
+
+ rcu_read_lock();
+ d = _lookup_deviceid(ld, clp, id, hash);
+ if (d != NULL)
+ atomic_inc(&d->ref);
+ rcu_read_unlock();
+ return d;
+}
+
+struct nfs4_deviceid_node *
+nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *clp, const struct nfs4_deviceid *id)
+{
+ return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
+}
+EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
+
+/*
+ * Remove a deviceid from cache
+ *
+ * @clp nfs_client associated with deviceid
+ * @id the deviceid to unhash
+ *
+ * If found, the node is unhashed and the reference taken at insertion
+ * time is dropped; the node is freed once its reference count reaches zero.
+ */
+void
+nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *clp, const struct nfs4_deviceid *id)
+{
+ struct nfs4_deviceid_node *d;
+
+ spin_lock(&nfs4_deviceid_lock);
+ rcu_read_lock();
+ d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
+ rcu_read_unlock();
+ if (!d) {
+ spin_unlock(&nfs4_deviceid_lock);
+ return;
+ }
+ hlist_del_init_rcu(&d->node);
+ spin_unlock(&nfs4_deviceid_lock);
+ synchronize_rcu();
+
+ /* balance the initial ref set in pnfs_insert_deviceid */
+ if (atomic_dec_and_test(&d->ref))
+ d->ld->free_deviceid_node(d);
+}
+EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
+
+void
+nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
+ const struct pnfs_layoutdriver_type *ld,
+ const struct nfs_client *nfs_client,
+ const struct nfs4_deviceid *id)
+{
+ INIT_HLIST_NODE(&d->node);
+ INIT_HLIST_NODE(&d->tmpnode);
+ d->ld = ld;
+ d->nfs_client = nfs_client;
+ d->flags = 0;
+ d->deviceid = *id;
+ atomic_set(&d->ref, 1);
+}
+EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);
+
+/*
+ * Insert a deviceid node into the cache if no equivalent entry is already present
+ *
+ * @new new deviceid node
+ * Note that the caller must set up the following members:
+ * new->ld
+ * new->nfs_client
+ * new->deviceid
+ *
+ * @ret the inserted node if no matching entry was found; otherwise, the existing entry.
+ */
+struct nfs4_deviceid_node *
+nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
+{
+ struct nfs4_deviceid_node *d;
+ long hash;
+
+ spin_lock(&nfs4_deviceid_lock);
+ hash = nfs4_deviceid_hash(&new->deviceid);
+ d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
+ if (d) {
+ spin_unlock(&nfs4_deviceid_lock);
+ return d;
+ }
+
+ hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
+ spin_unlock(&nfs4_deviceid_lock);
+ atomic_inc(&new->ref);
+
+ return new;
+}
+EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
+
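+/*
+ * A typical (hypothetical) lookup-or-insert pattern for a layout
+ * driver, with illustrative driver_alloc_node()/driver_free_node()
+ * helpers: if another thread wins the insertion race,
+ * nfs4_insert_deviceid_node() returns the existing referenced entry
+ * and the caller simply frees its own copy.
+ *
+ *   d = nfs4_find_get_deviceid(ld, clp, id);
+ *   if (!d) {
+ *     new = driver_alloc_node();
+ *     nfs4_init_deviceid_node(new, ld, clp, id);
+ *     d = nfs4_insert_deviceid_node(new);
+ *     if (d != new)
+ *       driver_free_node(new);
+ *   }
+ *   ... use d ...
+ *   nfs4_put_deviceid_node(d);
+ */
+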
+/*
+ * Dereference a deviceid node and delete it when its reference count drops
+ * to zero.
+ *
+ * @d deviceid node to put
+ *
+ * return true iff the node was deleted
+ * Note that the test for d->ref == 0 is sufficient to establish that
+ * the node is no longer hashed in the global device id cache.
+ */
+bool
+nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
+{
+ if (!atomic_dec_and_test(&d->ref))
+ return false;
+ d->ld->free_deviceid_node(d);
+ return true;
+}
+EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
+
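+/*
+ * Purging is two-phase: nodes belonging to the client are unhashed
+ * under the spinlock and parked on a private list via their tmpnode
+ * member; then, once synchronize_rcu() guarantees that no RCU lookup
+ * can still see them, the cache's reference is dropped and any node
+ * whose count reaches zero is freed.
+ */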
+static void
+_deviceid_purge_client(const struct nfs_client *clp, long hash)
+{
+ struct nfs4_deviceid_node *d;
+ struct hlist_node *n;
+ HLIST_HEAD(tmp);
+
+ spin_lock(&nfs4_deviceid_lock);
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
+ if (d->nfs_client == clp && atomic_read(&d->ref)) {
+ hlist_del_init_rcu(&d->node);
+ hlist_add_head(&d->tmpnode, &tmp);
+ }
+ rcu_read_unlock();
+ spin_unlock(&nfs4_deviceid_lock);
+
+ if (hlist_empty(&tmp))
+ return;
+
+ synchronize_rcu();
+ while (!hlist_empty(&tmp)) {
+ d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
+ hlist_del(&d->tmpnode);
+ if (atomic_dec_and_test(&d->ref))
+ d->ld->free_deviceid_node(d);
+ }
+}
+
+void
+nfs4_deviceid_purge_client(const struct nfs_client *clp)
+{
+ long h;
+
+ if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
+ return;
+ for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
+ _deviceid_purge_client(clp, h);
+}
+
+/*
+ * Stop use of all deviceids associated with an nfs_client
+ */
+void
+nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
+{
+ struct nfs4_deviceid_node *d;
+ struct hlist_node *n;
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++) {
+ hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
+ if (d->nfs_client == clp)
+ set_bit(NFS_DEVICEID_INVALID, &d->flags);
+ }
+ rcu_read_unlock();
+}
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
new file mode 100644
index 00000000..b63b6f4d
--- /dev/null
+++ b/fs/nfs/proc.c
@@ -0,0 +1,771 @@
+/*
+ * linux/fs/nfs/proc.c
+ *
+ * Copyright (C) 1992, 1993, 1994 Rick Sladkey
+ *
+ * OS-independent nfs remote procedure call functions
+ *
+ * Tuned by Alan Cox <A.Cox@swansea.ac.uk> for >3K buffers
+ * so at last we can have decent(ish) throughput off a
+ * Sun server.
+ *
+ * Coding optimized and cleaned up by Florian La Roche.
+ * Note: Error returns are optimized for NFS_OK, which isn't translated via
+ * nfs_stat_to_errno(), but already happens to be the right return code.
+ *
+ * Also, the code currently doesn't check the size of the packet when
+ * it decodes the packet.
+ *
+ * Feel free to fix it and mail me the diffs if it worries you.
+ *
+ * Completely rewritten to support the new RPC call interface;
+ * rewrote and moved the entire XDR stuff to xdr.c
+ * --Olaf Kirch June 1996
+ *
+ * The code below initializes all auto variables explicitly, otherwise
+ * it will fail to work as a module (gcc generates a memset call for an
+ * incomplete struct).
+ */
+
+#include <linux/types.h>
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/pagemap.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/lockd/bind.h>
+#include <linux/freezer.h>
+#include "internal.h"
+
+#define NFSDBG_FACILITY NFSDBG_PROC
+
+/*
+ * wrapper to handle the -EKEYEXPIRED error message. This should generally
+ * only happen if using krb5 auth and a user's TGT expires. NFSv2 doesn't
+ * support the NFSERR_JUKEBOX error code, but we handle this situation in the
+ * same way that we handle that error with NFSv3.
+ */
+static int
+nfs_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
+{
+ int res;
+ do {
+ res = rpc_call_sync(clnt, msg, flags);
+ if (res != -EKEYEXPIRED)
+ break;
+ freezable_schedule_timeout_killable(NFS_JUKEBOX_RETRY_TIME);
+ res = -ERESTARTSYS;
+ } while (!fatal_signal_pending(current));
+ return res;
+}
+
+#define rpc_call_sync(clnt, msg, flags) nfs_rpc_wrapper(clnt, msg, flags)
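+
+/*
+ * From this point on, every rpc_call_sync() in this file expands to the
+ * wrapper above, so all synchronous NFSv2 procedures transparently
+ * retry on -EKEYEXPIRED until a fatal signal becomes pending.
+ */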
+
+static int
+nfs_async_handle_expired_key(struct rpc_task *task)
+{
+ if (task->tk_status != -EKEYEXPIRED)
+ return 0;
+ task->tk_status = 0;
+ rpc_restart_call(task);
+ rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
+ return 1;
+}
+
+/*
+ * Bare-bones access to getattr: this is for nfs_read_super.
+ */
+static int
+nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ struct nfs_fattr *fattr = info->fattr;
+ struct nfs2_fsstat fsinfo;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_GETATTR],
+ .rpc_argp = fhandle,
+ .rpc_resp = fattr,
+ };
+ int status;
+
+ dprintk("%s: call getattr\n", __func__);
+ nfs_fattr_init(fattr);
+ status = rpc_call_sync(server->client, &msg, 0);
+ /* Retry with default authentication if different */
+ if (status && server->nfs_client->cl_rpcclient != server->client)
+ status = rpc_call_sync(server->nfs_client->cl_rpcclient, &msg, 0);
+ dprintk("%s: reply getattr: %d\n", __func__, status);
+ if (status)
+ return status;
+ dprintk("%s: call statfs\n", __func__);
+ msg.rpc_proc = &nfs_procedures[NFSPROC_STATFS];
+ msg.rpc_resp = &fsinfo;
+ status = rpc_call_sync(server->client, &msg, 0);
+ /* Retry with default authentication if different */
+ if (status && server->nfs_client->cl_rpcclient != server->client)
+ status = rpc_call_sync(server->nfs_client->cl_rpcclient, &msg, 0);
+ dprintk("%s: reply statfs: %d\n", __func__, status);
+ if (status)
+ return status;
+ info->rtmax = NFS_MAXDATA;
+ info->rtpref = fsinfo.tsize;
+ info->rtmult = fsinfo.bsize;
+ info->wtmax = NFS_MAXDATA;
+ info->wtpref = fsinfo.tsize;
+ info->wtmult = fsinfo.bsize;
+ info->dtpref = fsinfo.tsize;
+ info->maxfilesize = 0x7FFFFFFF;
+ info->lease_time = 0;
+ return 0;
+}
+
+/*
+ * One function for each procedure in the NFS protocol.
+ */
+static int
+nfs_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fattr *fattr)
+{
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_GETATTR],
+ .rpc_argp = fhandle,
+ .rpc_resp = fattr,
+ };
+ int status;
+
+ dprintk("NFS call getattr\n");
+ nfs_fattr_init(fattr);
+ status = rpc_call_sync(server->client, &msg, 0);
+ dprintk("NFS reply getattr: %d\n", status);
+ return status;
+}
+
+static int
+nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
+ struct iattr *sattr)
+{
+ struct inode *inode = dentry->d_inode;
+ struct nfs_sattrargs arg = {
+ .fh = NFS_FH(inode),
+ .sattr = sattr
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_SETATTR],
+ .rpc_argp = &arg,
+ .rpc_resp = fattr,
+ };
+ int status;
+
+ /* Mask out the non-modebit related stuff from attr->ia_mode */
+ sattr->ia_mode &= S_IALLUGO;
+
+ dprintk("NFS call setattr\n");
+ if (sattr->ia_valid & ATTR_FILE)
+ msg.rpc_cred = nfs_file_cred(sattr->ia_file);
+ nfs_fattr_init(fattr);
+ status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ if (status == 0)
+ nfs_setattr_update_inode(inode, sattr);
+ dprintk("NFS reply setattr: %d\n", status);
+ return status;
+}
+
+static int
+nfs_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name,
+ struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+{
+ struct nfs_diropargs arg = {
+ .fh = NFS_FH(dir),
+ .name = name->name,
+ .len = name->len
+ };
+ struct nfs_diropok res = {
+ .fh = fhandle,
+ .fattr = fattr
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_LOOKUP],
+ .rpc_argp = &arg,
+ .rpc_resp = &res,
+ };
+ int status;
+
+ dprintk("NFS call lookup %s\n", name->name);
+ nfs_fattr_init(fattr);
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ dprintk("NFS reply lookup: %d\n", status);
+ return status;
+}
+
+static int nfs_proc_readlink(struct inode *inode, struct page *page,
+ unsigned int pgbase, unsigned int pglen)
+{
+ struct nfs_readlinkargs args = {
+ .fh = NFS_FH(inode),
+ .pgbase = pgbase,
+ .pglen = pglen,
+ .pages = &page
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_READLINK],
+ .rpc_argp = &args,
+ };
+ int status;
+
+ dprintk("NFS call readlink\n");
+ status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ dprintk("NFS reply readlink: %d\n", status);
+ return status;
+}
+
+struct nfs_createdata {
+ struct nfs_createargs arg;
+ struct nfs_diropok res;
+ struct nfs_fh fhandle;
+ struct nfs_fattr fattr;
+};
+
+static struct nfs_createdata *nfs_alloc_createdata(struct inode *dir,
+ struct dentry *dentry, struct iattr *sattr)
+{
+ struct nfs_createdata *data;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (data != NULL) {
+ data->arg.fh = NFS_FH(dir);
+ data->arg.name = dentry->d_name.name;
+ data->arg.len = dentry->d_name.len;
+ data->arg.sattr = sattr;
+ nfs_fattr_init(&data->fattr);
+ data->fhandle.size = 0;
+ data->res.fh = &data->fhandle;
+ data->res.fattr = &data->fattr;
+ }
+ return data;
+}
+
+static void nfs_free_createdata(const struct nfs_createdata *data)
+{
+ kfree(data);
+}
+
+static int
+nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ int flags, struct nfs_open_context *ctx)
+{
+ struct nfs_createdata *data;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_CREATE],
+ };
+ int status = -ENOMEM;
+
+ dprintk("NFS call create %s\n", dentry->d_name.name);
+ data = nfs_alloc_createdata(dir, dentry, sattr);
+ if (data == NULL)
+ goto out;
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_mark_for_revalidate(dir);
+ if (status == 0)
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ nfs_free_createdata(data);
+out:
+ dprintk("NFS reply create: %d\n", status);
+ return status;
+}
+
+/*
+ * In NFSv2, mknod is grafted onto the create call.
+ */
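+/*
+ * NFSv2 has no MKNOD procedure, so the device number is smuggled to the
+ * server in the size attribute of the CREATE call (encoded with
+ * new_encode_dev() below). FIFOs are first created as character
+ * specials with no size attribute, and retried with the real FIFO mode
+ * if the server rejects that encoding with -EINVAL.
+ */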
+static int
+nfs_proc_mknod(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
+ dev_t rdev)
+{
+ struct nfs_createdata *data;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_CREATE],
+ };
+ umode_t mode;
+ int status = -ENOMEM;
+
+ dprintk("NFS call mknod %s\n", dentry->d_name.name);
+
+ mode = sattr->ia_mode;
+ if (S_ISFIFO(mode)) {
+ sattr->ia_mode = (mode & ~S_IFMT) | S_IFCHR;
+ sattr->ia_valid &= ~ATTR_SIZE;
+ } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
+ sattr->ia_valid |= ATTR_SIZE;
+ sattr->ia_size = new_encode_dev(rdev); /* get out your barf bag */
+ }
+
+ data = nfs_alloc_createdata(dir, dentry, sattr);
+ if (data == NULL)
+ goto out;
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
+
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_mark_for_revalidate(dir);
+
+ if (status == -EINVAL && S_ISFIFO(mode)) {
+ sattr->ia_mode = mode;
+ nfs_fattr_init(data->res.fattr);
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ }
+ if (status == 0)
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ nfs_free_createdata(data);
+out:
+ dprintk("NFS reply mknod: %d\n", status);
+ return status;
+}
+
+static int
+nfs_proc_remove(struct inode *dir, struct qstr *name)
+{
+ struct nfs_removeargs arg = {
+ .fh = NFS_FH(dir),
+ .name.len = name->len,
+ .name.name = name->name,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_REMOVE],
+ .rpc_argp = &arg,
+ };
+ int status;
+
+ dprintk("NFS call remove %s\n", name->name);
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_mark_for_revalidate(dir);
+
+ dprintk("NFS reply remove: %d\n", status);
+ return status;
+}
+
+static void
+nfs_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
+{
+ msg->rpc_proc = &nfs_procedures[NFSPROC_REMOVE];
+}
+
+static void nfs_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
+{
+ rpc_call_start(task);
+}
+
+static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir)
+{
+ if (nfs_async_handle_expired_key(task))
+ return 0;
+ nfs_mark_for_revalidate(dir);
+ return 1;
+}
+
+static void
+nfs_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
+{
+ msg->rpc_proc = &nfs_procedures[NFSPROC_RENAME];
+}
+
+static void nfs_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
+{
+ rpc_call_start(task);
+}
+
+static int
+nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
+ struct inode *new_dir)
+{
+ if (nfs_async_handle_expired_key(task))
+ return 0;
+ nfs_mark_for_revalidate(old_dir);
+ nfs_mark_for_revalidate(new_dir);
+ return 1;
+}
+
+static int
+nfs_proc_rename(struct inode *old_dir, struct qstr *old_name,
+ struct inode *new_dir, struct qstr *new_name)
+{
+ struct nfs_renameargs arg = {
+ .old_dir = NFS_FH(old_dir),
+ .old_name = old_name,
+ .new_dir = NFS_FH(new_dir),
+ .new_name = new_name,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_RENAME],
+ .rpc_argp = &arg,
+ };
+ int status;
+
+ dprintk("NFS call rename %s -> %s\n", old_name->name, new_name->name);
+ status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
+ nfs_mark_for_revalidate(old_dir);
+ nfs_mark_for_revalidate(new_dir);
+ dprintk("NFS reply rename: %d\n", status);
+ return status;
+}
+
+static int
+nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+{
+ struct nfs_linkargs arg = {
+ .fromfh = NFS_FH(inode),
+ .tofh = NFS_FH(dir),
+ .toname = name->name,
+ .tolen = name->len
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_LINK],
+ .rpc_argp = &arg,
+ };
+ int status;
+
+ dprintk("NFS call link %s\n", name->name);
+ status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+ nfs_mark_for_revalidate(inode);
+ nfs_mark_for_revalidate(dir);
+ dprintk("NFS reply link: %d\n", status);
+ return status;
+}
+
+static int
+nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
+ unsigned int len, struct iattr *sattr)
+{
+ struct nfs_fh *fh;
+ struct nfs_fattr *fattr;
+ struct nfs_symlinkargs arg = {
+ .fromfh = NFS_FH(dir),
+ .fromname = dentry->d_name.name,
+ .fromlen = dentry->d_name.len,
+ .pages = &page,
+ .pathlen = len,
+ .sattr = sattr
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_SYMLINK],
+ .rpc_argp = &arg,
+ };
+ int status = -ENAMETOOLONG;
+
+ dprintk("NFS call symlink %s\n", dentry->d_name.name);
+
+ if (len > NFS2_MAXPATHLEN)
+ goto out;
+
+ fh = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ status = -ENOMEM;
+ if (fh == NULL || fattr == NULL)
+ goto out_free;
+
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_mark_for_revalidate(dir);
+
+ /*
+ * V2 SYMLINK requests don't return any attributes. Setting the
+ * filehandle size to zero indicates to nfs_instantiate that it
+ * should fill in the data with a LOOKUP call on the wire.
+ */
+ if (status == 0)
+ status = nfs_instantiate(dentry, fh, fattr);
+
+out_free:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fh);
+out:
+ dprintk("NFS reply symlink: %d\n", status);
+ return status;
+}
+
+static int
+nfs_proc_mkdir(struct inode *dir, struct dentry *dentry, struct iattr *sattr)
+{
+ struct nfs_createdata *data;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_MKDIR],
+ };
+ int status = -ENOMEM;
+
+ dprintk("NFS call mkdir %s\n", dentry->d_name.name);
+ data = nfs_alloc_createdata(dir, dentry, sattr);
+ if (data == NULL)
+ goto out;
+ msg.rpc_argp = &data->arg;
+ msg.rpc_resp = &data->res;
+
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_mark_for_revalidate(dir);
+ if (status == 0)
+ status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
+ nfs_free_createdata(data);
+out:
+ dprintk("NFS reply mkdir: %d\n", status);
+ return status;
+}
+
+static int
+nfs_proc_rmdir(struct inode *dir, struct qstr *name)
+{
+ struct nfs_diropargs arg = {
+ .fh = NFS_FH(dir),
+ .name = name->name,
+ .len = name->len
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_RMDIR],
+ .rpc_argp = &arg,
+ };
+ int status;
+
+ dprintk("NFS call rmdir %s\n", name->name);
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+ nfs_mark_for_revalidate(dir);
+ dprintk("NFS reply rmdir: %d\n", status);
+ return status;
+}
+
+/*
+ * The READDIR implementation is somewhat hackish - we pass a temporary
+ * buffer to the encode function, which installs it in the receive
+ * iovec. The decode function just parses the reply to make sure it is
+ * syntactically correct; the entries themselves are decoded from
+ * nfs_readdir by calling the decode_entry function directly.
+ */
+static int
+nfs_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+ u64 cookie, struct page **pages, unsigned int count, int plus)
+{
+ struct inode *dir = dentry->d_inode;
+ struct nfs_readdirargs arg = {
+ .fh = NFS_FH(dir),
+ .cookie = cookie,
+ .count = count,
+ .pages = pages,
+ };
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_READDIR],
+ .rpc_argp = &arg,
+ .rpc_cred = cred,
+ };
+ int status;
+
+ dprintk("NFS call readdir %d\n", (unsigned int)cookie);
+ status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+
+ nfs_invalidate_atime(dir);
+
+ dprintk("NFS reply readdir: %d\n", status);
+ return status;
+}
+
+static int
+nfs_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsstat *stat)
+{
+ struct nfs2_fsstat fsinfo;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_STATFS],
+ .rpc_argp = fhandle,
+ .rpc_resp = &fsinfo,
+ };
+ int status;
+
+ dprintk("NFS call statfs\n");
+ nfs_fattr_init(stat->fattr);
+ status = rpc_call_sync(server->client, &msg, 0);
+ dprintk("NFS reply statfs: %d\n", status);
+ if (status)
+ goto out;
+ stat->tbytes = (u64)fsinfo.blocks * fsinfo.bsize;
+ stat->fbytes = (u64)fsinfo.bfree * fsinfo.bsize;
+ stat->abytes = (u64)fsinfo.bavail * fsinfo.bsize;
+ stat->tfiles = 0;
+ stat->ffiles = 0;
+ stat->afiles = 0;
+out:
+ return status;
+}
+
+static int
+nfs_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_fsinfo *info)
+{
+ struct nfs2_fsstat fsinfo;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs_procedures[NFSPROC_STATFS],
+ .rpc_argp = fhandle,
+ .rpc_resp = &fsinfo,
+ };
+ int status;
+
+ dprintk("NFS call fsinfo\n");
+ nfs_fattr_init(info->fattr);
+ status = rpc_call_sync(server->client, &msg, 0);
+ dprintk("NFS reply fsinfo: %d\n", status);
+ if (status)
+ goto out;
+ info->rtmax = NFS_MAXDATA;
+ info->rtpref = fsinfo.tsize;
+ info->rtmult = fsinfo.bsize;
+ info->wtmax = NFS_MAXDATA;
+ info->wtpref = fsinfo.tsize;
+ info->wtmult = fsinfo.bsize;
+ info->dtpref = fsinfo.tsize;
+ info->maxfilesize = 0x7FFFFFFF;
+ info->lease_time = 0;
+out:
+ return status;
+}
+
+static int
+nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+ struct nfs_pathconf *info)
+{
+ info->max_link = 0;
+ info->max_namelen = NFS2_MAXNAMLEN;
+ return 0;
+}
+
+static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
+{
+ if (nfs_async_handle_expired_key(task))
+ return -EAGAIN;
+
+ nfs_invalidate_atime(data->inode);
+ if (task->tk_status >= 0) {
+ nfs_refresh_inode(data->inode, data->res.fattr);
+ /* Emulate the eof flag, which isn't normally needed in NFSv2
+ * as the server is guaranteed to always return the file attributes.
+ */
+ if (data->args.offset + data->args.count >= data->res.fattr->size)
+ data->res.eof = 1;
+ }
+ return 0;
+}
+
+static void nfs_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
+{
+ msg->rpc_proc = &nfs_procedures[NFSPROC_READ];
+}
+
+static void nfs_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
+{
+ rpc_call_start(task);
+}
+
+static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
+{
+ if (nfs_async_handle_expired_key(task))
+ return -EAGAIN;
+
+ if (task->tk_status >= 0)
+ nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr);
+ return 0;
+}
+
+static void nfs_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
+{
+ /* Note: NFSv2 ignores @stable and always uses NFS_FILE_SYNC */
+ data->args.stable = NFS_FILE_SYNC;
+ msg->rpc_proc = &nfs_procedures[NFSPROC_WRITE];
+}
+
+static void nfs_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
+{
+ rpc_call_start(task);
+}
+
+static void
+nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
+{
+ BUG();
+}
+
+static int
+nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+ struct inode *inode = filp->f_path.dentry->d_inode;
+
+ return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
+}
+
+/* Helper functions for NFS lock bounds checking */
+#define NFS_LOCK32_OFFSET_MAX ((__s32)0x7fffffffUL)
+static int nfs_lock_check_bounds(const struct file_lock *fl)
+{
+ __s32 start, end;
+
+ start = (__s32)fl->fl_start;
+ if ((loff_t)start != fl->fl_start)
+ goto out_einval;
+
+ if (fl->fl_end != OFFSET_MAX) {
+ end = (__s32)fl->fl_end;
+ if ((loff_t)end != fl->fl_end)
+ goto out_einval;
+ } else
+ end = NFS_LOCK32_OFFSET_MAX;
+
+ if (start < 0 || start > end)
+ goto out_einval;
+ return 0;
+out_einval:
+ return -EINVAL;
+}
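+
+/*
+ * The bounds check above exists because NFSv2 locking runs over NLM
+ * protocol versions that carry 32-bit signed offsets on the wire: any
+ * lock whose start or end does not survive a round trip through __s32
+ * is rejected with -EINVAL before an RPC is sent, and an OFFSET_MAX
+ * end (a lock to end-of-file) is treated as NFS_LOCK32_OFFSET_MAX.
+ */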
+
+const struct nfs_rpc_ops nfs_v2_clientops = {
+ .version = 2, /* protocol version */
+ .dentry_ops = &nfs_dentry_operations,
+ .dir_inode_ops = &nfs_dir_inode_operations,
+ .file_inode_ops = &nfs_file_inode_operations,
+ .file_ops = &nfs_file_operations,
+ .getroot = nfs_proc_get_root,
+ .getattr = nfs_proc_getattr,
+ .setattr = nfs_proc_setattr,
+ .lookup = nfs_proc_lookup,
+ .access = NULL, /* access */
+ .readlink = nfs_proc_readlink,
+ .create = nfs_proc_create,
+ .remove = nfs_proc_remove,
+ .unlink_setup = nfs_proc_unlink_setup,
+ .unlink_rpc_prepare = nfs_proc_unlink_rpc_prepare,
+ .unlink_done = nfs_proc_unlink_done,
+ .rename = nfs_proc_rename,
+ .rename_setup = nfs_proc_rename_setup,
+ .rename_rpc_prepare = nfs_proc_rename_rpc_prepare,
+ .rename_done = nfs_proc_rename_done,
+ .link = nfs_proc_link,
+ .symlink = nfs_proc_symlink,
+ .mkdir = nfs_proc_mkdir,
+ .rmdir = nfs_proc_rmdir,
+ .readdir = nfs_proc_readdir,
+ .mknod = nfs_proc_mknod,
+ .statfs = nfs_proc_statfs,
+ .fsinfo = nfs_proc_fsinfo,
+ .pathconf = nfs_proc_pathconf,
+ .decode_dirent = nfs2_decode_dirent,
+ .read_setup = nfs_proc_read_setup,
+ .read_rpc_prepare = nfs_proc_read_rpc_prepare,
+ .read_done = nfs_read_done,
+ .write_setup = nfs_proc_write_setup,
+ .write_rpc_prepare = nfs_proc_write_rpc_prepare,
+ .write_done = nfs_write_done,
+ .commit_setup = nfs_proc_commit_setup,
+ .lock = nfs_proc_lock,
+ .lock_check_bounds = nfs_lock_check_bounds,
+ .close_context = nfs_close_context,
+ .init_client = nfs_init_client,
+};
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
new file mode 100644
index 00000000..0a4be28c
--- /dev/null
+++ b/fs/nfs/read.c
@@ -0,0 +1,699 @@
+/*
+ * linux/fs/nfs/read.c
+ *
+ * Block I/O for NFS
+ *
+ * Partial copy of Linus' read cache modifications to fs/nfs/file.c
+ * modified for async RPC by okir@monad.swb.de
+ */
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_page.h>
+#include <linux/module.h>
+
+#include "pnfs.h"
+
+#include "nfs4_fs.h"
+#include "internal.h"
+#include "iostat.h"
+#include "fscache.h"
+
+#define NFSDBG_FACILITY NFSDBG_PAGECACHE
+
+static const struct nfs_pageio_ops nfs_pageio_read_ops;
+static const struct rpc_call_ops nfs_read_partial_ops;
+static const struct rpc_call_ops nfs_read_full_ops;
+
+static struct kmem_cache *nfs_rdata_cachep;
+
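+/*
+ * Small reads use the page pointer array embedded in struct
+ * nfs_read_data; only requests spanning more pages than
+ * ARRAY_SIZE(p->page_array) pay for a separate kcalloc(), and
+ * nfs_readdata_free() below frees the vector only in that case.
+ */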
+struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
+{
+ struct nfs_read_data *p;
+
+ p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
+ if (p) {
+ INIT_LIST_HEAD(&p->pages);
+ p->npages = pagecount;
+ if (pagecount <= ARRAY_SIZE(p->page_array))
+ p->pagevec = p->page_array;
+ else {
+ p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+ if (!p->pagevec) {
+ kmem_cache_free(nfs_rdata_cachep, p);
+ p = NULL;
+ }
+ }
+ }
+ return p;
+}
+
+void nfs_readdata_free(struct nfs_read_data *p)
+{
+ if (p && (p->pagevec != &p->page_array[0]))
+ kfree(p->pagevec);
+ kmem_cache_free(nfs_rdata_cachep, p);
+}
+
+void nfs_readdata_release(struct nfs_read_data *rdata)
+{
+ put_nfs_open_context(rdata->args.context);
+ nfs_readdata_free(rdata);
+}
+
+static int nfs_return_empty_page(struct page *page)
+{
+ zero_user(page, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ unlock_page(page);
+ return 0;
+}
+
+static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
+{
+ unsigned int remainder = data->args.count - data->res.count;
+ unsigned int base = data->args.pgbase + data->res.count;
+ unsigned int pglen;
+ struct page **pages;
+
+ if (data->res.eof == 0 || remainder == 0)
+ return;
+ /*
+ * Note: "remainder" can never be negative, since we check for
+ * this in the XDR code.
+ */
+ pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
+ base &= ~PAGE_CACHE_MASK;
+ pglen = PAGE_CACHE_SIZE - base;
+ for (;;) {
+ if (remainder <= pglen) {
+ zero_user(*pages, base, remainder);
+ break;
+ }
+ zero_user(*pages, base, pglen);
+ pages++;
+ remainder -= pglen;
+ pglen = PAGE_CACHE_SIZE;
+ base = 0;
+ }
+}
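+
+/*
+ * A worked example for the zeroing above (hypothetical numbers, 4K
+ * pages): args.count = 8192, res.count = 5000, pgbase = 0, eof set.
+ * Then remainder = 3192 and base = 5000, so the loop starts at page
+ * index 1 (5000 >> PAGE_CACHE_SHIFT) with in-page offset 904; since
+ * remainder <= pglen (3192 == 4096 - 904), a single zero_user() clears
+ * the tail of the second page and the loop terminates.
+ */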
+
+void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode)
+{
+ nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops,
+ NFS_SERVER(inode)->rsize, 0);
+}
+
+void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
+{
+ pgio->pg_ops = &nfs_pageio_read_ops;
+ pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
+}
+EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
+
+static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode)
+{
+ if (!pnfs_pageio_init_read(pgio, inode))
+ nfs_pageio_init_read_mds(pgio, inode);
+}
+
+int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
+ struct page *page)
+{
+ struct nfs_page *new;
+ unsigned int len;
+ struct nfs_pageio_descriptor pgio;
+
+ len = nfs_page_length(page);
+ if (len == 0)
+ return nfs_return_empty_page(page);
+ new = nfs_create_request(ctx, inode, page, 0, len);
+ if (IS_ERR(new)) {
+ unlock_page(page);
+ return PTR_ERR(new);
+ }
+ if (len < PAGE_CACHE_SIZE)
+ zero_user_segment(page, len, PAGE_CACHE_SIZE);
+
+ nfs_pageio_init_read(&pgio, inode);
+ nfs_pageio_add_request(&pgio, new);
+ nfs_pageio_complete(&pgio);
+ return 0;
+}
+
+static void nfs_readpage_release(struct nfs_page *req)
+{
+ struct inode *d_inode = req->wb_context->dentry->d_inode;
+
+ if (PageUptodate(req->wb_page))
+ nfs_readpage_to_fscache(d_inode, req->wb_page, 0);
+
+ unlock_page(req->wb_page);
+
+ dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
+ req->wb_context->dentry->d_inode->i_sb->s_id,
+ (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+ req->wb_bytes,
+ (long long)req_offset(req));
+ nfs_release_request(req);
+}
+
+int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops)
+{
+ struct inode *inode = data->inode;
+ int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = data->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .task = &data->task,
+ .rpc_client = clnt,
+ .rpc_message = &msg,
+ .callback_ops = call_ops,
+ .callback_data = data,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC | swap_flags,
+ };
+
+ /* Set up the initial task struct. */
+ NFS_PROTO(inode)->read_setup(data, &msg);
+
+ dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ "
+ "offset %llu)\n",
+ data->task.tk_pid,
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ rpc_put_task(task);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nfs_initiate_read);
+
+/*
+ * Set up the NFS read request struct
+ */
+static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
+ unsigned int count, unsigned int offset)
+{
+ struct inode *inode = req->wb_context->dentry->d_inode;
+
+ data->req = req;
+ data->inode = inode;
+ data->cred = req->wb_context->cred;
+
+ data->args.fh = NFS_FH(inode);
+ data->args.offset = req_offset(req) + offset;
+ data->args.pgbase = req->wb_pgbase + offset;
+ data->args.pages = data->pagevec;
+ data->args.count = count;
+ data->args.context = get_nfs_open_context(req->wb_context);
+ data->args.lock_context = req->wb_lock_context;
+
+ data->res.fattr = &data->fattr;
+ data->res.count = count;
+ data->res.eof = 0;
+ nfs_fattr_init(&data->fattr);
+}
+
+static int nfs_do_read(struct nfs_read_data *data,
+ const struct rpc_call_ops *call_ops)
+{
+ struct inode *inode = data->args.context->dentry->d_inode;
+
+ return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops);
+}
+
+static int
+nfs_do_multiple_reads(struct list_head *head,
+ const struct rpc_call_ops *call_ops)
+{
+ struct nfs_read_data *data;
+ int ret = 0;
+
+ while (!list_empty(head)) {
+ int ret2;
+
+ data = list_entry(head->next, struct nfs_read_data, list);
+ list_del_init(&data->list);
+
+ ret2 = nfs_do_read(data, call_ops);
+ if (ret == 0)
+ ret = ret2;
+ }
+ return ret;
+}
+
+static void
+nfs_async_read_error(struct list_head *head)
+{
+ struct nfs_page *req;
+
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_readpage_release(req);
+ }
+}
+
+/*
+ * Generate multiple requests to fill a single page.
+ *
+ * We optimize to reduce the number of read operations on the wire. If we
+ * detect that we're reading a page, or an area of a page, that is past the
+ * end of file, we do not generate NFS read operations but just clear the
+ * parts of the page that would have come back zero from the server anyway.
+ *
+ * We rely on the cached value of i_size to make this determination; another
+ * client can fill pages on the server past our cached end-of-file, but we
+ * won't see the new data until our attribute cache is updated. This is more
+ * or less conventional NFS client behavior.
+ */
+static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+{
+ struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+ struct page *page = req->wb_page;
+ struct nfs_read_data *data;
+ size_t rsize = desc->pg_bsize, nbytes;
+ unsigned int offset;
+ int requests = 0;
+ int ret = 0;
+
+ nfs_list_remove_request(req);
+
+ offset = 0;
+ nbytes = desc->pg_count;
+ do {
+ size_t len = min(nbytes, rsize);
+
+ data = nfs_readdata_alloc(1);
+ if (!data)
+ goto out_bad;
+ data->pagevec[0] = page;
+ nfs_read_rpcsetup(req, data, len, offset);
+ list_add(&data->list, res);
+ requests++;
+ nbytes -= len;
+ offset += len;
+ } while (nbytes != 0);
+ atomic_set(&req->wb_complete, requests);
+ desc->pg_rpc_callops = &nfs_read_partial_ops;
+ return ret;
+out_bad:
+ while (!list_empty(res)) {
+ data = list_entry(res->next, struct nfs_read_data, list);
+ list_del(&data->list);
+ nfs_readdata_release(data);
+ }
+ nfs_readpage_release(req);
+ return -ENOMEM;
+}
+
+static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+{
+ struct nfs_page *req;
+ struct page **pages;
+ struct nfs_read_data *data;
+ struct list_head *head = &desc->pg_list;
+ int ret = 0;
+
+ data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base,
+ desc->pg_count));
+ if (!data) {
+ nfs_async_read_error(head);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pages = data->pagevec;
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_list_add_request(req, &data->pages);
+ *pages++ = req->wb_page;
+ }
+ req = nfs_list_entry(data->pages.next);
+
+ nfs_read_rpcsetup(req, data, desc->pg_count, 0);
+ list_add(&data->list, res);
+ desc->pg_rpc_callops = &nfs_read_full_ops;
+out:
+ return ret;
+}
+
+int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head)
+{
+ if (desc->pg_bsize < PAGE_CACHE_SIZE)
+ return nfs_pagein_multi(desc, head);
+ return nfs_pagein_one(desc, head);
+}
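+
+/*
+ * Example (hypothetical mount): with rsize = 1024 and 4K pages,
+ * nfs_generic_pagein() takes the nfs_pagein_multi() path and a single
+ * page becomes four 1024-byte READ RPCs at page offsets 0, 1024, 2048
+ * and 3072; with rsize >= PAGE_CACHE_SIZE, nfs_pagein_one() instead
+ * coalesces the whole page list into one request.
+ */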
+
+static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
+{
+ LIST_HEAD(head);
+ int ret;
+
+ ret = nfs_generic_pagein(desc, &head);
+ if (ret == 0)
+ ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops);
+ return ret;
+}
+
+static const struct nfs_pageio_ops nfs_pageio_read_ops = {
+ .pg_test = nfs_generic_pg_test,
+ .pg_doio = nfs_generic_pg_readpages,
+};
+
+/*
+ * This is the callback from RPC telling us whether a reply was
+ * received or some error occurred (timeout or socket shutdown).
+ */
+int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
+{
+ int status;
+
+ dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
+ task->tk_status);
+
+ status = NFS_PROTO(data->inode)->read_done(task, data);
+ if (status != 0)
+ return status;
+
+ nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
+
+ if (task->tk_status == -ESTALE) {
+ set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
+ nfs_mark_for_revalidate(data->inode);
+ }
+ return 0;
+}
+
+static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
+{
+ struct nfs_readargs *argp = &data->args;
+ struct nfs_readres *resp = &data->res;
+
+ if (resp->eof || resp->count == argp->count)
+ return;
+
+ /* This is a short read! */
+ nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
+ /* Has the server at least made some progress? */
+ if (resp->count == 0)
+ return;
+
+ /* Yes, so retry the read at the end of the data */
+ data->mds_offset += resp->count;
+ argp->offset += resp->count;
+ argp->pgbase += resp->count;
+ argp->count -= resp->count;
+ rpc_restart_call_prepare(task);
+}
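+
+/*
+ * The short-read arithmetic above, by example (hypothetical numbers):
+ * a 16384-byte READ that returns only 4096 bytes without eof restarts
+ * the call with offset and pgbase advanced by 4096 and count reduced
+ * to 12288, repeating until the server reports eof, satisfies the
+ * remaining count, or makes no progress at all, in which case the
+ * retry is abandoned.
+ */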
+
+/*
+ * Handle a read reply that fills part of a page.
+ */
+static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+
+ if (nfs_readpage_result(task, data) != 0)
+ return;
+ if (task->tk_status < 0)
+ return;
+
+ nfs_readpage_truncate_uninitialised_page(data);
+ nfs_readpage_retry(task, data);
+}
+
+static void nfs_readpage_release_partial(void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+ struct nfs_page *req = data->req;
+ struct page *page = req->wb_page;
+ int status = data->task.tk_status;
+
+ if (status < 0)
+ set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
+
+ if (atomic_dec_and_test(&req->wb_complete)) {
+ if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
+ SetPageUptodate(page);
+ nfs_readpage_release(req);
+ }
+ nfs_readdata_release(calldata);
+}
+
+void nfs_read_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+ NFS_PROTO(data->inode)->read_rpc_prepare(task, data);
+}
+
+static const struct rpc_call_ops nfs_read_partial_ops = {
+ .rpc_call_prepare = nfs_read_prepare,
+ .rpc_call_done = nfs_readpage_result_partial,
+ .rpc_release = nfs_readpage_release_partial,
+};
+
+static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
+{
+ unsigned int count = data->res.count;
+ unsigned int base = data->args.pgbase;
+ struct page **pages;
+
+ if (data->res.eof)
+ count = data->args.count;
+ if (unlikely(count == 0))
+ return;
+ pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
+ base &= ~PAGE_CACHE_MASK;
+ count += base;
+ for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
+ SetPageUptodate(*pages);
+ if (count == 0)
+ return;
+ /* Was this a short read? */
+ if (data->res.eof || data->res.count == data->args.count)
+ SetPageUptodate(*pages);
+}
+
+/*
+ * This is the callback from RPC telling us whether a reply was
+ * received or some error occurred (timeout or socket shutdown).
+ */
+static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+
+ if (nfs_readpage_result(task, data) != 0)
+ return;
+ if (task->tk_status < 0)
+ return;
+ /*
+ * Note: nfs_readpage_retry may change the values of
+ * data->args. In the multi-page case, we therefore need
+ * to ensure that we call nfs_readpage_set_pages_uptodate()
+ * first.
+ */
+ nfs_readpage_truncate_uninitialised_page(data);
+ nfs_readpage_set_pages_uptodate(data);
+ nfs_readpage_retry(task, data);
+}
+
+static void nfs_readpage_release_full(void *calldata)
+{
+ struct nfs_read_data *data = calldata;
+
+ while (!list_empty(&data->pages)) {
+ struct nfs_page *req = nfs_list_entry(data->pages.next);
+
+ nfs_list_remove_request(req);
+ nfs_readpage_release(req);
+ }
+ nfs_readdata_release(calldata);
+}
+
+static const struct rpc_call_ops nfs_read_full_ops = {
+ .rpc_call_prepare = nfs_read_prepare,
+ .rpc_call_done = nfs_readpage_result_full,
+ .rpc_release = nfs_readpage_release_full,
+};
+
+/*
+ * Read a page over NFS.
+ * We read the page synchronously in the following case:
+ * - The error flag is set for this page. This happens only when a
+ * previous async read operation failed.
+ */
+int nfs_readpage(struct file *file, struct page *page)
+{
+ struct nfs_open_context *ctx;
+ struct inode *inode = page->mapping->host;
+ int error;
+
+ dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
+ page, PAGE_CACHE_SIZE, page->index);
+ nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
+ nfs_add_stats(inode, NFSIOS_READPAGES, 1);
+
+ /*
+ * Try to flush any pending writes to the file.
+ *
+ * NOTE! Because we own the page lock, there cannot
+ * be any new pending writes generated at this point
+ * for this page (other pages can be written to).
+ */
+ error = nfs_wb_page(inode, page);
+ if (error)
+ goto out_unlock;
+ if (PageUptodate(page))
+ goto out_unlock;
+
+ error = -ESTALE;
+ if (NFS_STALE(inode))
+ goto out_unlock;
+
+ if (file == NULL) {
+ error = -EBADF;
+ ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
+ if (ctx == NULL)
+ goto out_unlock;
+ } else
+ ctx = get_nfs_open_context(nfs_file_open_context(file));
+
+ if (!IS_SYNC(inode)) {
+ error = nfs_readpage_from_fscache(ctx, inode, page);
+ if (error == 0)
+ goto out;
+ }
+
+ error = nfs_readpage_async(ctx, inode, page);
+
+out:
+ put_nfs_open_context(ctx);
+ return error;
+out_unlock:
+ unlock_page(page);
+ return error;
+}
+
+struct nfs_readdesc {
+ struct nfs_pageio_descriptor *pgio;
+ struct nfs_open_context *ctx;
+};
+
+static int
+readpage_async_filler(void *data, struct page *page)
+{
+ struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
+ struct inode *inode = page->mapping->host;
+ struct nfs_page *new;
+ unsigned int len;
+ int error;
+
+ len = nfs_page_length(page);
+ if (len == 0)
+ return nfs_return_empty_page(page);
+
+ new = nfs_create_request(desc->ctx, inode, page, 0, len);
+ if (IS_ERR(new))
+ goto out_error;
+
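+ /*
+  * Zero the tail of a partial page now, so that bytes past the
+  * valid length (typically past EOF) are never exposed.
+  */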
+ if (len < PAGE_CACHE_SIZE)
+ zero_user_segment(page, len, PAGE_CACHE_SIZE);
+ if (!nfs_pageio_add_request(desc->pgio, new)) {
+ error = desc->pgio->pg_error;
+ goto out_unlock;
+ }
+ return 0;
+out_error:
+ error = PTR_ERR(new);
+out_unlock:
+ unlock_page(page);
+ return error;
+}
+
+int nfs_readpages(struct file *filp, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages)
+{
+ struct nfs_pageio_descriptor pgio;
+ struct nfs_readdesc desc = {
+ .pgio = &pgio,
+ };
+ struct inode *inode = mapping->host;
+ unsigned long npages;
+ int ret = -ESTALE;
+
+ dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ nr_pages);
+ nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
+
+ if (NFS_STALE(inode))
+ goto out;
+
+ if (filp == NULL) {
+ desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
+ if (desc.ctx == NULL)
+ return -EBADF;
+ } else
+ desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));
+
+ /* attempt to read as many of the pages as possible from the cache
+ * - this returns -ENOBUFS immediately if the cookie is negative
+ */
+ ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
+ pages, &nr_pages);
+ if (ret == 0)
+ goto read_complete; /* all pages were read */
+
+ nfs_pageio_init_read(&pgio, inode);
+
+ ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
+
+ nfs_pageio_complete(&pgio);
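+ /* Round the bytes handed to the RPC layer up to whole pages for the stats. */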
+ npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ nfs_add_stats(inode, NFSIOS_READPAGES, npages);
+read_complete:
+ put_nfs_open_context(desc.ctx);
+out:
+ return ret;
+}
+
+int __init nfs_init_readpagecache(void)
+{
+ nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
+ sizeof(struct nfs_read_data),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (nfs_rdata_cachep == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void nfs_destroy_readpagecache(void)
+{
+ kmem_cache_destroy(nfs_rdata_cachep);
+}
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
new file mode 100644
index 00000000..4ac7fca7
--- /dev/null
+++ b/fs/nfs/super.c
@@ -0,0 +1,3149 @@
+/*
+ * linux/fs/nfs/super.c
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ *
+ * nfs superblock handling functions
+ *
+ * Modularised by Alan Cox <alan@lxorguk.ukuu.org.uk>, while hacking some
+ * experimental NFS changes. Modularisation taken straight from SYS5 fs.
+ *
+ * Change to nfs_read_super() to permit NFS mounts to multi-homed hosts.
+ * J.S.Peatfield@damtp.cam.ac.uk
+ *
+ * Split from inode.c by David Howells <dhowells@redhat.com>
+ *
+ * - superblocks are indexed on server only - all inodes, dentries, etc. associated with a
+ * particular server are held in the same superblock
+ * - NFS superblocks can have several effective roots to the dentry tree
+ * - directory type roots are spliced into the tree when a path from one root reaches the root
+ * of another (see nfs_lookup())
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/time.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/sunrpc/metrics.h>
+#include <linux/sunrpc/xprtsock.h>
+#include <linux/sunrpc/xprtrdma.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+#include <linux/lockd/bind.h>
+#include <linux/seq_file.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/nfs_idmap.h>
+#include <linux/vfs.h>
+#include <linux/inet.h>
+#include <linux/in6.h>
+#include <linux/slab.h>
+#include <net/ipv6.h>
+#include <linux/netdevice.h>
+#include <linux/nfs_xdr.h>
+#include <linux/magic.h>
+#include <linux/parser.h>
+#include <linux/nsproxy.h>
+#include <linux/rcupdate.h>
+
+#include <asm/uaccess.h>
+
+#include "nfs4_fs.h"
+#include "callback.h"
+#include "delegation.h"
+#include "iostat.h"
+#include "internal.h"
+#include "fscache.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_VFS
+
+#ifdef CONFIG_NFS_V3
+#define NFS_DEFAULT_VERSION 3
+#else
+#define NFS_DEFAULT_VERSION 2
+#endif
+
+enum {
+ /* Mount options that take no arguments */
+ Opt_soft, Opt_hard,
+ Opt_posix, Opt_noposix,
+ Opt_cto, Opt_nocto,
+ Opt_ac, Opt_noac,
+ Opt_lock, Opt_nolock,
+ Opt_udp, Opt_tcp, Opt_rdma,
+ Opt_acl, Opt_noacl,
+ Opt_rdirplus, Opt_nordirplus,
+ Opt_sharecache, Opt_nosharecache,
+ Opt_resvport, Opt_noresvport,
+ Opt_fscache, Opt_nofscache,
+
+ /* Mount options that take integer arguments */
+ Opt_port,
+ Opt_rsize, Opt_wsize, Opt_bsize,
+ Opt_timeo, Opt_retrans,
+ Opt_acregmin, Opt_acregmax,
+ Opt_acdirmin, Opt_acdirmax,
+ Opt_actimeo,
+ Opt_namelen,
+ Opt_mountport,
+ Opt_mountvers,
+ Opt_minorversion,
+
+ /* Mount options that take string arguments */
+ Opt_nfsvers,
+ Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
+ Opt_addr, Opt_mountaddr, Opt_clientaddr,
+ Opt_lookupcache,
+ Opt_fscache_uniq,
+ Opt_local_lock,
+
+ /* Special mount options */
+ Opt_userspace, Opt_deprecated, Opt_sloppy,
+
+ Opt_err
+};
+
+static const match_table_t nfs_mount_option_tokens = {
+ { Opt_userspace, "bg" },
+ { Opt_userspace, "fg" },
+ { Opt_userspace, "retry=%s" },
+
+ { Opt_sloppy, "sloppy" },
+
+ { Opt_soft, "soft" },
+ { Opt_hard, "hard" },
+ { Opt_deprecated, "intr" },
+ { Opt_deprecated, "nointr" },
+ { Opt_posix, "posix" },
+ { Opt_noposix, "noposix" },
+ { Opt_cto, "cto" },
+ { Opt_nocto, "nocto" },
+ { Opt_ac, "ac" },
+ { Opt_noac, "noac" },
+ { Opt_lock, "lock" },
+ { Opt_nolock, "nolock" },
+ { Opt_udp, "udp" },
+ { Opt_tcp, "tcp" },
+ { Opt_rdma, "rdma" },
+ { Opt_acl, "acl" },
+ { Opt_noacl, "noacl" },
+ { Opt_rdirplus, "rdirplus" },
+ { Opt_nordirplus, "nordirplus" },
+ { Opt_sharecache, "sharecache" },
+ { Opt_nosharecache, "nosharecache" },
+ { Opt_resvport, "resvport" },
+ { Opt_noresvport, "noresvport" },
+ { Opt_fscache, "fsc" },
+ { Opt_nofscache, "nofsc" },
+
+ { Opt_port, "port=%s" },
+ { Opt_rsize, "rsize=%s" },
+ { Opt_wsize, "wsize=%s" },
+ { Opt_bsize, "bsize=%s" },
+ { Opt_timeo, "timeo=%s" },
+ { Opt_retrans, "retrans=%s" },
+ { Opt_acregmin, "acregmin=%s" },
+ { Opt_acregmax, "acregmax=%s" },
+ { Opt_acdirmin, "acdirmin=%s" },
+ { Opt_acdirmax, "acdirmax=%s" },
+ { Opt_actimeo, "actimeo=%s" },
+ { Opt_namelen, "namlen=%s" },
+ { Opt_mountport, "mountport=%s" },
+ { Opt_mountvers, "mountvers=%s" },
+ { Opt_minorversion, "minorversion=%s" },
+
+ { Opt_nfsvers, "nfsvers=%s" },
+ { Opt_nfsvers, "vers=%s" },
+
+ { Opt_sec, "sec=%s" },
+ { Opt_proto, "proto=%s" },
+ { Opt_mountproto, "mountproto=%s" },
+ { Opt_addr, "addr=%s" },
+ { Opt_clientaddr, "clientaddr=%s" },
+ { Opt_mounthost, "mounthost=%s" },
+ { Opt_mountaddr, "mountaddr=%s" },
+
+ { Opt_lookupcache, "lookupcache=%s" },
+ { Opt_fscache_uniq, "fsc=%s" },
+ { Opt_local_lock, "local_lock=%s" },
+
+ /* The following needs to be listed after all other options */
+ { Opt_nfsvers, "v%s" },
+
+ { Opt_err, NULL }
+};
+
+enum {
+ Opt_xprt_udp, Opt_xprt_udp6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_rdma,
+
+ Opt_xprt_err
+};
+
+static const match_table_t nfs_xprt_protocol_tokens = {
+ { Opt_xprt_udp, "udp" },
+ { Opt_xprt_udp6, "udp6" },
+ { Opt_xprt_tcp, "tcp" },
+ { Opt_xprt_tcp6, "tcp6" },
+ { Opt_xprt_rdma, "rdma" },
+
+ { Opt_xprt_err, NULL }
+};
+
+enum {
+ Opt_sec_none, Opt_sec_sys,
+ Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
+ Opt_sec_lkey, Opt_sec_lkeyi, Opt_sec_lkeyp,
+ Opt_sec_spkm, Opt_sec_spkmi, Opt_sec_spkmp,
+
+ Opt_sec_err
+};
+
+static const match_table_t nfs_secflavor_tokens = {
+ { Opt_sec_none, "none" },
+ { Opt_sec_none, "null" },
+ { Opt_sec_sys, "sys" },
+
+ { Opt_sec_krb5, "krb5" },
+ { Opt_sec_krb5i, "krb5i" },
+ { Opt_sec_krb5p, "krb5p" },
+
+ { Opt_sec_lkey, "lkey" },
+ { Opt_sec_lkeyi, "lkeyi" },
+ { Opt_sec_lkeyp, "lkeyp" },
+
+ { Opt_sec_spkm, "spkm3" },
+ { Opt_sec_spkmi, "spkm3i" },
+ { Opt_sec_spkmp, "spkm3p" },
+
+ { Opt_sec_err, NULL }
+};
+
+enum {
+ Opt_lookupcache_all, Opt_lookupcache_positive,
+ Opt_lookupcache_none,
+
+ Opt_lookupcache_err
+};
+
+static match_table_t nfs_lookupcache_tokens = {
+ { Opt_lookupcache_all, "all" },
+ { Opt_lookupcache_positive, "pos" },
+ { Opt_lookupcache_positive, "positive" },
+ { Opt_lookupcache_none, "none" },
+
+ { Opt_lookupcache_err, NULL }
+};
+
+enum {
+ Opt_local_lock_all, Opt_local_lock_flock, Opt_local_lock_posix,
+ Opt_local_lock_none,
+
+ Opt_local_lock_err
+};
+
+static match_table_t nfs_local_lock_tokens = {
+ { Opt_local_lock_all, "all" },
+ { Opt_local_lock_flock, "flock" },
+ { Opt_local_lock_posix, "posix" },
+ { Opt_local_lock_none, "none" },
+
+ { Opt_local_lock_err, NULL }
+};
+
+enum {
+ Opt_vers_2, Opt_vers_3, Opt_vers_4, Opt_vers_4_0,
+ Opt_vers_4_1,
+
+ Opt_vers_err
+};
+
+static match_table_t nfs_vers_tokens = {
+ { Opt_vers_2, "2" },
+ { Opt_vers_3, "3" },
+ { Opt_vers_4, "4" },
+ { Opt_vers_4_0, "4.0" },
+ { Opt_vers_4_1, "4.1" },
+
+ { Opt_vers_err, NULL }
+};
+
+static void nfs_umount_begin(struct super_block *);
+static int nfs_statfs(struct dentry *, struct kstatfs *);
+static int nfs_show_options(struct seq_file *, struct dentry *);
+static int nfs_show_devname(struct seq_file *, struct dentry *);
+static int nfs_show_path(struct seq_file *, struct dentry *);
+static int nfs_show_stats(struct seq_file *, struct dentry *);
+static struct dentry *nfs_fs_mount(struct file_system_type *,
+ int, const char *, void *);
+static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data);
+static void nfs_put_super(struct super_block *);
+static void nfs_kill_super(struct super_block *);
+static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
+
+static struct file_system_type nfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs",
+ .mount = nfs_fs_mount,
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+struct file_system_type nfs_xdev_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs",
+ .mount = nfs_xdev_mount,
+ .kill_sb = nfs_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+static const struct super_operations nfs_sops = {
+ .alloc_inode = nfs_alloc_inode,
+ .destroy_inode = nfs_destroy_inode,
+ .write_inode = nfs_write_inode,
+ .put_super = nfs_put_super,
+ .statfs = nfs_statfs,
+ .evict_inode = nfs_evict_inode,
+ .umount_begin = nfs_umount_begin,
+ .show_options = nfs_show_options,
+ .show_devname = nfs_show_devname,
+ .show_path = nfs_show_path,
+ .show_stats = nfs_show_stats,
+ .remount_fs = nfs_remount,
+};
+
+#ifdef CONFIG_NFS_V4
+static int nfs4_validate_text_mount_data(void *options,
+ struct nfs_parsed_mount_data *args, const char *dev_name);
+static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
+ struct nfs_parsed_mount_data *data);
+static struct dentry *nfs4_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data);
+static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data);
+static struct dentry *nfs4_xdev_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data);
+static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data);
+static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data);
+static void nfs4_kill_super(struct super_block *sb);
+
+static struct file_system_type nfs4_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .mount = nfs4_mount,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+static struct file_system_type nfs4_remote_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .mount = nfs4_remote_mount,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+struct file_system_type nfs4_xdev_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .mount = nfs4_xdev_mount,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+static struct file_system_type nfs4_remote_referral_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .mount = nfs4_remote_referral_mount,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+struct file_system_type nfs4_referral_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "nfs4",
+ .mount = nfs4_referral_mount,
+ .kill_sb = nfs4_kill_super,
+ .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
+};
+
+static const struct super_operations nfs4_sops = {
+ .alloc_inode = nfs_alloc_inode,
+ .destroy_inode = nfs_destroy_inode,
+ .write_inode = nfs_write_inode,
+ .put_super = nfs_put_super,
+ .statfs = nfs_statfs,
+ .evict_inode = nfs4_evict_inode,
+ .umount_begin = nfs_umount_begin,
+ .show_options = nfs_show_options,
+ .show_devname = nfs_show_devname,
+ .show_path = nfs_show_path,
+ .show_stats = nfs_show_stats,
+ .remount_fs = nfs_remount,
+};
+#endif
+
+static struct shrinker acl_shrinker = {
+ .shrink = nfs_access_cache_shrinker,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/*
+ * Register the NFS filesystems
+ */
+int __init register_nfs_fs(void)
+{
+ int ret;
+
+ ret = register_filesystem(&nfs_fs_type);
+ if (ret < 0)
+ goto error_0;
+
+ ret = nfs_register_sysctl();
+ if (ret < 0)
+ goto error_1;
+#ifdef CONFIG_NFS_V4
+ ret = register_filesystem(&nfs4_fs_type);
+ if (ret < 0)
+ goto error_2;
+#endif
+ register_shrinker(&acl_shrinker);
+ return 0;
+
+#ifdef CONFIG_NFS_V4
+error_2:
+ nfs_unregister_sysctl();
+#endif
+error_1:
+ unregister_filesystem(&nfs_fs_type);
+error_0:
+ return ret;
+}
+
+/*
+ * Unregister the NFS filesystems
+ */
+void __exit unregister_nfs_fs(void)
+{
+ unregister_shrinker(&acl_shrinker);
+#ifdef CONFIG_NFS_V4
+ unregister_filesystem(&nfs4_fs_type);
+#endif
+ nfs_unregister_sysctl();
+ unregister_filesystem(&nfs_fs_type);
+}
+
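+/*
+ * The first active reference takes an extra hold on sb->s_active so the
+ * superblock cannot be torn down while I/O is outstanding;
+ * nfs_sb_deactive() drops that hold when the last reference goes away.
+ */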
+void nfs_sb_active(struct super_block *sb)
+{
+ struct nfs_server *server = NFS_SB(sb);
+
+ if (atomic_inc_return(&server->active) == 1)
+ atomic_inc(&sb->s_active);
+}
+
+void nfs_sb_deactive(struct super_block *sb)
+{
+ struct nfs_server *server = NFS_SB(sb);
+
+ if (atomic_dec_and_test(&server->active))
+ deactivate_super(sb);
+}
+
+/*
+ * Deliver file system statistics to userspace
+ */
+static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct nfs_server *server = NFS_SB(dentry->d_sb);
+ unsigned char blockbits;
+ unsigned long blockres;
+ struct nfs_fh *fh = NFS_FH(dentry->d_inode);
+ struct nfs_fsstat res;
+ int error = -ENOMEM;
+
+ res.fattr = nfs_alloc_fattr();
+ if (res.fattr == NULL)
+ goto out_err;
+
+ error = server->nfs_client->rpc_ops->statfs(server, fh, &res);
+ if (unlikely(error == -ESTALE)) {
+ struct dentry *pd_dentry;
+
+ pd_dentry = dget_parent(dentry);
+ if (pd_dentry != NULL) {
+ nfs_zap_caches(pd_dentry->d_inode);
+ dput(pd_dentry);
+ }
+ }
+ nfs_free_fattr(res.fattr);
+ if (error < 0)
+ goto out_err;
+
+ buf->f_type = NFS_SUPER_MAGIC;
+
+ /*
+ * Current versions of glibc do not correctly handle the
+ * case where f_frsize != f_bsize. Eventually we want to
+ * report the value of wtmult in this field.
+ */
+ buf->f_frsize = dentry->d_sb->s_blocksize;
+
+ /*
+ * On most *nix systems, f_blocks, f_bfree, and f_bavail
+ * are reported in units of f_frsize. Linux hasn't had
+ * an f_frsize field in its statfs struct until recently,
+ * thus historically Linux's sys_statfs reports these
+ * fields in units of f_bsize.
+ */
+ buf->f_bsize = dentry->d_sb->s_blocksize;
+ blockbits = dentry->d_sb->s_blocksize_bits;
+ blockres = (1 << blockbits) - 1;
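+ /*
+  * Convert byte totals to block counts, rounding up so a partially
+  * used block still counts: with 4096-byte blocks, 10000 total
+  * bytes reports f_blocks = 3.
+  */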
+ buf->f_blocks = (res.tbytes + blockres) >> blockbits;
+ buf->f_bfree = (res.fbytes + blockres) >> blockbits;
+ buf->f_bavail = (res.abytes + blockres) >> blockbits;
+
+ buf->f_files = res.tfiles;
+ buf->f_ffree = res.afiles;
+
+ buf->f_namelen = server->namelen;
+
+ return 0;
+
+ out_err:
+ dprintk("%s: statfs error = %d\n", __func__, -error);
+ return error;
+}
+
+/*
+ * Map the security flavour number to a name
+ */
+static const char *nfs_pseudoflavour_to_name(rpc_authflavor_t flavour)
+{
+ static const struct {
+ rpc_authflavor_t flavour;
+ const char *str;
+ } sec_flavours[] = {
+ { RPC_AUTH_NULL, "null" },
+ { RPC_AUTH_UNIX, "sys" },
+ { RPC_AUTH_GSS_KRB5, "krb5" },
+ { RPC_AUTH_GSS_KRB5I, "krb5i" },
+ { RPC_AUTH_GSS_KRB5P, "krb5p" },
+ { RPC_AUTH_GSS_LKEY, "lkey" },
+ { RPC_AUTH_GSS_LKEYI, "lkeyi" },
+ { RPC_AUTH_GSS_LKEYP, "lkeyp" },
+ { RPC_AUTH_GSS_SPKM, "spkm" },
+ { RPC_AUTH_GSS_SPKMI, "spkmi" },
+ { RPC_AUTH_GSS_SPKMP, "spkmp" },
+ { UINT_MAX, "unknown" }
+ };
+ int i;
+
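+ /*
+  * Walk to the UINT_MAX sentinel; an unrecognised flavour therefore
+  * leaves the loop pointing at the "unknown" entry.
+  */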
+ for (i = 0; sec_flavours[i].flavour != UINT_MAX; i++) {
+ if (sec_flavours[i].flavour == flavour)
+ break;
+ }
+ return sec_flavours[i].str;
+}
+
+static void nfs_show_mountd_netid(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+ struct sockaddr *sap = (struct sockaddr *) &nfss->mountd_address;
+
+ seq_printf(m, ",mountproto=");
+ switch (sap->sa_family) {
+ case AF_INET:
+ switch (nfss->mountd_protocol) {
+ case IPPROTO_UDP:
+ seq_printf(m, RPCBIND_NETID_UDP);
+ break;
+ case IPPROTO_TCP:
+ seq_printf(m, RPCBIND_NETID_TCP);
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, "auto");
+ }
+ break;
+ case AF_INET6:
+ switch (nfss->mountd_protocol) {
+ case IPPROTO_UDP:
+ seq_printf(m, RPCBIND_NETID_UDP6);
+ break;
+ case IPPROTO_TCP:
+ seq_printf(m, RPCBIND_NETID_TCP6);
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, "auto");
+ }
+ break;
+ default:
+ if (showdefaults)
+ seq_printf(m, "auto");
+ }
+}
+
+static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+ struct sockaddr *sap = (struct sockaddr *)&nfss->mountd_address;
+
+ if (nfss->flags & NFS_MOUNT_LEGACY_INTERFACE)
+ return;
+
+ switch (sap->sa_family) {
+ case AF_INET: {
+ struct sockaddr_in *sin = (struct sockaddr_in *)sap;
+ seq_printf(m, ",mountaddr=%pI4", &sin->sin_addr.s_addr);
+ break;
+ }
+ case AF_INET6: {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
+ seq_printf(m, ",mountaddr=%pI6c", &sin6->sin6_addr);
+ break;
+ }
+ default:
+ if (showdefaults)
+ seq_printf(m, ",mountaddr=unspecified");
+ }
+
+ if (nfss->mountd_version || showdefaults)
+ seq_printf(m, ",mountvers=%u", nfss->mountd_version);
+ if ((nfss->mountd_port &&
+ nfss->mountd_port != (unsigned short)NFS_UNSPEC_PORT) ||
+ showdefaults)
+ seq_printf(m, ",mountport=%u", nfss->mountd_port);
+
+ nfs_show_mountd_netid(m, nfss, showdefaults);
+}
+
+#ifdef CONFIG_NFS_V4
+static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+ struct nfs_client *clp = nfss->nfs_client;
+
+ seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
+}
+#else
+static void nfs_show_nfsv4_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+}
+#endif
+
+static void nfs_show_nfs_version(struct seq_file *m,
+ unsigned int version,
+ unsigned int minorversion)
+{
+ seq_printf(m, ",vers=%u", version);
+ if (version == 4)
+ seq_printf(m, ".%u", minorversion);
+}
+
+/*
+ * Describe the mount options in force on this server representation
+ */
+static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
+ int showdefaults)
+{
+ static const struct proc_nfs_info {
+ int flag;
+ const char *str;
+ const char *nostr;
+ } nfs_info[] = {
+ { NFS_MOUNT_SOFT, ",soft", ",hard" },
+ { NFS_MOUNT_POSIX, ",posix", "" },
+ { NFS_MOUNT_NOCTO, ",nocto", "" },
+ { NFS_MOUNT_NOAC, ",noac", "" },
+ { NFS_MOUNT_NONLM, ",nolock", "" },
+ { NFS_MOUNT_NOACL, ",noacl", "" },
+ { NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
+ { NFS_MOUNT_UNSHARED, ",nosharecache", "" },
+ { NFS_MOUNT_NORESVPORT, ",noresvport", "" },
+ { 0, NULL, NULL }
+ };
+ const struct proc_nfs_info *nfs_infop;
+ struct nfs_client *clp = nfss->nfs_client;
+ u32 version = clp->rpc_ops->version;
+ int local_flock, local_fcntl;
+
+ nfs_show_nfs_version(m, version, clp->cl_minorversion);
+ seq_printf(m, ",rsize=%u", nfss->rsize);
+ seq_printf(m, ",wsize=%u", nfss->wsize);
+ if (nfss->bsize != 0)
+ seq_printf(m, ",bsize=%u", nfss->bsize);
+ seq_printf(m, ",namlen=%u", nfss->namelen);
+ if (nfss->acregmin != NFS_DEF_ACREGMIN*HZ || showdefaults)
+ seq_printf(m, ",acregmin=%u", nfss->acregmin/HZ);
+ if (nfss->acregmax != NFS_DEF_ACREGMAX*HZ || showdefaults)
+ seq_printf(m, ",acregmax=%u", nfss->acregmax/HZ);
+ if (nfss->acdirmin != NFS_DEF_ACDIRMIN*HZ || showdefaults)
+ seq_printf(m, ",acdirmin=%u", nfss->acdirmin/HZ);
+ if (nfss->acdirmax != NFS_DEF_ACDIRMAX*HZ || showdefaults)
+ seq_printf(m, ",acdirmax=%u", nfss->acdirmax/HZ);
+ for (nfs_infop = nfs_info; nfs_infop->flag; nfs_infop++) {
+ if (nfss->flags & nfs_infop->flag)
+ seq_puts(m, nfs_infop->str);
+ else
+ seq_puts(m, nfs_infop->nostr);
+ }
+ rcu_read_lock();
+ seq_printf(m, ",proto=%s",
+ rpc_peeraddr2str(nfss->client, RPC_DISPLAY_NETID));
+ rcu_read_unlock();
+ if (version == 4) {
+ if (nfss->port != NFS_PORT)
+ seq_printf(m, ",port=%u", nfss->port);
+ } else
+ if (nfss->port)
+ seq_printf(m, ",port=%u", nfss->port);
+
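+ /* timeo is expressed in tenths of a second, hence the factor of 10 */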
+ seq_printf(m, ",timeo=%lu", 10U * nfss->client->cl_timeout->to_initval / HZ);
+ seq_printf(m, ",retrans=%u", nfss->client->cl_timeout->to_retries);
+ seq_printf(m, ",sec=%s", nfs_pseudoflavour_to_name(nfss->client->cl_auth->au_flavor));
+
+ if (version != 4)
+ nfs_show_mountd_options(m, nfss, showdefaults);
+ else
+ nfs_show_nfsv4_options(m, nfss, showdefaults);
+
+ if (nfss->options & NFS_OPTION_FSCACHE)
+ seq_printf(m, ",fsc");
+
+ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) {
+ if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
+ seq_printf(m, ",lookupcache=none");
+ else
+ seq_printf(m, ",lookupcache=pos");
+ }
+
+ local_flock = nfss->flags & NFS_MOUNT_LOCAL_FLOCK;
+ local_fcntl = nfss->flags & NFS_MOUNT_LOCAL_FCNTL;
+
+ if (!local_flock && !local_fcntl)
+ seq_printf(m, ",local_lock=none");
+ else if (local_flock && local_fcntl)
+ seq_printf(m, ",local_lock=all");
+ else if (local_flock)
+ seq_printf(m, ",local_lock=flock");
+ else
+ seq_printf(m, ",local_lock=posix");
+}
+
+/*
+ * Describe the mount options on this VFS mountpoint
+ */
+static int nfs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct nfs_server *nfss = NFS_SB(root->d_sb);
+
+ nfs_show_mount_options(m, nfss, 0);
+
+ rcu_read_lock();
+ seq_printf(m, ",addr=%s",
+ rpc_peeraddr2str(nfss->nfs_client->cl_rpcclient,
+ RPC_DISPLAY_ADDR));
+ rcu_read_unlock();
+
+ return 0;
+}
+
+#ifdef CONFIG_NFS_V4
+#ifdef CONFIG_NFS_V4_1
+static void show_sessions(struct seq_file *m, struct nfs_server *server)
+{
+ if (nfs4_has_session(server->nfs_client))
+ seq_printf(m, ",sessions");
+}
+#else
+static void show_sessions(struct seq_file *m, struct nfs_server *server) {}
+#endif
+#endif
+
+#ifdef CONFIG_NFS_V4_1
+static void show_pnfs(struct seq_file *m, struct nfs_server *server)
+{
+ seq_printf(m, ",pnfs=");
+ if (server->pnfs_curr_ld)
+ seq_printf(m, "%s", server->pnfs_curr_ld->name);
+ else
+ seq_printf(m, "not configured");
+}
+
+static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
+{
+ if (nfss->nfs_client && nfss->nfs_client->impl_id) {
+ struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id;
+ seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s',"
+ "date='%llu,%u'",
+ impl_id->name, impl_id->domain,
+ impl_id->date.seconds, impl_id->date.nseconds);
+ }
+}
+#else
+#ifdef CONFIG_NFS_V4
+static void show_pnfs(struct seq_file *m, struct nfs_server *server)
+{
+}
+#endif
+static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
+{
+}
+#endif
+
+static int nfs_show_devname(struct seq_file *m, struct dentry *root)
+{
+ char *page = (char *) __get_free_page(GFP_KERNEL);
+ char *devname, *dummy;
+ int err = 0;
+ if (!page)
+ return -ENOMEM;
+ devname = nfs_path(&dummy, root, page, PAGE_SIZE);
+ if (IS_ERR(devname))
+ err = PTR_ERR(devname);
+ else
+ seq_escape(m, devname, " \t\n\\");
+ free_page((unsigned long)page);
+ return err;
+}
+
+static int nfs_show_path(struct seq_file *m, struct dentry *dentry)
+{
+ seq_puts(m, "/");
+ return 0;
+}
+
+/*
+ * Present statistical information for this VFS mountpoint
+ */
+static int nfs_show_stats(struct seq_file *m, struct dentry *root)
+{
+ int i, cpu;
+ struct nfs_server *nfss = NFS_SB(root->d_sb);
+ struct rpc_auth *auth = nfss->client->cl_auth;
+ struct nfs_iostats totals = { };
+
+ seq_printf(m, "statvers=%s", NFS_IOSTAT_VERS);
+
+ /*
+ * Display all mount option settings
+ */
+ seq_printf(m, "\n\topts:\t");
+ seq_puts(m, root->d_sb->s_flags & MS_RDONLY ? "ro" : "rw");
+ seq_puts(m, root->d_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
+ seq_puts(m, root->d_sb->s_flags & MS_NOATIME ? ",noatime" : "");
+ seq_puts(m, root->d_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
+ nfs_show_mount_options(m, nfss, 1);
+
+ seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ);
+
+ show_implementation_id(m, nfss);
+
+ seq_printf(m, "\n\tcaps:\t");
+ seq_printf(m, "caps=0x%x", nfss->caps);
+ seq_printf(m, ",wtmult=%u", nfss->wtmult);
+ seq_printf(m, ",dtsize=%u", nfss->dtsize);
+ seq_printf(m, ",bsize=%u", nfss->bsize);
+ seq_printf(m, ",namlen=%u", nfss->namelen);
+
+#ifdef CONFIG_NFS_V4
+ if (nfss->nfs_client->rpc_ops->version == 4) {
+ seq_printf(m, "\n\tnfsv4:\t");
+ seq_printf(m, "bm0=0x%x", nfss->attr_bitmask[0]);
+ seq_printf(m, ",bm1=0x%x", nfss->attr_bitmask[1]);
+ seq_printf(m, ",acl=0x%x", nfss->acl_bitmask);
+ show_sessions(m, nfss);
+ show_pnfs(m, nfss);
+ }
+#endif
+
+ /*
+ * Display security flavor in effect for this mount
+ */
+ seq_printf(m, "\n\tsec:\tflavor=%u", auth->au_ops->au_flavor);
+ if (auth->au_flavor)
+ seq_printf(m, ",pseudoflavor=%u", auth->au_flavor);
+
+ /*
+ * Display superblock I/O counters
+ */
+ for_each_possible_cpu(cpu) {
+ struct nfs_iostats *stats;
+
+ preempt_disable();
+ stats = per_cpu_ptr(nfss->io_stats, cpu);
+
+ for (i = 0; i < __NFSIOS_COUNTSMAX; i++)
+ totals.events[i] += stats->events[i];
+ for (i = 0; i < __NFSIOS_BYTESMAX; i++)
+ totals.bytes[i] += stats->bytes[i];
+#ifdef CONFIG_NFS_FSCACHE
+ for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
+ totals.fscache[i] += stats->fscache[i];
+#endif
+
+ preempt_enable();
+ }
+
+ seq_printf(m, "\n\tevents:\t");
+ for (i = 0; i < __NFSIOS_COUNTSMAX; i++)
+ seq_printf(m, "%lu ", totals.events[i]);
+ seq_printf(m, "\n\tbytes:\t");
+ for (i = 0; i < __NFSIOS_BYTESMAX; i++)
+ seq_printf(m, "%Lu ", totals.bytes[i]);
+#ifdef CONFIG_NFS_FSCACHE
+ if (nfss->options & NFS_OPTION_FSCACHE) {
+ seq_printf(m, "\n\tfsc:\t");
+ for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
+ seq_printf(m, "%Lu ", totals.bytes[i]);
+ }
+#endif
+ seq_printf(m, "\n");
+
+ rpc_print_iostats(m, nfss->client);
+
+ return 0;
+}
+
+/*
+ * Begin unmount by attempting to remove all automounted mountpoints we added
+ * in response to xdev traversals and referrals
+ */
+static void nfs_umount_begin(struct super_block *sb)
+{
+ struct nfs_server *server;
+ struct rpc_clnt *rpc;
+
+ server = NFS_SB(sb);
+ /* -EIO all pending I/O */
+ rpc = server->client_acl;
+ if (!IS_ERR(rpc))
+ rpc_killall_tasks(rpc);
+ rpc = server->client;
+ if (!IS_ERR(rpc))
+ rpc_killall_tasks(rpc);
+}
+
+static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version)
+{
+ struct nfs_parsed_mount_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data) {
+ data->acregmin = NFS_DEF_ACREGMIN;
+ data->acregmax = NFS_DEF_ACREGMAX;
+ data->acdirmin = NFS_DEF_ACDIRMIN;
+ data->acdirmax = NFS_DEF_ACDIRMAX;
+ data->mount_server.port = NFS_UNSPEC_PORT;
+ data->nfs_server.port = NFS_UNSPEC_PORT;
+ data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ data->auth_flavors[0] = RPC_AUTH_UNIX;
+ data->auth_flavor_len = 1;
+ data->version = version;
+ data->minorversion = 0;
+ data->net = current->nsproxy->net_ns;
+ security_init_mnt_opts(&data->lsm_opts);
+ }
+ return data;
+}
+
+static void nfs_free_parsed_mount_data(struct nfs_parsed_mount_data *data)
+{
+ if (data) {
+ kfree(data->client_address);
+ kfree(data->mount_server.hostname);
+ kfree(data->nfs_server.export_path);
+ kfree(data->nfs_server.hostname);
+ kfree(data->fscache_uniq);
+ security_free_mnt_opts(&data->lsm_opts);
+ kfree(data);
+ }
+}
+
+/*
+ * Sanity-check a server address provided by the mount command.
+ *
+ * Address family must be initialized, and address must not be
+ * the ANY address for that family.
+ */
+static int nfs_verify_server_address(struct sockaddr *addr)
+{
+ switch (addr->sa_family) {
+ case AF_INET: {
+ struct sockaddr_in *sa = (struct sockaddr_in *)addr;
+ return sa->sin_addr.s_addr != htonl(INADDR_ANY);
+ }
+ case AF_INET6: {
+ struct in6_addr *sa = &((struct sockaddr_in6 *)addr)->sin6_addr;
+ return !ipv6_addr_any(sa);
+ }
+ }
+
+ dfprintk(MOUNT, "NFS: Invalid IP address specified\n");
+ return 0;
+}
+
+/*
+ * Select between a default port value and a user-specified port value.
+ * If a zero value is set, then autobind will be used.
+ */
+static void nfs_set_port(struct sockaddr *sap, int *port,
+ const unsigned short default_port)
+{
+ if (*port == NFS_UNSPEC_PORT)
+ *port = default_port;
+
+ rpc_set_port(sap, *port);
+}
+
+/*
+ * Sanity check the NFS transport protocol.
+ */
+static void nfs_validate_transport_protocol(struct nfs_parsed_mount_data *mnt)
+{
+ switch (mnt->nfs_server.protocol) {
+ case XPRT_TRANSPORT_UDP:
+ case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_RDMA:
+ break;
+ default:
+ mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ }
+}
+
+/*
+ * For text based NFSv2/v3 mounts, the mount protocol transport default
+ * settings should depend upon the specified NFS transport.
+ */
+static void nfs_set_mount_transport_protocol(struct nfs_parsed_mount_data *mnt)
+{
+ nfs_validate_transport_protocol(mnt);
+
+ if (mnt->mount_server.protocol == XPRT_TRANSPORT_UDP ||
+ mnt->mount_server.protocol == XPRT_TRANSPORT_TCP)
+ return;
+ switch (mnt->nfs_server.protocol) {
+ case XPRT_TRANSPORT_UDP:
+ mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
+ break;
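+ /* the MOUNT protocol cannot run over RDMA, so pair RDMA with TCP */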
+ case XPRT_TRANSPORT_TCP:
+ case XPRT_TRANSPORT_RDMA:
+ mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
+ }
+}
+
+/*
+ * Parse the value of the 'sec=' option.
+ */
+static int nfs_parse_security_flavors(char *value,
+ struct nfs_parsed_mount_data *mnt)
+{
+ substring_t args[MAX_OPT_ARGS];
+
+ dfprintk(MOUNT, "NFS: parsing sec=%s option\n", value);
+
+ switch (match_token(value, nfs_secflavor_tokens, args)) {
+ case Opt_sec_none:
+ mnt->auth_flavors[0] = RPC_AUTH_NULL;
+ break;
+ case Opt_sec_sys:
+ mnt->auth_flavors[0] = RPC_AUTH_UNIX;
+ break;
+ case Opt_sec_krb5:
+ mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5;
+ break;
+ case Opt_sec_krb5i:
+ mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5I;
+ break;
+ case Opt_sec_krb5p:
+ mnt->auth_flavors[0] = RPC_AUTH_GSS_KRB5P;
+ break;
+ case Opt_sec_lkey:
+ mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEY;
+ break;
+ case Opt_sec_lkeyi:
+ mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYI;
+ break;
+ case Opt_sec_lkeyp:
+ mnt->auth_flavors[0] = RPC_AUTH_GSS_LKEYP;
+ break;
+ case Opt_sec_spkm:
+ mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKM;
+ break;
+ case Opt_sec_spkmi:
+ mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMI;
+ break;
+ case Opt_sec_spkmp:
+ mnt->auth_flavors[0] = RPC_AUTH_GSS_SPKMP;
+ break;
+ default:
+ return 0;
+ }
+
+ mnt->flags |= NFS_MOUNT_SECFLAVOUR;
+ mnt->auth_flavor_len = 1;
+ return 1;
+}
+
+static int nfs_parse_version_string(char *string,
+ struct nfs_parsed_mount_data *mnt,
+ substring_t *args)
+{
+ mnt->flags &= ~NFS_MOUNT_VER3;
+ switch (match_token(string, nfs_vers_tokens, args)) {
+ case Opt_vers_2:
+ mnt->version = 2;
+ break;
+ case Opt_vers_3:
+ mnt->flags |= NFS_MOUNT_VER3;
+ mnt->version = 3;
+ break;
+ case Opt_vers_4:
+ /* Backward compatibility option. In future,
+ * the mount program should always supply
+ * a NFSv4 minor version number.
+ */
+ mnt->version = 4;
+ break;
+ case Opt_vers_4_0:
+ mnt->version = 4;
+ mnt->minorversion = 0;
+ break;
+ case Opt_vers_4_1:
+ mnt->version = 4;
+ mnt->minorversion = 1;
+ break;
+ default:
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Replace any previous string with a copy of the option's argument.
+ * Returns nonzero if the duplication failed.
+ */
+static int nfs_get_option_str(substring_t args[], char **option)
+{
+ kfree(*option);
+ *option = match_strdup(args);
+ return !*option;
+}
+
+static int nfs_get_option_ul(substring_t args[], unsigned long *option)
+{
+ int rc;
+ char *string;
+
+ string = match_strdup(args);
+ if (string == NULL)
+ return -ENOMEM;
+ rc = strict_strtoul(string, 10, option);
+ kfree(string);
+
+ return rc;
+}
+
+/*
+ * Error-check and convert a string of mount options from user space into
+ * a data structure. The whole mount string is processed; bad options are
+ * skipped as they are encountered. If there were no errors, return 1;
+ * otherwise return 0 (zero).
+ */
+static int nfs_parse_mount_options(char *raw,
+ struct nfs_parsed_mount_data *mnt)
+{
+ char *p, *string, *secdata;
+ int rc, sloppy = 0, invalid_option = 0;
+ unsigned short protofamily = AF_UNSPEC;
+ unsigned short mountfamily = AF_UNSPEC;
+
+ if (!raw) {
+ dfprintk(MOUNT, "NFS: mount options string was NULL.\n");
+ return 1;
+ }
+ dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw);
+
+ secdata = alloc_secdata();
+ if (!secdata)
+ goto out_nomem;
+
+ rc = security_sb_copy_data(raw, secdata);
+ if (rc)
+ goto out_security_failure;
+
+ rc = security_sb_parse_opts_str(secdata, &mnt->lsm_opts);
+ if (rc)
+ goto out_security_failure;
+
+ free_secdata(secdata);
+
+ while ((p = strsep(&raw, ",")) != NULL) {
+ substring_t args[MAX_OPT_ARGS];
+ unsigned long option;
+ int token;
+
+ if (!*p)
+ continue;
+
+ dfprintk(MOUNT, "NFS: parsing nfs mount option '%s'\n", p);
+
+ token = match_token(p, nfs_mount_option_tokens, args);
+ switch (token) {
+
+ /*
+ * boolean options: foo/nofoo
+ */
+ case Opt_soft:
+ mnt->flags |= NFS_MOUNT_SOFT;
+ break;
+ case Opt_hard:
+ mnt->flags &= ~NFS_MOUNT_SOFT;
+ break;
+ case Opt_posix:
+ mnt->flags |= NFS_MOUNT_POSIX;
+ break;
+ case Opt_noposix:
+ mnt->flags &= ~NFS_MOUNT_POSIX;
+ break;
+ case Opt_cto:
+ mnt->flags &= ~NFS_MOUNT_NOCTO;
+ break;
+ case Opt_nocto:
+ mnt->flags |= NFS_MOUNT_NOCTO;
+ break;
+ case Opt_ac:
+ mnt->flags &= ~NFS_MOUNT_NOAC;
+ break;
+ case Opt_noac:
+ mnt->flags |= NFS_MOUNT_NOAC;
+ break;
+ case Opt_lock:
+ mnt->flags &= ~NFS_MOUNT_NONLM;
+ mnt->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
+ break;
+ case Opt_nolock:
+ mnt->flags |= NFS_MOUNT_NONLM;
+ mnt->flags |= (NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
+ break;
+ case Opt_udp:
+ mnt->flags &= ~NFS_MOUNT_TCP;
+ mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ break;
+ case Opt_tcp:
+ mnt->flags |= NFS_MOUNT_TCP;
+ mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ break;
+ case Opt_rdma:
+ mnt->flags |= NFS_MOUNT_TCP; /* for side protocols */
+ mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
+ xprt_load_transport(p);
+ break;
+ case Opt_acl:
+ mnt->flags &= ~NFS_MOUNT_NOACL;
+ break;
+ case Opt_noacl:
+ mnt->flags |= NFS_MOUNT_NOACL;
+ break;
+ case Opt_rdirplus:
+ mnt->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ break;
+ case Opt_nordirplus:
+ mnt->flags |= NFS_MOUNT_NORDIRPLUS;
+ break;
+ case Opt_sharecache:
+ mnt->flags &= ~NFS_MOUNT_UNSHARED;
+ break;
+ case Opt_nosharecache:
+ mnt->flags |= NFS_MOUNT_UNSHARED;
+ break;
+ case Opt_resvport:
+ mnt->flags &= ~NFS_MOUNT_NORESVPORT;
+ break;
+ case Opt_noresvport:
+ mnt->flags |= NFS_MOUNT_NORESVPORT;
+ break;
+ case Opt_fscache:
+ mnt->options |= NFS_OPTION_FSCACHE;
+ kfree(mnt->fscache_uniq);
+ mnt->fscache_uniq = NULL;
+ break;
+ case Opt_nofscache:
+ mnt->options &= ~NFS_OPTION_FSCACHE;
+ kfree(mnt->fscache_uniq);
+ mnt->fscache_uniq = NULL;
+ break;
+
+ /*
+ * options that take numeric values
+ */
+ case Opt_port:
+ if (nfs_get_option_ul(args, &option) ||
+ option > USHRT_MAX)
+ goto out_invalid_value;
+ mnt->nfs_server.port = option;
+ break;
+ case Opt_rsize:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ mnt->rsize = option;
+ break;
+ case Opt_wsize:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ mnt->wsize = option;
+ break;
+ case Opt_bsize:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ mnt->bsize = option;
+ break;
+ case Opt_timeo:
+ if (nfs_get_option_ul(args, &option) || option == 0)
+ goto out_invalid_value;
+ mnt->timeo = option;
+ break;
+ case Opt_retrans:
+ if (nfs_get_option_ul(args, &option) || option == 0)
+ goto out_invalid_value;
+ mnt->retrans = option;
+ break;
+ case Opt_acregmin:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ mnt->acregmin = option;
+ break;
+ case Opt_acregmax:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ mnt->acregmax = option;
+ break;
+ case Opt_acdirmin:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ mnt->acdirmin = option;
+ break;
+ case Opt_acdirmax:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ mnt->acdirmax = option;
+ break;
+ case Opt_actimeo:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ mnt->acregmin = mnt->acregmax =
+ mnt->acdirmin = mnt->acdirmax = option;
+ break;
+ case Opt_namelen:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ mnt->namlen = option;
+ break;
+ case Opt_mountport:
+ if (nfs_get_option_ul(args, &option) ||
+ option > USHRT_MAX)
+ goto out_invalid_value;
+ mnt->mount_server.port = option;
+ break;
+ case Opt_mountvers:
+ if (nfs_get_option_ul(args, &option) ||
+ option < NFS_MNT_VERSION ||
+ option > NFS_MNT3_VERSION)
+ goto out_invalid_value;
+ mnt->mount_server.version = option;
+ break;
+ case Opt_minorversion:
+ if (nfs_get_option_ul(args, &option))
+ goto out_invalid_value;
+ if (option > NFS4_MAX_MINOR_VERSION)
+ goto out_invalid_value;
+ mnt->minorversion = option;
+ break;
+
+ /*
+ * options that take text values
+ */
+ case Opt_nfsvers:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = nfs_parse_version_string(string, mnt, args);
+ kfree(string);
+ if (!rc)
+ goto out_invalid_value;
+ break;
+ case Opt_sec:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ rc = nfs_parse_security_flavors(string, mnt);
+ kfree(string);
+ if (!rc) {
+ dfprintk(MOUNT, "NFS: unrecognized "
+ "security flavor\n");
+ return 0;
+ }
+ break;
+ case Opt_proto:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ token = match_token(string,
+ nfs_xprt_protocol_tokens, args);
+
+ protofamily = AF_INET;
+ switch (token) {
+ case Opt_xprt_udp6:
+ protofamily = AF_INET6;
+ /* fall through */
+ case Opt_xprt_udp:
+ mnt->flags &= ~NFS_MOUNT_TCP;
+ mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ break;
+ case Opt_xprt_tcp6:
+ protofamily = AF_INET6;
+ /* fall through */
+ case Opt_xprt_tcp:
+ mnt->flags |= NFS_MOUNT_TCP;
+ mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
+ break;
+ case Opt_xprt_rdma:
+ /* vector side protocols to TCP */
+ mnt->flags |= NFS_MOUNT_TCP;
+ mnt->nfs_server.protocol = XPRT_TRANSPORT_RDMA;
+ xprt_load_transport(string);
+ break;
+ default:
+ dfprintk(MOUNT, "NFS: unrecognized "
+ "transport protocol\n");
+ kfree(string);
+ return 0;
+ }
+ kfree(string);
+ break;
+ case Opt_mountproto:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ token = match_token(string,
+ nfs_xprt_protocol_tokens, args);
+ kfree(string);
+
+ mountfamily = AF_INET;
+ switch (token) {
+ case Opt_xprt_udp6:
+ mountfamily = AF_INET6;
+ /* fall through */
+ case Opt_xprt_udp:
+ mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
+ break;
+ case Opt_xprt_tcp6:
+ mountfamily = AF_INET6;
+ /* fall through */
+ case Opt_xprt_tcp:
+ mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
+ break;
+ case Opt_xprt_rdma: /* not used for side protocols */
+ default:
+ dfprintk(MOUNT, "NFS: unrecognized "
+ "transport protocol\n");
+ return 0;
+ }
+ break;
+ case Opt_addr:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ mnt->nfs_server.addrlen =
+ rpc_pton(mnt->net, string, strlen(string),
+ (struct sockaddr *)
+ &mnt->nfs_server.address,
+ sizeof(mnt->nfs_server.address));
+ kfree(string);
+ if (mnt->nfs_server.addrlen == 0)
+ goto out_invalid_address;
+ break;
+ case Opt_clientaddr:
+ if (nfs_get_option_str(args, &mnt->client_address))
+ goto out_nomem;
+ break;
+ case Opt_mounthost:
+ if (nfs_get_option_str(args,
+ &mnt->mount_server.hostname))
+ goto out_nomem;
+ break;
+ case Opt_mountaddr:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ mnt->mount_server.addrlen =
+ rpc_pton(mnt->net, string, strlen(string),
+ (struct sockaddr *)
+ &mnt->mount_server.address,
+ sizeof(mnt->mount_server.address));
+ kfree(string);
+ if (mnt->mount_server.addrlen == 0)
+ goto out_invalid_address;
+ break;
+ case Opt_lookupcache:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ token = match_token(string,
+ nfs_lookupcache_tokens, args);
+ kfree(string);
+ switch (token) {
+ case Opt_lookupcache_all:
+ mnt->flags &= ~(NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE);
+ break;
+ case Opt_lookupcache_positive:
+ mnt->flags &= ~NFS_MOUNT_LOOKUP_CACHE_NONE;
+ mnt->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG;
+ break;
+ case Opt_lookupcache_none:
+ mnt->flags |= NFS_MOUNT_LOOKUP_CACHE_NONEG|NFS_MOUNT_LOOKUP_CACHE_NONE;
+ break;
+ default:
+ dfprintk(MOUNT, "NFS: invalid "
+ "lookupcache argument\n");
+ return 0;
+ }
+ break;
+ case Opt_fscache_uniq:
+ if (nfs_get_option_str(args, &mnt->fscache_uniq))
+ goto out_nomem;
+ mnt->options |= NFS_OPTION_FSCACHE;
+ break;
+ case Opt_local_lock:
+ string = match_strdup(args);
+ if (string == NULL)
+ goto out_nomem;
+ token = match_token(string, nfs_local_lock_tokens,
+ args);
+ kfree(string);
+ switch (token) {
+ case Opt_local_lock_all:
+ mnt->flags |= (NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
+ break;
+ case Opt_local_lock_flock:
+ mnt->flags |= NFS_MOUNT_LOCAL_FLOCK;
+ break;
+ case Opt_local_lock_posix:
+ mnt->flags |= NFS_MOUNT_LOCAL_FCNTL;
+ break;
+ case Opt_local_lock_none:
+ mnt->flags &= ~(NFS_MOUNT_LOCAL_FLOCK |
+ NFS_MOUNT_LOCAL_FCNTL);
+ break;
+ default:
+ dfprintk(MOUNT, "NFS: invalid "
+ "local_lock argument\n");
+ return 0;
+ }
+ break;
+
+ /*
+ * Special options
+ */
+ case Opt_sloppy:
+ sloppy = 1;
+ dfprintk(MOUNT, "NFS: relaxing parsing rules\n");
+ break;
+ case Opt_userspace:
+ case Opt_deprecated:
+ dfprintk(MOUNT, "NFS: ignoring mount option "
+ "'%s'\n", p);
+ break;
+
+ default:
+ invalid_option = 1;
+ dfprintk(MOUNT, "NFS: unrecognized mount option "
+ "'%s'\n", p);
+ }
+ }
+
+ if (!sloppy && invalid_option)
+ return 0;
+
+ if (mnt->minorversion && mnt->version != 4)
+ goto out_minorversion_mismatch;
+
+ /*
+ * verify that any proto=/mountproto= options match the address
+ * families in the addr=/mountaddr= options.
+ */
+ if (protofamily != AF_UNSPEC &&
+ protofamily != mnt->nfs_server.address.ss_family)
+ goto out_proto_mismatch;
+
+ if (mountfamily != AF_UNSPEC) {
+ if (mnt->mount_server.addrlen) {
+ if (mountfamily != mnt->mount_server.address.ss_family)
+ goto out_mountproto_mismatch;
+ } else {
+ if (mountfamily != mnt->nfs_server.address.ss_family)
+ goto out_mountproto_mismatch;
+ }
+ }
+
+ return 1;
+
+out_mountproto_mismatch:
+ printk(KERN_INFO "NFS: mount server address does not match mountproto= "
+ "option\n");
+ return 0;
+out_proto_mismatch:
+ printk(KERN_INFO "NFS: server address does not match proto= option\n");
+ return 0;
+out_invalid_address:
+ printk(KERN_INFO "NFS: bad IP address specified: %s\n", p);
+ return 0;
+out_invalid_value:
+ printk(KERN_INFO "NFS: bad mount option value specified: %s\n", p);
+ return 0;
+out_minorversion_mismatch:
+ printk(KERN_INFO "NFS: mount option vers=%u does not support "
+ "minorversion=%u\n", mnt->version, mnt->minorversion);
+ return 0;
+out_nomem:
+ printk(KERN_INFO "NFS: not enough memory to parse option\n");
+ return 0;
+out_security_failure:
+ free_secdata(secdata);
+ printk(KERN_INFO "NFS: security options invalid: %d\n", rc);
+ return 0;
+}
+
+/*
+ * Match the requested auth flavors with the list returned by
+ * the server. Returns zero and sets the mount's authentication
+ * flavor on success; returns -EACCES if server does not support
+ * the requested flavor.
+ */
+static int nfs_walk_authlist(struct nfs_parsed_mount_data *args,
+ struct nfs_mount_request *request)
+{
+ unsigned int i, j, server_authlist_len = *(request->auth_flav_len);
+
+ /*
+ * Certain releases of Linux's mountd return an empty
+ * flavor list. To prevent behavioral regression with
+ * these servers (i.e. rejecting mounts that used to
+ * succeed), revert to pre-2.6.32 behavior (no checking)
+ * if the returned flavor list is empty.
+ */
+ if (server_authlist_len == 0)
+ return 0;
+
+ /*
+ * We avoid sophisticated negotiating here, as there are
+ * plenty of cases where we can get it wrong, providing
+ * either too little or too much security.
+ *
+ * RFC 2623, section 2.7 suggests we SHOULD prefer the
+ * flavor listed first. However, some servers list
+ * AUTH_NULL first. Our caller plants AUTH_SYS, the
+ * preferred default, in args->auth_flavors[0] if user
+ * didn't specify sec= mount option.
+ */
+ for (i = 0; i < args->auth_flavor_len; i++)
+ for (j = 0; j < server_authlist_len; j++)
+ if (args->auth_flavors[i] == request->auth_flavs[j]) {
+ dfprintk(MOUNT, "NFS: using auth flavor %d\n",
+ request->auth_flavs[j]);
+ args->auth_flavors[0] = request->auth_flavs[j];
+ return 0;
+ }
+
+ dfprintk(MOUNT, "NFS: server does not support requested auth flavor\n");
+ nfs_umount(request);
+ return -EACCES;
+}
+
+/*
+ * Use the remote server's MOUNT service to request the NFS file handle
+ * corresponding to the provided path.
+ */
+static int nfs_try_mount(struct nfs_parsed_mount_data *args,
+ struct nfs_fh *root_fh)
+{
+ rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS];
+ unsigned int server_authlist_len = ARRAY_SIZE(server_authlist);
+ struct nfs_mount_request request = {
+ .sap = (struct sockaddr *)
+ &args->mount_server.address,
+ .dirpath = args->nfs_server.export_path,
+ .protocol = args->mount_server.protocol,
+ .fh = root_fh,
+ .noresvport = args->flags & NFS_MOUNT_NORESVPORT,
+ .auth_flav_len = &server_authlist_len,
+ .auth_flavs = server_authlist,
+ .net = args->net,
+ };
+ int status;
+
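+ /*
+  * Pick a default MOUNT protocol version when none was given:
+  * MNTv1 pairs with NFSv2, everything newer uses MNTv3.
+  */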
+ if (args->mount_server.version == 0) {
+ switch (args->version) {
+ default:
+ args->mount_server.version = NFS_MNT3_VERSION;
+ break;
+ case 2:
+ args->mount_server.version = NFS_MNT_VERSION;
+ }
+ }
+ request.version = args->mount_server.version;
+
+ if (args->mount_server.hostname)
+ request.hostname = args->mount_server.hostname;
+ else
+ request.hostname = args->nfs_server.hostname;
+
+ /*
+ * Construct the mount server's address.
+ */
+ if (args->mount_server.address.ss_family == AF_UNSPEC) {
+ memcpy(request.sap, &args->nfs_server.address,
+ args->nfs_server.addrlen);
+ args->mount_server.addrlen = args->nfs_server.addrlen;
+ }
+ request.salen = args->mount_server.addrlen;
+ nfs_set_port(request.sap, &args->mount_server.port, 0);
+
+ /*
+ * Now ask the mount server to map our export path
+ * to a file handle.
+ */
+ status = nfs_mount(&request);
+ if (status != 0) {
+ dfprintk(MOUNT, "NFS: unable to mount server %s, error %d\n",
+ request.hostname, status);
+ return status;
+ }
+
+ /*
+ * MNTv1 (NFSv2) does not support auth flavor negotiation.
+ */
+ if (args->mount_server.version != NFS_MNT3_VERSION)
+ return 0;
+ return nfs_walk_authlist(args, &request);
+}
+
+/*
+ * Split "dev_name" into "hostname:export_path".
+ *
+ * The leftmost colon demarks the split between the server's hostname
+ * and the export path. If the hostname starts with a left square
+ * bracket, then it may contain colons.
+ *
+ * Note: caller frees hostname and export path, even on error.
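+ *
+ * For example, "server:/export" splits at the first ':', while
+ * "[fe80::1]:/export" yields hostname "fe80::1" and path "/export".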
+ */
+static int nfs_parse_devname(const char *dev_name,
+ char **hostname, size_t maxnamlen,
+ char **export_path, size_t maxpathlen)
+{
+ size_t len;
+ char *end;
+
+ /* Is the host name protected with square brackets? */
+ if (*dev_name == '[') {
+ end = strchr(++dev_name, ']');
+ if (end == NULL || end[1] != ':')
+ goto out_bad_devname;
+
+ len = end - dev_name;
+ end++;
+ } else {
+ char *comma;
+
+ end = strchr(dev_name, ':');
+ if (end == NULL)
+ goto out_bad_devname;
+ len = end - dev_name;
+
+ /* kill possible hostname list: not supported */
+ comma = strchr(dev_name, ',');
+ if (comma != NULL && comma < end)
+ *comma = 0;
+ }
+
+ if (len > maxnamlen)
+ goto out_hostname;
+
+ /* N.B. caller will free nfs_server.hostname in all cases */
+ *hostname = kstrndup(dev_name, len, GFP_KERNEL);
+ if (*hostname == NULL)
+ goto out_nomem;
+ len = strlen(++end);
+ if (len > maxpathlen)
+ goto out_path;
+ *export_path = kstrndup(end, len, GFP_KERNEL);
+ if (!*export_path)
+ goto out_nomem;
+
+ dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", *export_path);
+ return 0;
+
+out_bad_devname:
+ dfprintk(MOUNT, "NFS: device name not in host:path format\n");
+ return -EINVAL;
+
+out_nomem:
+ dfprintk(MOUNT, "NFS: not enough memory to parse device name\n");
+ return -ENOMEM;
+
+out_hostname:
+ dfprintk(MOUNT, "NFS: server hostname too long\n");
+ return -ENAMETOOLONG;
+
+out_path:
+ dfprintk(MOUNT, "NFS: export pathname too long\n");
+ return -ENAMETOOLONG;
+}
+
+/*
+ * Validate the NFS2/NFS3 mount data
+ * - fills in the mount root filehandle
+ *
+ * For option strings, user space handles the following behaviors:
+ *
+ * + DNS: mapping server host name to IP address ("addr=" option)
+ *
+ * + failure mode: how to behave if a mount request can't be handled
+ * immediately ("fg/bg" option)
+ *
+ * + retry: how often to retry a mount request ("retry=" option)
+ *
+ * + breaking back: trying proto=udp after proto=tcp, v2 after v3,
+ * mountproto=tcp after mountproto=udp, and so on
+ */
+static int nfs_validate_mount_data(void *options,
+ struct nfs_parsed_mount_data *args,
+ struct nfs_fh *mntfh,
+ const char *dev_name)
+{
+ struct nfs_mount_data *data = (struct nfs_mount_data *)options;
+ struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+
+ if (data == NULL)
+ goto out_no_data;
+
+ /*
+  * Each case deliberately falls through to the next so that an
+  * older binary mount-data layout is upgraded field by field
+  * until it matches the current (version 6) format.
+  */
+ switch (data->version) {
+ case 1:
+ data->namlen = 0;
+ /* fall through */
+ case 2:
+ data->bsize = 0;
+ /* fall through */
+ case 3:
+ if (data->flags & NFS_MOUNT_VER3)
+ goto out_no_v3;
+ data->root.size = NFS2_FHSIZE;
+ memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
+ /* fall through */
+ case 4:
+ if (data->flags & NFS_MOUNT_SECFLAVOUR)
+ goto out_no_sec;
+ /* fall through */
+ case 5:
+ memset(data->context, 0, sizeof(data->context));
+ /* fall through */
+ case 6:
+ if (data->flags & NFS_MOUNT_VER3) {
+ if (data->root.size > NFS3_FHSIZE || data->root.size == 0)
+ goto out_invalid_fh;
+ mntfh->size = data->root.size;
+ args->version = 3;
+ } else {
+ mntfh->size = NFS2_FHSIZE;
+ args->version = 2;
+ }
+
+ memcpy(mntfh->data, data->root.data, mntfh->size);
+ if (mntfh->size < sizeof(mntfh->data))
+ memset(mntfh->data + mntfh->size, 0,
+ sizeof(mntfh->data) - mntfh->size);
+
+ /*
+ * Translate to nfs_parsed_mount_data, which nfs_fill_super
+ * can deal with.
+ */
+ args->flags = data->flags & NFS_MOUNT_FLAGMASK;
+ args->flags |= NFS_MOUNT_LEGACY_INTERFACE;
+ args->rsize = data->rsize;
+ args->wsize = data->wsize;
+ args->timeo = data->timeo;
+ args->retrans = data->retrans;
+ args->acregmin = data->acregmin;
+ args->acregmax = data->acregmax;
+ args->acdirmin = data->acdirmin;
+ args->acdirmax = data->acdirmax;
+
+ memcpy(sap, &data->addr, sizeof(data->addr));
+ args->nfs_server.addrlen = sizeof(data->addr);
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
+
+ if (!(data->flags & NFS_MOUNT_TCP))
+ args->nfs_server.protocol = XPRT_TRANSPORT_UDP;
+ /* N.B. caller will free nfs_server.hostname in all cases */
+ args->nfs_server.hostname = kstrdup(data->hostname, GFP_KERNEL);
+ args->namlen = data->namlen;
+ args->bsize = data->bsize;
+
+ if (data->flags & NFS_MOUNT_SECFLAVOUR)
+ args->auth_flavors[0] = data->pseudoflavor;
+ if (!args->nfs_server.hostname)
+ goto out_nomem;
+
+ if (!(data->flags & NFS_MOUNT_NONLM))
+ args->flags &= ~(NFS_MOUNT_LOCAL_FLOCK|
+ NFS_MOUNT_LOCAL_FCNTL);
+ else
+ args->flags |= (NFS_MOUNT_LOCAL_FLOCK|
+ NFS_MOUNT_LOCAL_FCNTL);
+ /*
+ * The legacy version 6 binary mount data from userspace has a
+ * field used only to transport selinux information into the
+ * kernel. To continue to support that functionality we
+ * have a touch of selinux knowledge here in the NFS code. The
+ * userspace code converted context=blah to just blah so we are
+ * converting back to the full string selinux understands.
+ */
+ if (data->context[0]) {
+#ifdef CONFIG_SECURITY_SELINUX
+ int rc;
+ char *opts_str = kmalloc(sizeof(data->context) + 8, GFP_KERNEL);
+ if (!opts_str)
+ return -ENOMEM;
+ strcpy(opts_str, "context=");
+ data->context[NFS_MAX_CONTEXT_LEN] = '\0';
+ strcat(opts_str, &data->context[0]);
+ rc = security_sb_parse_opts_str(opts_str, &args->lsm_opts);
+ kfree(opts_str);
+ if (rc)
+ return rc;
+#else
+ return -EINVAL;
+#endif
+ }
+
+ break;
+ default: {
+ int status;
+
+ if (nfs_parse_mount_options((char *)options, args) == 0)
+ return -EINVAL;
+
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
+
+ if (args->version == 4)
+#ifdef CONFIG_NFS_V4
+ return nfs4_validate_text_mount_data(options,
+ args, dev_name);
+#else
+ goto out_v4_not_compiled;
+#endif
+
+ nfs_set_port(sap, &args->nfs_server.port, 0);
+
+ nfs_set_mount_transport_protocol(args);
+
+ status = nfs_parse_devname(dev_name,
+ &args->nfs_server.hostname,
+ PAGE_SIZE,
+ &args->nfs_server.export_path,
+ NFS_MAXPATHLEN);
+ if (!status)
+ status = nfs_try_mount(args, mntfh);
+
+ kfree(args->nfs_server.export_path);
+ args->nfs_server.export_path = NULL;
+
+ if (status)
+ return status;
+
+ break;
+ }
+ }
+
+#ifndef CONFIG_NFS_V3
+ if (args->version == 3)
+ goto out_v3_not_compiled;
+#endif /* !CONFIG_NFS_V3 */
+
+ return 0;
+
+out_no_data:
+ dfprintk(MOUNT, "NFS: mount program didn't pass any mount data\n");
+ return -EINVAL;
+
+out_no_v3:
+ dfprintk(MOUNT, "NFS: nfs_mount_data version %d does not support v3\n",
+ data->version);
+ return -EINVAL;
+
+out_no_sec:
+ dfprintk(MOUNT, "NFS: nfs_mount_data version supports only AUTH_SYS\n");
+ return -EINVAL;
+
+#ifndef CONFIG_NFS_V3
+out_v3_not_compiled:
+ dfprintk(MOUNT, "NFS: NFSv3 is not compiled into kernel\n");
+ return -EPROTONOSUPPORT;
+#endif /* !CONFIG_NFS_V3 */
+
+#ifndef CONFIG_NFS_V4
+out_v4_not_compiled:
+ dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
+ return -EPROTONOSUPPORT;
+#endif /* !CONFIG_NFS_V4 */
+
+out_nomem:
+ dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
+ return -ENOMEM;
+
+out_no_address:
+ dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
+ return -EINVAL;
+
+out_invalid_fh:
+ dfprintk(MOUNT, "NFS: invalid root filehandle\n");
+ return -EINVAL;
+}
+
+static int
+nfs_compare_remount_data(struct nfs_server *nfss,
+ struct nfs_parsed_mount_data *data)
+{
+ if (data->flags != nfss->flags ||
+ data->rsize != nfss->rsize ||
+ data->wsize != nfss->wsize ||
+ data->retrans != nfss->client->cl_timeout->to_retries ||
+ data->auth_flavors[0] != nfss->client->cl_auth->au_flavor ||
+ data->acregmin != nfss->acregmin / HZ ||
+ data->acregmax != nfss->acregmax / HZ ||
+ data->acdirmin != nfss->acdirmin / HZ ||
+ data->acdirmax != nfss->acdirmax / HZ ||
+ data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
+ data->nfs_server.port != nfss->port ||
+ data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
+ !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
+ (struct sockaddr *)&nfss->nfs_client->cl_addr))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nfs_remount(struct super_block *sb, int *flags, char *raw_data)
+{
+ int error;
+ struct nfs_server *nfss = sb->s_fs_info;
+ struct nfs_parsed_mount_data *data;
+ struct nfs_mount_data *options = (struct nfs_mount_data *)raw_data;
+ struct nfs4_mount_data *options4 = (struct nfs4_mount_data *)raw_data;
+ u32 nfsvers = nfss->nfs_client->rpc_ops->version;
+
+ /*
+ * Userspace mount programs that send binary options generally send
+ * them populated with default values. We have no way to know which
+ * ones were explicitly specified. Fall back to legacy behavior and
+ * just return success.
+ */
+ if ((nfsvers == 4 && (!options4 || options4->version == 1)) ||
+ (nfsvers <= 3 && (!options || (options->version >= 1 &&
+ options->version <= 6))))
+ return 0;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ /* fill out struct with values from existing mount */
+ data->flags = nfss->flags;
+ data->rsize = nfss->rsize;
+ data->wsize = nfss->wsize;
+ data->retrans = nfss->client->cl_timeout->to_retries;
+ data->auth_flavors[0] = nfss->client->cl_auth->au_flavor;
+ data->acregmin = nfss->acregmin / HZ;
+ data->acregmax = nfss->acregmax / HZ;
+ data->acdirmin = nfss->acdirmin / HZ;
+ data->acdirmax = nfss->acdirmax / HZ;
+ data->timeo = 10U * nfss->client->cl_timeout->to_initval / HZ;
+ data->nfs_server.port = nfss->port;
+ data->nfs_server.addrlen = nfss->nfs_client->cl_addrlen;
+ memcpy(&data->nfs_server.address, &nfss->nfs_client->cl_addr,
+ data->nfs_server.addrlen);
+
+ /* overwrite those values with any that were specified */
+ error = nfs_parse_mount_options((char *)options, data);
+ if (error < 0)
+ goto out;
+
+ /*
+ * noac is a special case. It implies -o sync, but that's not
+ * necessarily reflected in the mtab options. do_remount_sb
+ * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the
+ * remount options, so we have to explicitly reset it.
+ */
+ if (data->flags & NFS_MOUNT_NOAC)
+ *flags |= MS_SYNCHRONOUS;
+
+ /* compare new mount options with old ones */
+ error = nfs_compare_remount_data(nfss, data);
+out:
+ kfree(data);
+ return error;
+}
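+/*
+ * Editor's note (not part of the original patch): the net effect of
+ * the comparison above is that NFS does not support changing these
+ * options on remount.  Assuming an existing mount with rsize=32768,
+ * something like
+ *
+ *	mount -o remount,rsize=8192 server:/export /mnt
+ *
+ * fails with -EINVAL, while a remount that leaves the NFS-specific
+ * options unchanged succeeds (generic MS_* flag changes are handled
+ * by do_remount_sb in the VFS).
+ */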
+
+/*
+ * Initialise the common bits of the superblock
+ */
+static inline void nfs_initialise_sb(struct super_block *sb)
+{
+ struct nfs_server *server = NFS_SB(sb);
+
+ sb->s_magic = NFS_SUPER_MAGIC;
+
+ /* We probably want something more informative here */
+ snprintf(sb->s_id, sizeof(sb->s_id),
+ "%u:%u", MAJOR(sb->s_dev), MINOR(sb->s_dev));
+
+ if (sb->s_blocksize == 0)
+ sb->s_blocksize = nfs_block_bits(server->wsize,
+ &sb->s_blocksize_bits);
+
+ sb->s_bdi = &server->backing_dev_info;
+
+ nfs_super_set_maxbytes(sb, server->maxfilesize);
+}
+
+/*
+ * Finish setting up an NFS2/3 superblock
+ */
+static void nfs_fill_super(struct super_block *sb,
+ struct nfs_parsed_mount_data *data)
+{
+ struct nfs_server *server = NFS_SB(sb);
+
+ sb->s_blocksize_bits = 0;
+ sb->s_blocksize = 0;
+ if (data->bsize)
+ sb->s_blocksize = nfs_block_size(data->bsize, &sb->s_blocksize_bits);
+
+ if (server->nfs_client->rpc_ops->version == 3) {
+ /* The VFS shouldn't apply the umask to mode bits. We will do
+ * so ourselves when necessary.
+ */
+ sb->s_flags |= MS_POSIXACL;
+ sb->s_time_gran = 1;
+ }
+
+ sb->s_op = &nfs_sops;
+ nfs_initialise_sb(sb);
+}
+
+/*
+ * Finish setting up a cloned NFS2/3 superblock
+ */
+static void nfs_clone_super(struct super_block *sb,
+ const struct super_block *old_sb)
+{
+ struct nfs_server *server = NFS_SB(sb);
+
+ sb->s_blocksize_bits = old_sb->s_blocksize_bits;
+ sb->s_blocksize = old_sb->s_blocksize;
+ sb->s_maxbytes = old_sb->s_maxbytes;
+
+ if (server->nfs_client->rpc_ops->version == 3) {
+ /* The VFS shouldn't apply the umask to mode bits. We will do
+ * so ourselves when necessary.
+ */
+ sb->s_flags |= MS_POSIXACL;
+ sb->s_time_gran = 1;
+ }
+
+ sb->s_op = old_sb->s_op;
+ nfs_initialise_sb(sb);
+}
+
+static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags)
+{
+ const struct nfs_server *a = s->s_fs_info;
+ const struct rpc_clnt *clnt_a = a->client;
+ const struct rpc_clnt *clnt_b = b->client;
+
+ if ((s->s_flags & NFS_MS_MASK) != (flags & NFS_MS_MASK))
+ goto Ebusy;
+ if (a->nfs_client != b->nfs_client)
+ goto Ebusy;
+ if (a->flags != b->flags)
+ goto Ebusy;
+ if (a->wsize != b->wsize)
+ goto Ebusy;
+ if (a->rsize != b->rsize)
+ goto Ebusy;
+ if (a->acregmin != b->acregmin)
+ goto Ebusy;
+ if (a->acregmax != b->acregmax)
+ goto Ebusy;
+ if (a->acdirmin != b->acdirmin)
+ goto Ebusy;
+ if (a->acdirmax != b->acdirmax)
+ goto Ebusy;
+ if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
+ goto Ebusy;
+ return 1;
+Ebusy:
+ return 0;
+}
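+/*
+ * Illustration (editor's note, not in the original patch): two mounts
+ * of the same export that differ in any of the options above (for
+ * example rsize=4096 vs rsize=32768) fail this comparison, so they
+ * end up on separate superblocks instead of sharing one.
+ */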
+
+struct nfs_sb_mountdata {
+ struct nfs_server *server;
+ int mntflags;
+};
+
+static int nfs_set_super(struct super_block *s, void *data)
+{
+ struct nfs_sb_mountdata *sb_mntdata = data;
+ struct nfs_server *server = sb_mntdata->server;
+ int ret;
+
+ s->s_flags = sb_mntdata->mntflags;
+ s->s_fs_info = server;
+ s->s_d_op = server->nfs_client->rpc_ops->dentry_ops;
+ ret = set_anon_super(s, server);
+ if (ret == 0)
+ server->s_dev = s->s_dev;
+ return ret;
+}
+
+static int nfs_compare_super_address(struct nfs_server *server1,
+ struct nfs_server *server2)
+{
+ struct sockaddr *sap1, *sap2;
+
+ sap1 = (struct sockaddr *)&server1->nfs_client->cl_addr;
+ sap2 = (struct sockaddr *)&server2->nfs_client->cl_addr;
+
+ if (sap1->sa_family != sap2->sa_family)
+ return 0;
+
+ switch (sap1->sa_family) {
+ case AF_INET: {
+ struct sockaddr_in *sin1 = (struct sockaddr_in *)sap1;
+ struct sockaddr_in *sin2 = (struct sockaddr_in *)sap2;
+ if (sin1->sin_addr.s_addr != sin2->sin_addr.s_addr)
+ return 0;
+ if (sin1->sin_port != sin2->sin_port)
+ return 0;
+ break;
+ }
+ case AF_INET6: {
+ struct sockaddr_in6 *sin1 = (struct sockaddr_in6 *)sap1;
+ struct sockaddr_in6 *sin2 = (struct sockaddr_in6 *)sap2;
+ if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr))
+ return 0;
+ if (sin1->sin6_port != sin2->sin6_port)
+ return 0;
+ break;
+ }
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+static int nfs_compare_super(struct super_block *sb, void *data)
+{
+ struct nfs_sb_mountdata *sb_mntdata = data;
+ struct nfs_server *server = sb_mntdata->server, *old = NFS_SB(sb);
+ int mntflags = sb_mntdata->mntflags;
+
+ if (!nfs_compare_super_address(old, server))
+ return 0;
+ /* Note: NFS_MOUNT_UNSHARED == NFS4_MOUNT_UNSHARED */
+ if (old->flags & NFS_MOUNT_UNSHARED)
+ return 0;
+ if (memcmp(&old->fsid, &server->fsid, sizeof(old->fsid)) != 0)
+ return 0;
+ return nfs_compare_mount_options(sb, server, mntflags);
+}
+
+static int nfs_bdi_register(struct nfs_server *server)
+{
+ return bdi_register_dev(&server->backing_dev_info, server->s_dev);
+}
+
+static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data)
+{
+ struct nfs_server *server = NULL;
+ struct super_block *s;
+ struct nfs_parsed_mount_data *data;
+ struct nfs_fh *mntfh;
+ struct dentry *mntroot = ERR_PTR(-ENOMEM);
+ int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
+ int error;
+
+ data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
+ mntfh = nfs_alloc_fhandle();
+ if (data == NULL || mntfh == NULL)
+ goto out;
+
+ /* Validate the mount data */
+ error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
+ if (error < 0) {
+ mntroot = ERR_PTR(error);
+ goto out;
+ }
+
+#ifdef CONFIG_NFS_V4
+ if (data->version == 4) {
+ mntroot = nfs4_try_mount(flags, dev_name, data);
+ goto out;
+ }
+#endif /* CONFIG_NFS_V4 */
+
+ /* Get a volume representation */
+ server = nfs_create_server(data, mntfh);
+ if (IS_ERR(server)) {
+ mntroot = ERR_CAST(server);
+ goto out;
+ }
+ sb_mntdata.server = server;
+
+ if (server->flags & NFS_MOUNT_UNSHARED)
+ compare_super = NULL;
+
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
+ /* Get a superblock - note that we may end up sharing one that already exists */
+ s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
+ if (IS_ERR(s)) {
+ mntroot = ERR_CAST(s);
+ goto out_err_nosb;
+ }
+
+ if (s->s_fs_info != server) {
+ nfs_free_server(server);
+ server = NULL;
+ } else {
+ error = nfs_bdi_register(server);
+ if (error) {
+ mntroot = ERR_PTR(error);
+ goto error_splat_bdi;
+ }
+ }
+
+ if (!s->s_root) {
+ /* initial superblock/root creation */
+ nfs_fill_super(s, data);
+ nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
+ }
+
+ mntroot = nfs_get_root(s, mntfh, dev_name);
+ if (IS_ERR(mntroot))
+ goto error_splat_super;
+
+ error = security_sb_set_mnt_opts(s, &data->lsm_opts);
+ if (error)
+ goto error_splat_root;
+
+ s->s_flags |= MS_ACTIVE;
+
+out:
+ nfs_free_parsed_mount_data(data);
+ nfs_free_fhandle(mntfh);
+ return mntroot;
+
+out_err_nosb:
+ nfs_free_server(server);
+ goto out;
+
+error_splat_root:
+ dput(mntroot);
+ mntroot = ERR_PTR(error);
+error_splat_super:
+ if (server && !s->s_root)
+ bdi_unregister(&server->backing_dev_info);
+error_splat_bdi:
+ deactivate_locked_super(s);
+ goto out;
+}
+
+/*
+ * Ensure that we unregister the bdi before kill_anon_super
+ * releases the device name
+ */
+static void nfs_put_super(struct super_block *s)
+{
+ struct nfs_server *server = NFS_SB(s);
+
+ bdi_unregister(&server->backing_dev_info);
+}
+
+/*
+ * Destroy an NFS2/3 superblock
+ */
+static void nfs_kill_super(struct super_block *s)
+{
+ struct nfs_server *server = NFS_SB(s);
+
+ kill_anon_super(s);
+ nfs_fscache_release_super_cookie(s);
+ nfs_free_server(server);
+}
+
+/*
+ * Clone an NFS2/3 server record on xdev traversal (FSID-change)
+ */
+static struct dentry *
+nfs_xdev_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *raw_data)
+{
+ struct nfs_clone_mount *data = raw_data;
+ struct super_block *s;
+ struct nfs_server *server;
+ struct dentry *mntroot;
+ int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
+ int error;
+
+ dprintk("--> nfs_xdev_mount()\n");
+
+ /* create a new volume representation */
+ server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
+ if (IS_ERR(server)) {
+ error = PTR_ERR(server);
+ goto out_err_noserver;
+ }
+ sb_mntdata.server = server;
+
+ if (server->flags & NFS_MOUNT_UNSHARED)
+ compare_super = NULL;
+
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
+ /* Get a superblock - note that we may end up sharing one that already exists */
+ s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+ if (IS_ERR(s)) {
+ error = PTR_ERR(s);
+ goto out_err_nosb;
+ }
+
+ if (s->s_fs_info != server) {
+ nfs_free_server(server);
+ server = NULL;
+ } else {
+ error = nfs_bdi_register(server);
+ if (error)
+ goto error_splat_bdi;
+ }
+
+ if (!s->s_root) {
+ /* initial superblock/root creation */
+ nfs_clone_super(s, data->sb);
+ nfs_fscache_get_super_cookie(s, NULL, data);
+ }
+
+ mntroot = nfs_get_root(s, data->fh, dev_name);
+ if (IS_ERR(mntroot)) {
+ error = PTR_ERR(mntroot);
+ goto error_splat_super;
+ }
+ if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
+ dput(mntroot);
+ error = -ESTALE;
+ goto error_splat_super;
+ }
+
+ s->s_flags |= MS_ACTIVE;
+
+ /* clone any lsm security options from the parent to the new sb */
+ security_sb_clone_mnt_opts(data->sb, s);
+
+ dprintk("<-- nfs_xdev_mount() = 0\n");
+ return mntroot;
+
+out_err_nosb:
+ nfs_free_server(server);
+out_err_noserver:
+ dprintk("<-- nfs_xdev_mount() = %d [error]\n", error);
+ return ERR_PTR(error);
+
+error_splat_super:
+ if (server && !s->s_root)
+ bdi_unregister(&server->backing_dev_info);
+error_splat_bdi:
+ deactivate_locked_super(s);
+ dprintk("<-- nfs_xdev_mount() = %d [splat]\n", error);
+ return ERR_PTR(error);
+}
+
+#ifdef CONFIG_NFS_V4
+
+/*
+ * Finish setting up a cloned NFS4 superblock
+ */
+static void nfs4_clone_super(struct super_block *sb,
+ const struct super_block *old_sb)
+{
+ sb->s_blocksize_bits = old_sb->s_blocksize_bits;
+ sb->s_blocksize = old_sb->s_blocksize;
+ sb->s_maxbytes = old_sb->s_maxbytes;
+ sb->s_time_gran = 1;
+ sb->s_op = old_sb->s_op;
+ /*
+ * The VFS shouldn't apply the umask to mode bits. We will do
+ * so ourselves when necessary.
+ */
+ sb->s_flags |= MS_POSIXACL;
+ sb->s_xattr = old_sb->s_xattr;
+ nfs_initialise_sb(sb);
+}
+
+/*
+ * Set up an NFS4 superblock
+ */
+static void nfs4_fill_super(struct super_block *sb)
+{
+ sb->s_time_gran = 1;
+ sb->s_op = &nfs4_sops;
+ /*
+ * The VFS shouldn't apply the umask to mode bits. We will do
+ * so ourselves when necessary.
+ */
+ sb->s_flags |= MS_POSIXACL;
+ sb->s_xattr = nfs4_xattr_handlers;
+ nfs_initialise_sb(sb);
+}
+
+static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
+{
+ args->flags &= ~(NFS_MOUNT_NONLM|NFS_MOUNT_NOACL|NFS_MOUNT_VER3|
+ NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL);
+}
+
+static int nfs4_validate_text_mount_data(void *options,
+ struct nfs_parsed_mount_data *args,
+ const char *dev_name)
+{
+ struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+
+ nfs_set_port(sap, &args->nfs_server.port, NFS_PORT);
+
+ nfs_validate_transport_protocol(args);
+
+ nfs4_validate_mount_flags(args);
+
+ if (args->version != 4) {
+ dfprintk(MOUNT,
+ "NFS4: Illegal mount version\n");
+ return -EINVAL;
+ }
+
+ if (args->auth_flavor_len > 1) {
+ dfprintk(MOUNT,
+ "NFS4: Too many RPC auth flavours specified\n");
+ return -EINVAL;
+ }
+
+ return nfs_parse_devname(dev_name,
+ &args->nfs_server.hostname,
+ NFS4_MAXNAMLEN,
+ &args->nfs_server.export_path,
+ NFS4_MAXPATHLEN);
+}
+
+/*
+ * Validate NFSv4 mount options
+ */
+static int nfs4_validate_mount_data(void *options,
+ struct nfs_parsed_mount_data *args,
+ const char *dev_name)
+{
+ struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
+ struct nfs4_mount_data *data = (struct nfs4_mount_data *)options;
+ char *c;
+
+ if (data == NULL)
+ goto out_no_data;
+
+ switch (data->version) {
+ case 1:
+ if (data->host_addrlen > sizeof(args->nfs_server.address))
+ goto out_no_address;
+ if (data->host_addrlen == 0)
+ goto out_no_address;
+ args->nfs_server.addrlen = data->host_addrlen;
+ if (copy_from_user(sap, data->host_addr, data->host_addrlen))
+ return -EFAULT;
+ if (!nfs_verify_server_address(sap))
+ goto out_no_address;
+
+ if (data->auth_flavourlen) {
+ if (data->auth_flavourlen > 1)
+ goto out_inval_auth;
+ if (copy_from_user(&args->auth_flavors[0],
+ data->auth_flavours,
+ sizeof(args->auth_flavors[0])))
+ return -EFAULT;
+ }
+
+ c = strndup_user(data->hostname.data, NFS4_MAXNAMLEN);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ args->nfs_server.hostname = c;
+
+ c = strndup_user(data->mnt_path.data, NFS4_MAXPATHLEN);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ args->nfs_server.export_path = c;
+ dfprintk(MOUNT, "NFS: MNTPATH: '%s'\n", c);
+
+ c = strndup_user(data->client_addr.data, 16);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ args->client_address = c;
+
+ /*
+ * Translate to nfs_parsed_mount_data, which nfs4_fill_super
+ * can deal with.
+ */
+
+ args->flags = data->flags & NFS4_MOUNT_FLAGMASK;
+ args->rsize = data->rsize;
+ args->wsize = data->wsize;
+ args->timeo = data->timeo;
+ args->retrans = data->retrans;
+ args->acregmin = data->acregmin;
+ args->acregmax = data->acregmax;
+ args->acdirmin = data->acdirmin;
+ args->acdirmax = data->acdirmax;
+ args->nfs_server.protocol = data->proto;
+ nfs_validate_transport_protocol(args);
+
+ break;
+ default:
+ if (nfs_parse_mount_options((char *)options, args) == 0)
+ return -EINVAL;
+
+ if (!nfs_verify_server_address(sap))
+ return -EINVAL;
+
+ return nfs4_validate_text_mount_data(options, args, dev_name);
+ }
+
+ return 0;
+
+out_no_data:
+ dfprintk(MOUNT, "NFS4: mount program didn't pass any mount data\n");
+ return -EINVAL;
+
+out_inval_auth:
+ dfprintk(MOUNT, "NFS4: Invalid number of RPC auth flavours %d\n",
+ data->auth_flavourlen);
+ return -EINVAL;
+
+out_no_address:
+ dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n");
+ return -EINVAL;
+}
+
+/*
+ * Get the superblock for the NFS4 root partition
+ */
+static struct dentry *
+nfs4_remote_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *raw_data)
+{
+ struct nfs_parsed_mount_data *data = raw_data;
+ struct super_block *s;
+ struct nfs_server *server;
+ struct nfs_fh *mntfh;
+ struct dentry *mntroot;
+ int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
+ int error = -ENOMEM;
+
+ mntfh = nfs_alloc_fhandle();
+ if (data == NULL || mntfh == NULL)
+ goto out;
+
+ /* Get a volume representation */
+ server = nfs4_create_server(data, mntfh);
+ if (IS_ERR(server)) {
+ error = PTR_ERR(server);
+ goto out;
+ }
+ sb_mntdata.server = server;
+
+ if (server->flags & NFS4_MOUNT_UNSHARED)
+ compare_super = NULL;
+
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
+ /* Get a superblock - note that we may end up sharing one that already exists */
+ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+ if (IS_ERR(s)) {
+ error = PTR_ERR(s);
+ goto out_free;
+ }
+
+ if (s->s_fs_info != server) {
+ nfs_free_server(server);
+ server = NULL;
+ } else {
+ error = nfs_bdi_register(server);
+ if (error)
+ goto error_splat_bdi;
+ }
+
+ if (!s->s_root) {
+ /* initial superblock/root creation */
+ nfs4_fill_super(s);
+ nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
+ }
+
+ mntroot = nfs4_get_root(s, mntfh, dev_name);
+ if (IS_ERR(mntroot)) {
+ error = PTR_ERR(mntroot);
+ goto error_splat_super;
+ }
+
+ error = security_sb_set_mnt_opts(s, &data->lsm_opts);
+ if (error)
+ goto error_splat_root;
+
+ s->s_flags |= MS_ACTIVE;
+
+ nfs_free_fhandle(mntfh);
+ return mntroot;
+
+out:
+ nfs_free_fhandle(mntfh);
+ return ERR_PTR(error);
+
+out_free:
+ nfs_free_server(server);
+ goto out;
+
+error_splat_root:
+ dput(mntroot);
+error_splat_super:
+ if (server && !s->s_root)
+ bdi_unregister(&server->backing_dev_info);
+error_splat_bdi:
+ deactivate_locked_super(s);
+ goto out;
+}
+
+static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
+ int flags, void *data, const char *hostname)
+{
+ struct vfsmount *root_mnt;
+ char *root_devname;
+ size_t len;
+
+ len = strlen(hostname) + 5;
+ root_devname = kmalloc(len, GFP_KERNEL);
+ if (root_devname == NULL)
+ return ERR_PTR(-ENOMEM);
+ /* Does the hostname need to be enclosed in brackets? */
+ if (strchr(hostname, ':'))
+ snprintf(root_devname, len, "[%s]:/", hostname);
+ else
+ snprintf(root_devname, len, "%s:/", hostname);
+ root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data);
+ kfree(root_devname);
+ return root_mnt;
+}
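+/*
+ * Illustration (editor's note, not in the original patch): the "+ 5"
+ * above covers the worst case of '[', ']', ':', '/' plus the
+ * terminating NUL.  An IPv6 literal contains ':' and is therefore
+ * bracketed:
+ *
+ *	"server.example.com"  ->  "server.example.com:/"
+ *	"fec0::1"             ->  "[fec0::1]:/"
+ */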
+
+struct nfs_referral_count {
+ struct list_head list;
+ const struct task_struct *task;
+ unsigned int referral_count;
+};
+
+static LIST_HEAD(nfs_referral_count_list);
+static DEFINE_SPINLOCK(nfs_referral_count_list_lock);
+
+static struct nfs_referral_count *nfs_find_referral_count(void)
+{
+ struct nfs_referral_count *p;
+
+ list_for_each_entry(p, &nfs_referral_count_list, list) {
+ if (p->task == current)
+ return p;
+ }
+ return NULL;
+}
+
+#define NFS_MAX_NESTED_REFERRALS 2
+
+static int nfs_referral_loop_protect(void)
+{
+ struct nfs_referral_count *p, *new;
+ int ret = -ENOMEM;
+
+ new = kmalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ goto out;
+ new->task = current;
+ new->referral_count = 1;
+
+ ret = 0;
+ spin_lock(&nfs_referral_count_list_lock);
+ p = nfs_find_referral_count();
+ if (p != NULL) {
+ if (p->referral_count >= NFS_MAX_NESTED_REFERRALS)
+ ret = -ELOOP;
+ else
+ p->referral_count++;
+ } else {
+ list_add(&new->list, &nfs_referral_count_list);
+ new = NULL;
+ }
+ spin_unlock(&nfs_referral_count_list_lock);
+ kfree(new);
+out:
+ return ret;
+}
+
+static void nfs_referral_loop_unprotect(void)
+{
+ struct nfs_referral_count *p;
+
+ spin_lock(&nfs_referral_count_list_lock);
+ p = nfs_find_referral_count();
+ p->referral_count--;
+ if (p->referral_count == 0)
+ list_del(&p->list);
+ else
+ p = NULL;
+ spin_unlock(&nfs_referral_count_list_lock);
+ kfree(p);
+}
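+/*
+ * Editor's sketch of the scheme above (not in the original patch):
+ * each task may have at most NFS_MAX_NESTED_REFERRALS (2) referral
+ * mounts in flight.  If following referral A leads to referral B, the
+ * task's count reaches 2; a third nested referral then fails with
+ * -ELOOP rather than recursing forever through a referral cycle.
+ */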
+
+static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
+ const char *export_path)
+{
+ struct dentry *dentry;
+ int err;
+
+ if (IS_ERR(root_mnt))
+ return ERR_CAST(root_mnt);
+
+ err = nfs_referral_loop_protect();
+ if (err) {
+ mntput(root_mnt);
+ return ERR_PTR(err);
+ }
+
+ dentry = mount_subtree(root_mnt, export_path);
+ nfs_referral_loop_unprotect();
+
+ return dentry;
+}
+
+static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
+ struct nfs_parsed_mount_data *data)
+{
+ char *export_path;
+ struct vfsmount *root_mnt;
+ struct dentry *res;
+
+ dfprintk(MOUNT, "--> nfs4_try_mount()\n");
+
+ export_path = data->nfs_server.export_path;
+ data->nfs_server.export_path = "/";
+ root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data,
+ data->nfs_server.hostname);
+ data->nfs_server.export_path = export_path;
+
+ res = nfs_follow_remote_path(root_mnt, export_path);
+
+ dfprintk(MOUNT, "<-- nfs4_try_mount() = %ld%s\n",
+ IS_ERR(res) ? PTR_ERR(res) : 0,
+ IS_ERR(res) ? " [error]" : "");
+ return res;
+}
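+/*
+ * Editor's note (not in the original patch): for a device name such
+ * as "server:/export/home" the code above first mounts the server's
+ * pseudo-root "server:/" (via nfs4_remote_fs_type, presumably backed
+ * by nfs4_remote_mount() above), then walks "/export/home" inside
+ * that mount with nfs_follow_remote_path()/mount_subtree(), crossing
+ * any referrals encountered on the way.
+ */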
+
+/*
+ * Get the superblock for an NFS4 mountpoint
+ */
+static struct dentry *nfs4_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data)
+{
+ struct nfs_parsed_mount_data *data;
+ int error = -ENOMEM;
+ struct dentry *res = ERR_PTR(-ENOMEM);
+
+ data = nfs_alloc_parsed_mount_data(4);
+ if (data == NULL)
+ goto out;
+
+ /* Validate the mount data */
+ error = nfs4_validate_mount_data(raw_data, data, dev_name);
+ if (error < 0) {
+ res = ERR_PTR(error);
+ goto out;
+ }
+
+ res = nfs4_try_mount(flags, dev_name, data);
+ if (IS_ERR(res))
+ error = PTR_ERR(res);
+
+out:
+ nfs_free_parsed_mount_data(data);
+ dprintk("<-- nfs4_mount() = %d%s\n", error,
+ error != 0 ? " [error]" : "");
+ return res;
+}
+
+static void nfs4_kill_super(struct super_block *sb)
+{
+ struct nfs_server *server = NFS_SB(sb);
+
+ dprintk("--> %s\n", __func__);
+ nfs_super_return_all_delegations(sb);
+ kill_anon_super(sb);
+ nfs_fscache_release_super_cookie(sb);
+ nfs_free_server(server);
+ dprintk("<-- %s\n", __func__);
+}
+
+/*
+ * Clone an NFS4 server record on xdev traversal (FSID-change)
+ */
+static struct dentry *
+nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *raw_data)
+{
+ struct nfs_clone_mount *data = raw_data;
+ struct super_block *s;
+ struct nfs_server *server;
+ struct dentry *mntroot;
+ int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
+ int error;
+
+ dprintk("--> nfs4_xdev_mount()\n");
+
+ /* create a new volume representation */
+ server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
+ if (IS_ERR(server)) {
+ error = PTR_ERR(server);
+ goto out_err_noserver;
+ }
+ sb_mntdata.server = server;
+
+ if (server->flags & NFS4_MOUNT_UNSHARED)
+ compare_super = NULL;
+
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
+ /* Get a superblock - note that we may end up sharing one that already exists */
+ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+ if (IS_ERR(s)) {
+ error = PTR_ERR(s);
+ goto out_err_nosb;
+ }
+
+ if (s->s_fs_info != server) {
+ nfs_free_server(server);
+ server = NULL;
+ } else {
+ error = nfs_bdi_register(server);
+ if (error)
+ goto error_splat_bdi;
+ }
+
+ if (!s->s_root) {
+ /* initial superblock/root creation */
+ nfs4_clone_super(s, data->sb);
+ nfs_fscache_get_super_cookie(s, NULL, data);
+ }
+
+ mntroot = nfs4_get_root(s, data->fh, dev_name);
+ if (IS_ERR(mntroot)) {
+ error = PTR_ERR(mntroot);
+ goto error_splat_super;
+ }
+ if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
+ dput(mntroot);
+ error = -ESTALE;
+ goto error_splat_super;
+ }
+
+ s->s_flags |= MS_ACTIVE;
+
+ security_sb_clone_mnt_opts(data->sb, s);
+
+ dprintk("<-- nfs4_xdev_mount() = 0\n");
+ return mntroot;
+
+out_err_nosb:
+ nfs_free_server(server);
+out_err_noserver:
+ dprintk("<-- nfs4_xdev_mount() = %d [error]\n", error);
+ return ERR_PTR(error);
+
+error_splat_super:
+ if (server && !s->s_root)
+ bdi_unregister(&server->backing_dev_info);
+error_splat_bdi:
+ deactivate_locked_super(s);
+ dprintk("<-- nfs4_xdev_mount() = %d [splat]\n", error);
+ return ERR_PTR(error);
+}
+
+static struct dentry *
+nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *raw_data)
+{
+ struct nfs_clone_mount *data = raw_data;
+ struct super_block *s;
+ struct nfs_server *server;
+ struct dentry *mntroot;
+ struct nfs_fh *mntfh;
+ int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
+ struct nfs_sb_mountdata sb_mntdata = {
+ .mntflags = flags,
+ };
+ int error = -ENOMEM;
+
+ dprintk("--> nfs4_referral_get_sb()\n");
+
+ mntfh = nfs_alloc_fhandle();
+ if (mntfh == NULL)
+ goto out_err_nofh;
+
+ /* create a new volume representation */
+ server = nfs4_create_referral_server(data, mntfh);
+ if (IS_ERR(server)) {
+ error = PTR_ERR(server);
+ goto out_err_noserver;
+ }
+ sb_mntdata.server = server;
+
+ if (server->flags & NFS4_MOUNT_UNSHARED)
+ compare_super = NULL;
+
+ /* -o noac implies -o sync */
+ if (server->flags & NFS_MOUNT_NOAC)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
+ /* Get a superblock - note that we may end up sharing one that already exists */
+ s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
+ if (IS_ERR(s)) {
+ error = PTR_ERR(s);
+ goto out_err_nosb;
+ }
+
+ if (s->s_fs_info != server) {
+ nfs_free_server(server);
+ server = NULL;
+ } else {
+ error = nfs_bdi_register(server);
+ if (error)
+ goto error_splat_bdi;
+ }
+
+ if (!s->s_root) {
+ /* initial superblock/root creation */
+ nfs4_fill_super(s);
+ nfs_fscache_get_super_cookie(s, NULL, data);
+ }
+
+ mntroot = nfs4_get_root(s, mntfh, dev_name);
+ if (IS_ERR(mntroot)) {
+ error = PTR_ERR(mntroot);
+ goto error_splat_super;
+ }
+ if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
+ dput(mntroot);
+ error = -ESTALE;
+ goto error_splat_super;
+ }
+
+ s->s_flags |= MS_ACTIVE;
+
+ security_sb_clone_mnt_opts(data->sb, s);
+
+ nfs_free_fhandle(mntfh);
+ dprintk("<-- nfs4_referral_get_sb() = 0\n");
+ return mntroot;
+
+out_err_nosb:
+ nfs_free_server(server);
+out_err_noserver:
+ nfs_free_fhandle(mntfh);
+out_err_nofh:
+ dprintk("<-- nfs4_referral_get_sb() = %d [error]\n", error);
+ return ERR_PTR(error);
+
+error_splat_super:
+ if (server && !s->s_root)
+ bdi_unregister(&server->backing_dev_info);
+error_splat_bdi:
+ deactivate_locked_super(s);
+ nfs_free_fhandle(mntfh);
+ dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
+ return ERR_PTR(error);
+}
+
+/*
+ * Create an NFS4 server record on referral traversal
+ */
+static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data)
+{
+ struct nfs_clone_mount *data = raw_data;
+ char *export_path;
+ struct vfsmount *root_mnt;
+ struct dentry *res;
+
+ dprintk("--> nfs4_referral_mount()\n");
+
+ export_path = data->mnt_path;
+ data->mnt_path = "/";
+
+ root_mnt = nfs_do_root_mount(&nfs4_remote_referral_fs_type,
+ flags, data, data->hostname);
+ data->mnt_path = export_path;
+
+ res = nfs_follow_remote_path(root_mnt, export_path);
+ dprintk("<-- nfs4_referral_mount() = %ld%s\n",
+ IS_ERR(res) ? PTR_ERR(res) : 0,
+ IS_ERR(res) ? " [error]" : "");
+ return res;
+}
+
+#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/symlink.c b/fs/nfs/symlink.c
new file mode 100644
index 00000000..05c9e02f
--- /dev/null
+++ b/fs/nfs/symlink.c
@@ -0,0 +1,78 @@
+/*
+ * linux/fs/nfs/symlink.c
+ *
+ * Copyright (C) 1992 Rick Sladkey
+ *
+ * Optimization changes Copyright (C) 1994 Florian La Roche
+ *
+ * Jun 7 1999, cache symlink lookups in the page cache. -DaveM
+ *
+ * nfs symlink handling code
+ */
+
+#include <linux/time.h>
+#include <linux/errno.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs.h>
+#include <linux/nfs2.h>
+#include <linux/nfs_fs.h>
+#include <linux/pagemap.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/namei.h>
+
+/* Symlink caching in the page cache is even simpler and more
+ * straightforward than readdir caching.
+ */
+
+static int nfs_symlink_filler(struct inode *inode, struct page *page)
+{
+ int error;
+
+ error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
+ if (error < 0)
+ goto error;
+ SetPageUptodate(page);
+ unlock_page(page);
+ return 0;
+
+error:
+ SetPageError(page);
+ unlock_page(page);
+ return -EIO;
+}
+
+static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct inode *inode = dentry->d_inode;
+ struct page *page;
+ void *err;
+
+ err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
+ if (err)
+ goto read_failed;
+ page = read_cache_page(&inode->i_data, 0,
+ (filler_t *)nfs_symlink_filler, inode);
+ if (IS_ERR(page)) {
+ err = page;
+ goto read_failed;
+ }
+ nd_set_link(nd, kmap(page));
+ return page;
+
+read_failed:
+ nd_set_link(nd, err);
+ return NULL;
+}
+
+/*
+ * symlinks can't do much...
+ */
+const struct inode_operations nfs_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = nfs_follow_link,
+ .put_link = page_put_link,
+ .getattr = nfs_getattr,
+ .setattr = nfs_setattr,
+};
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
new file mode 100644
index 00000000..ad4d2e78
--- /dev/null
+++ b/fs/nfs/sysctl.c
@@ -0,0 +1,90 @@
+/*
+ * linux/fs/nfs/sysctl.c
+ *
+ * Sysctl interface to NFS parameters
+ */
+#include <linux/types.h>
+#include <linux/linkage.h>
+#include <linux/ctype.h>
+#include <linux/fs.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <linux/nfs4.h>
+#include <linux/nfs_idmap.h>
+#include <linux/nfs_fs.h>
+
+#include "callback.h"
+
+#ifdef CONFIG_NFS_V4
+static const int nfs_set_port_min = 0;
+static const int nfs_set_port_max = 65535;
+#endif
+static struct ctl_table_header *nfs_callback_sysctl_table;
+
+static ctl_table nfs_cb_sysctls[] = {
+#ifdef CONFIG_NFS_V4
+ {
+ .procname = "nfs_callback_tcpport",
+ .data = &nfs_callback_set_tcpport,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (int *)&nfs_set_port_min,
+ .extra2 = (int *)&nfs_set_port_max,
+ },
+ {
+ .procname = "idmap_cache_timeout",
+ .data = &nfs_idmap_cache_timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+#endif
+ {
+ .procname = "nfs_mountpoint_timeout",
+ .data = &nfs_mountpoint_expiry_timeout,
+ .maxlen = sizeof(nfs_mountpoint_expiry_timeout),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "nfs_congestion_kb",
+ .data = &nfs_congestion_kb,
+ .maxlen = sizeof(nfs_congestion_kb),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ { }
+};
+
+static ctl_table nfs_cb_sysctl_dir[] = {
+ {
+ .procname = "nfs",
+ .mode = 0555,
+ .child = nfs_cb_sysctls,
+ },
+ { }
+};
+
+static ctl_table nfs_cb_sysctl_root[] = {
+ {
+ .procname = "fs",
+ .mode = 0555,
+ .child = nfs_cb_sysctl_dir,
+ },
+ { }
+};
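+/*
+ * Editor's note (not in the original patch): the nested tables above
+ * should surface as the following sysctls:
+ *
+ *	/proc/sys/fs/nfs/nfs_callback_tcpport	(NFSv4 only)
+ *	/proc/sys/fs/nfs/idmap_cache_timeout	(NFSv4 only)
+ *	/proc/sys/fs/nfs/nfs_mountpoint_timeout
+ *	/proc/sys/fs/nfs/nfs_congestion_kb
+ */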
+
+int nfs_register_sysctl(void)
+{
+ nfs_callback_sysctl_table = register_sysctl_table(nfs_cb_sysctl_root);
+ if (nfs_callback_sysctl_table == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void nfs_unregister_sysctl(void)
+{
+ unregister_sysctl_table(nfs_callback_sysctl_table);
+ nfs_callback_sysctl_table = NULL;
+}
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
new file mode 100644
index 00000000..3210a033
--- /dev/null
+++ b/fs/nfs/unlink.c
@@ -0,0 +1,558 @@
+/*
+ * linux/fs/nfs/unlink.c
+ *
+ * nfs sillydelete handling
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/dcache.h>
+#include <linux/sunrpc/sched.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs_fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/namei.h>
+
+#include "internal.h"
+#include "nfs4_fs.h"
+#include "iostat.h"
+#include "delegation.h"
+
+/**
+ * nfs_free_unlinkdata - release data from a sillydelete operation.
+ * @data: pointer to unlink structure.
+ */
+static void
+nfs_free_unlinkdata(struct nfs_unlinkdata *data)
+{
+ iput(data->dir);
+ put_rpccred(data->cred);
+ kfree(data->args.name.name);
+ kfree(data);
+}
+
+#define NAME_ALLOC_LEN(len) ((len+16) & ~15)
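+/*
+ * Editor's note (not in the original patch): NAME_ALLOC_LEN() rounds
+ * the allocation up to the next multiple of 16, always leaving at
+ * least one spare byte, e.g.:
+ *
+ *	NAME_ALLOC_LEN(1)  == 16
+ *	NAME_ALLOC_LEN(15) == 16
+ *	NAME_ALLOC_LEN(16) == 32
+ */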
+/**
+ * nfs_copy_dname - copy dentry name to data structure
+ * @dentry: pointer to dentry
+ * @data: nfs_unlinkdata
+ */
+static int nfs_copy_dname(struct dentry *dentry, struct nfs_unlinkdata *data)
+{
+ char *str;
+ int len = dentry->d_name.len;
+
+ str = kmemdup(dentry->d_name.name, NAME_ALLOC_LEN(len), GFP_KERNEL);
+ if (!str)
+ return -ENOMEM;
+ data->args.name.len = len;
+ data->args.name.name = str;
+ return 0;
+}
+
+static void nfs_free_dname(struct nfs_unlinkdata *data)
+{
+ kfree(data->args.name.name);
+ data->args.name.name = NULL;
+ data->args.name.len = 0;
+}
+
+static void nfs_dec_sillycount(struct inode *dir)
+{
+ struct nfs_inode *nfsi = NFS_I(dir);
+ if (atomic_dec_return(&nfsi->silly_count) == 1)
+ wake_up(&nfsi->waitqueue);
+}
+
+/**
+ * nfs_async_unlink_done - Sillydelete post-processing
+ * @task: rpc_task of the sillydelete
+ * @calldata: pointer to the nfs_unlinkdata for the sillydelete
+ *
+ * Do the directory attribute update.
+ */
+static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs_unlinkdata *data = calldata;
+ struct inode *dir = data->dir;
+
+ if (!NFS_PROTO(dir)->unlink_done(task, dir))
+ rpc_restart_call_prepare(task);
+}
+
+/**
+ * nfs_async_unlink_release - Release the sillydelete data.
+ * @calldata: pointer to the nfs_unlinkdata to release
+ *
+ * We need to call nfs_free_unlinkdata() from the 'tk_release' callback,
+ * since the rpc_task itself is about to be freed as well.
+ */
+static void nfs_async_unlink_release(void *calldata)
+{
+ struct nfs_unlinkdata *data = calldata;
+ struct super_block *sb = data->dir->i_sb;
+
+ nfs_dec_sillycount(data->dir);
+ nfs_free_unlinkdata(data);
+ nfs_sb_deactive(sb);
+}
+
+static void nfs_unlink_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_unlinkdata *data = calldata;
+ NFS_PROTO(data->dir)->unlink_rpc_prepare(task, data);
+}
+
+static const struct rpc_call_ops nfs_unlink_ops = {
+ .rpc_call_done = nfs_async_unlink_done,
+ .rpc_release = nfs_async_unlink_release,
+ .rpc_call_prepare = nfs_unlink_prepare,
+};
+
+static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
+{
+ struct rpc_message msg = {
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = data->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_message = &msg,
+ .callback_ops = &nfs_unlink_ops,
+ .callback_data = data,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ };
+ struct rpc_task *task;
+ struct dentry *alias;
+
+ alias = d_lookup(parent, &data->args.name);
+ if (alias != NULL) {
+ int ret;
+ void *devname_garbage = NULL;
+
+ /*
+ * Hey, we raced with lookup... See if we need to transfer
+ * the sillyrename information to the aliased dentry.
+ */
+ nfs_free_dname(data);
+ ret = nfs_copy_dname(alias, data);
+ spin_lock(&alias->d_lock);
+ if (ret == 0 && alias->d_inode != NULL &&
+ !(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
+ devname_garbage = alias->d_fsdata;
+ alias->d_fsdata = data;
+ alias->d_flags |= DCACHE_NFSFS_RENAMED;
+ ret = 1;
+ } else
+ ret = 0;
+ spin_unlock(&alias->d_lock);
+ nfs_dec_sillycount(dir);
+ dput(alias);
+ /*
+ * If we displaced an old cached devname, free it. At this
+ * point the dentry is definitely not a root, so we won't
+ * need the devname anymore.
+ */
+ kfree(devname_garbage);
+ return ret;
+ }
+ data->dir = igrab(dir);
+ if (!data->dir) {
+ nfs_dec_sillycount(dir);
+ return 0;
+ }
+ nfs_sb_active(dir->i_sb);
+ data->args.fh = NFS_FH(dir);
+ nfs_fattr_init(data->res.dir_attr);
+
+ NFS_PROTO(dir)->unlink_setup(&msg, dir);
+
+ task_setup_data.rpc_client = NFS_CLIENT(dir);
+ task = rpc_run_task(&task_setup_data);
+ if (!IS_ERR(task))
+ rpc_put_task_async(task);
+ return 1;
+}
+
+static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
+{
+ struct dentry *parent;
+ struct inode *dir;
+ int ret = 0;
+
+ parent = dget_parent(dentry);
+ if (parent == NULL)
+ goto out_free;
+ dir = parent->d_inode;
+ /* Non-exclusive lock protects against concurrent lookup() calls */
+ spin_lock(&dir->i_lock);
+ if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) {
+ /* Deferred delete */
+ hlist_add_head(&data->list, &NFS_I(dir)->silly_list);
+ spin_unlock(&dir->i_lock);
+ ret = 1;
+ goto out_dput;
+ }
+ spin_unlock(&dir->i_lock);
+ ret = nfs_do_call_unlink(parent, dir, data);
+out_dput:
+ dput(parent);
+out_free:
+ return ret;
+}
+
+void nfs_block_sillyrename(struct dentry *dentry)
+{
+ struct nfs_inode *nfsi = NFS_I(dentry->d_inode);
+
+ wait_event(nfsi->waitqueue, atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1);
+}
+
+void nfs_unblock_sillyrename(struct dentry *dentry)
+{
+ struct inode *dir = dentry->d_inode;
+ struct nfs_inode *nfsi = NFS_I(dir);
+ struct nfs_unlinkdata *data;
+
+ atomic_inc(&nfsi->silly_count);
+ spin_lock(&dir->i_lock);
+ while (!hlist_empty(&nfsi->silly_list)) {
+ if (!atomic_inc_not_zero(&nfsi->silly_count))
+ break;
+ data = hlist_entry(nfsi->silly_list.first, struct nfs_unlinkdata, list);
+ hlist_del(&data->list);
+ spin_unlock(&dir->i_lock);
+ if (nfs_do_call_unlink(dentry, dir, data) == 0)
+ nfs_free_unlinkdata(data);
+ spin_lock(&dir->i_lock);
+ }
+ spin_unlock(&dir->i_lock);
+}
+
+/**
+ * nfs_async_unlink - asynchronous unlinking of a file
+ * @dir: parent directory of dentry
+ * @dentry: dentry to unlink
+ */
+static int
+nfs_async_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct nfs_unlinkdata *data;
+ int status = -ENOMEM;
+ void *devname_garbage = NULL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ goto out;
+
+ data->cred = rpc_lookup_cred();
+ if (IS_ERR(data->cred)) {
+ status = PTR_ERR(data->cred);
+ goto out_free;
+ }
+ data->res.dir_attr = &data->dir_attr;
+
+ status = -EBUSY;
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ goto out_unlock;
+ dentry->d_flags |= DCACHE_NFSFS_RENAMED;
+ devname_garbage = dentry->d_fsdata;
+ dentry->d_fsdata = data;
+ spin_unlock(&dentry->d_lock);
+ /*
+ * If we displaced an old cached devname, free it. At this
+ * point the dentry is definitely not a root, so we won't
+ * need the devname anymore. (kfree() tolerates NULL, so no
+ * separate NULL check is needed.)
+ */
+ kfree(devname_garbage);
+ return 0;
+out_unlock:
+ spin_unlock(&dentry->d_lock);
+ put_rpccred(data->cred);
+out_free:
+ kfree(data);
+out:
+ return status;
+}
+
+/**
+ * nfs_complete_unlink - Initialize completion of the sillydelete
+ * @dentry: dentry to delete
+ * @inode: inode of the dentry being unlinked
+ *
+ * Since we're most likely to be called by dentry_iput(), we
+ * only use the dentry to find the sillydelete. We then copy the name
+ * into the qstr.
+ */
+void
+nfs_complete_unlink(struct dentry *dentry, struct inode *inode)
+{
+ struct nfs_unlinkdata *data = NULL;
+
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
+ data = dentry->d_fsdata;
+ dentry->d_fsdata = NULL;
+ }
+ spin_unlock(&dentry->d_lock);
+
+ if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)))
+ nfs_free_unlinkdata(data);
+}
+
+/* Cancel a queued async unlink. Called when a sillyrename run fails. */
+static void
+nfs_cancel_async_unlink(struct dentry *dentry)
+{
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ struct nfs_unlinkdata *data = dentry->d_fsdata;
+
+ dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
+ dentry->d_fsdata = NULL;
+ spin_unlock(&dentry->d_lock);
+ nfs_free_unlinkdata(data);
+ return;
+ }
+ spin_unlock(&dentry->d_lock);
+}
+
+/**
+ * nfs_async_rename_done - Sillyrename post-processing
+ * @task: rpc_task of the sillyrename
+ * @calldata: nfs_renamedata for the sillyrename
+ *
+ * Do the directory attribute updates and the d_move
+ */
+static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs_renamedata *data = calldata;
+ struct inode *old_dir = data->old_dir;
+ struct inode *new_dir = data->new_dir;
+ struct dentry *old_dentry = data->old_dentry;
+ struct dentry *new_dentry = data->new_dentry;
+
+ if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
+ rpc_restart_call_prepare(task);
+ return;
+ }
+
+ if (task->tk_status != 0) {
+ nfs_cancel_async_unlink(old_dentry);
+ return;
+ }
+
+ d_drop(old_dentry);
+ d_drop(new_dentry);
+}
+
+/**
+ * nfs_async_rename_release - Release the sillyrename data.
+ * @calldata: the struct nfs_renamedata to be released
+ */
+static void nfs_async_rename_release(void *calldata)
+{
+ struct nfs_renamedata *data = calldata;
+ struct super_block *sb = data->old_dir->i_sb;
+
+ if (data->old_dentry->d_inode)
+ nfs_mark_for_revalidate(data->old_dentry->d_inode);
+
+ dput(data->old_dentry);
+ dput(data->new_dentry);
+ iput(data->old_dir);
+ iput(data->new_dir);
+ nfs_sb_deactive(sb);
+ put_rpccred(data->cred);
+ kfree(data);
+}
+
+static void nfs_rename_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_renamedata *data = calldata;
+ NFS_PROTO(data->old_dir)->rename_rpc_prepare(task, data);
+}
+
+static const struct rpc_call_ops nfs_rename_ops = {
+ .rpc_call_done = nfs_async_rename_done,
+ .rpc_release = nfs_async_rename_release,
+ .rpc_call_prepare = nfs_rename_prepare,
+};
+
+/**
+ * nfs_async_rename - perform an asynchronous rename operation
+ * @old_dir: directory that currently holds the dentry to be renamed
+ * @new_dir: target directory for the rename
+ * @old_dentry: original dentry to be renamed
+ * @new_dentry: dentry to which the old_dentry should be renamed
+ *
+ * It's expected that valid references to the dentries and inodes are held
+ */
+static struct rpc_task *
+nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
+ struct dentry *old_dentry, struct dentry *new_dentry)
+{
+ struct nfs_renamedata *data;
+ struct rpc_message msg = { };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_message = &msg,
+ .callback_ops = &nfs_rename_ops,
+ .workqueue = nfsiod_workqueue,
+ .rpc_client = NFS_CLIENT(old_dir),
+ .flags = RPC_TASK_ASYNC,
+ };
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (data == NULL)
+ return ERR_PTR(-ENOMEM);
+ task_setup_data.callback_data = data;
+
+ data->cred = rpc_lookup_cred();
+ if (IS_ERR(data->cred)) {
+ struct rpc_task *task = ERR_CAST(data->cred);
+ kfree(data);
+ return task;
+ }
+
+ msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ msg.rpc_cred = data->cred;
+
+ /* set up nfs_renamedata */
+ data->old_dir = old_dir;
+ ihold(old_dir);
+ data->new_dir = new_dir;
+ ihold(new_dir);
+ data->old_dentry = dget(old_dentry);
+ data->new_dentry = dget(new_dentry);
+ nfs_fattr_init(&data->old_fattr);
+ nfs_fattr_init(&data->new_fattr);
+
+ /* set up nfs_renameargs */
+ data->args.old_dir = NFS_FH(old_dir);
+ data->args.old_name = &old_dentry->d_name;
+ data->args.new_dir = NFS_FH(new_dir);
+ data->args.new_name = &new_dentry->d_name;
+
+ /* set up nfs_renameres */
+ data->res.old_fattr = &data->old_fattr;
+ data->res.new_fattr = &data->new_fattr;
+
+ nfs_sb_active(old_dir->i_sb);
+
+ NFS_PROTO(data->old_dir)->rename_setup(&msg, old_dir);
+
+ return rpc_run_task(&task_setup_data);
+}
+
+/**
+ * nfs_sillyrename - Perform a silly-rename of a dentry
+ * @dir: inode of directory that contains dentry
+ * @dentry: dentry to be sillyrenamed
+ *
+ * NFSv2/3 is stateless and the server doesn't know when the client is
+ * holding a file open. To prevent application problems when a file is
+ * unlinked while it's still open, the client performs a "silly-rename".
+ * That is, it renames the file to a hidden file in the same directory,
+ * and only performs the unlink once the last reference to it is put.
+ *
+ * The final cleanup is done during dentry_iput.
+ *
+ * (Note: NFSv4 is stateful, and has opens, so in theory an NFSv4 server
+ * could take responsibility for keeping open files referenced. The server
+ * would also need to ensure that opened-but-deleted files were kept over
+ * reboots. However, we may not assume a server does so. (RFC 5661
+ * does provide an OPEN4_RESULT_PRESERVE_UNLINKED flag that a server can
+ * use to advertise that it does this; some day we may take advantage of
+ * it.))
+ */
+int
+nfs_sillyrename(struct inode *dir, struct dentry *dentry)
+{
+ static unsigned int sillycounter;
+ const int fileidsize = sizeof(NFS_FILEID(dentry->d_inode))*2;
+ const int countersize = sizeof(sillycounter)*2;
+ const int slen = sizeof(".nfs")+fileidsize+countersize-1;
+ char silly[slen+1];
+ struct dentry *sdentry;
+ struct rpc_task *task;
+ int error = -EIO;
+
+ dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ dentry->d_count);
+ nfs_inc_stats(dir, NFSIOS_SILLYRENAME);
+
+ /*
+ * We don't allow a dentry to be silly-renamed twice.
+ */
+ error = -EBUSY;
+ if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
+ goto out;
+
+ sprintf(silly, ".nfs%*.*Lx",
+ fileidsize, fileidsize,
+ (unsigned long long)NFS_FILEID(dentry->d_inode));
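+ /*
+ * Editor's note (not in the original patch): with an 8-byte fileid,
+ * fileidsize is 16 and countersize is 8, so for fileid 0x12345 the
+ * buffer now holds ".nfs0000000000012345"; the loop below appends an
+ * 8-digit counter, giving e.g. ".nfs000000000001234500000001".
+ */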
+
+ /* Return delegation in anticipation of the rename */
+ nfs_inode_return_delegation(dentry->d_inode);
+
+ sdentry = NULL;
+ do {
+ char *suffix = silly + slen - countersize;
+
+ dput(sdentry);
+ sillycounter++;
+ sprintf(suffix, "%*.*x", countersize, countersize, sillycounter);
+
+ dfprintk(VFS, "NFS: trying to rename %s to %s\n",
+ dentry->d_name.name, silly);
+
+ sdentry = lookup_one_len(silly, dentry->d_parent, slen);
+ /*
+ * N.B. Better to return EBUSY here ... it could be
+ * dangerous to delete the file while it's in use.
+ */
+ if (IS_ERR(sdentry))
+ goto out;
+ } while (sdentry->d_inode != NULL); /* need negative lookup */
+
+ /* Queue the unlink first. We can't do this from rpc_release,
+ * as it has to allocate memory.
+ */
+ error = nfs_async_unlink(dir, dentry);
+ if (error)
+ goto out_dput;
+
+ /* populate unlinkdata with the right dname */
+ error = nfs_copy_dname(sdentry,
+ (struct nfs_unlinkdata *)dentry->d_fsdata);
+ if (error) {
+ nfs_cancel_async_unlink(dentry);
+ goto out_dput;
+ }
+
+ /* run the rename task, undo unlink if it fails */
+ task = nfs_async_rename(dir, dir, dentry, sdentry);
+ if (IS_ERR(task)) {
+ error = -EBUSY;
+ nfs_cancel_async_unlink(dentry);
+ goto out_dput;
+ }
+
+ /* wait for the RPC task to complete, unless a SIGKILL intervenes */
+ error = rpc_wait_for_completion_task(task);
+ if (error == 0)
+ error = task->tk_status;
+ rpc_put_task(task);
+out_dput:
+ dput(sdentry);
+out:
+ return error;
+}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
new file mode 100644
index 00000000..c0746232
--- /dev/null
+++ b/fs/nfs/write.c
@@ -0,0 +1,1790 @@
+/*
+ * linux/fs/nfs/write.c
+ *
+ * Write file data over NFS.
+ *
+ * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/writeback.h>
+#include <linux/swap.h>
+#include <linux/migrate.h>
+
+#include <linux/sunrpc/clnt.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs_page.h>
+#include <linux/backing-dev.h>
+#include <linux/export.h>
+
+#include <asm/uaccess.h>
+
+#include "delegation.h"
+#include "internal.h"
+#include "iostat.h"
+#include "nfs4_fs.h"
+#include "fscache.h"
+#include "pnfs.h"
+
+#define NFSDBG_FACILITY NFSDBG_PAGECACHE
+
+#define MIN_POOL_WRITE (32)
+#define MIN_POOL_COMMIT (4)
+
+/*
+ * Local function declarations
+ */
+static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
+ struct inode *inode, int ioflags);
+static void nfs_redirty_request(struct nfs_page *req);
+static const struct rpc_call_ops nfs_write_partial_ops;
+static const struct rpc_call_ops nfs_write_full_ops;
+static const struct rpc_call_ops nfs_commit_ops;
+
+static struct kmem_cache *nfs_wdata_cachep;
+static mempool_t *nfs_wdata_mempool;
+static mempool_t *nfs_commit_mempool;
+
+struct nfs_write_data *nfs_commitdata_alloc(void)
+{
+ struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
+
+ if (p) {
+ memset(p, 0, sizeof(*p));
+ INIT_LIST_HEAD(&p->pages);
+ }
+ return p;
+}
+EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
+
+void nfs_commit_free(struct nfs_write_data *p)
+{
+ if (p && (p->pagevec != &p->page_array[0]))
+ kfree(p->pagevec);
+ mempool_free(p, nfs_commit_mempool);
+}
+EXPORT_SYMBOL_GPL(nfs_commit_free);
+
+struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
+{
+ struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
+
+ if (p) {
+ memset(p, 0, sizeof(*p));
+ INIT_LIST_HEAD(&p->pages);
+ p->npages = pagecount;
+ if (pagecount <= ARRAY_SIZE(p->page_array))
+ p->pagevec = p->page_array;
+ else {
+ p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
+ if (!p->pagevec) {
+ mempool_free(p, nfs_wdata_mempool);
+ p = NULL;
+ }
+ }
+ }
+ return p;
+}
+
+void nfs_writedata_free(struct nfs_write_data *p)
+{
+ if (p && (p->pagevec != &p->page_array[0]))
+ kfree(p->pagevec);
+ mempool_free(p, nfs_wdata_mempool);
+}
+
+void nfs_writedata_release(struct nfs_write_data *wdata)
+{
+ put_nfs_open_context(wdata->args.context);
+ nfs_writedata_free(wdata);
+}
+
+static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
+{
+ ctx->error = error;
+ smp_wmb();
+ set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
+}
+
+static struct nfs_page *nfs_page_find_request_locked(struct page *page)
+{
+ struct nfs_page *req = NULL;
+
+ if (PagePrivate(page)) {
+ req = (struct nfs_page *)page_private(page);
+ if (req != NULL)
+ kref_get(&req->wb_kref);
+ }
+ return req;
+}
+
+static struct nfs_page *nfs_page_find_request(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct nfs_page *req = NULL;
+
+ spin_lock(&inode->i_lock);
+ req = nfs_page_find_request_locked(page);
+ spin_unlock(&inode->i_lock);
+ return req;
+}
+
+/* Adjust the file length if we're writing beyond the end */
+static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
+{
+ struct inode *inode = page->mapping->host;
+ loff_t end, i_size;
+ pgoff_t end_index;
+
+ spin_lock(&inode->i_lock);
+ i_size = i_size_read(inode);
+ end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
+ if (i_size > 0 && page->index < end_index)
+ goto out;
+ end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
+ if (i_size >= end)
+ goto out;
+ i_size_write(inode, end);
+ nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
+out:
+ spin_unlock(&inode->i_lock);
+}
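+/*
+ * Illustration (editor's note, not in the original patch): with 4K
+ * pages, a write of count=200 at offset=100 into page index 3 ends
+ * at byte (3 << 12) + 300 = 12588; if i_size was 12288, it is grown
+ * to 12588, otherwise the larger i_size is left alone.
+ */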
+
+/* A writeback failed: mark the page as bad, and invalidate the page cache */
+static void nfs_set_pageerror(struct page *page)
+{
+ SetPageError(page);
+ nfs_zap_mapping(page->mapping->host, page->mapping);
+}
+
+/* We can set the PG_uptodate flag if we see that a write request
+ * covers the full page.
+ */
+static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
+{
+ if (PageUptodate(page))
+ return;
+ if (base != 0)
+ return;
+ if (count != nfs_page_length(page))
+ return;
+ SetPageUptodate(page);
+}
+
+static int wb_priority(struct writeback_control *wbc)
+{
+ if (wbc->for_reclaim)
+ return FLUSH_HIGHPRI | FLUSH_STABLE;
+ if (wbc->for_kupdate || wbc->for_background)
+ return FLUSH_LOWPRI | FLUSH_COND_STABLE;
+ return FLUSH_COND_STABLE;
+}
+
+/*
+ * NFS congestion control
+ */
+
+int nfs_congestion_kb;
+
+#define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
+#define NFS_CONGESTION_OFF_THRESH \
+ (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
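+/*
+ * Editor's note (not in the original patch): these thresholds convert
+ * nfs_congestion_kb into pages and apply 25% hysteresis.  With 4K
+ * pages (PAGE_SHIFT == 12) and nfs_congestion_kb == 16384, the bdi is
+ * marked congested above 16384 >> 2 == 4096 pages under writeback and
+ * uncongested again below 4096 - 1024 == 3072 pages.
+ */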
+
+static int nfs_set_page_writeback(struct page *page)
+{
+ int ret = test_set_page_writeback(page);
+
+ if (!ret) {
+ struct inode *inode = page->mapping->host;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+
+ page_cache_get(page);
+ if (atomic_long_inc_return(&nfss->writeback) >
+ NFS_CONGESTION_ON_THRESH) {
+ set_bdi_congested(&nfss->backing_dev_info,
+ BLK_RW_ASYNC);
+ }
+ }
+ return ret;
+}
+
+static void nfs_end_page_writeback(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct nfs_server *nfss = NFS_SERVER(inode);
+
+ end_page_writeback(page);
+ page_cache_release(page);
+ if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
+ clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
+}
+
+static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
+{
+ struct inode *inode = page->mapping->host;
+ struct nfs_page *req;
+ int ret;
+
+ spin_lock(&inode->i_lock);
+ for (;;) {
+ req = nfs_page_find_request_locked(page);
+ if (req == NULL)
+ break;
+ if (nfs_lock_request_dontget(req))
+ break;
+ /* Note: If we hold the page lock, as is the case in nfs_writepage,
+ * then the call to nfs_lock_request_dontget() will always
+ * succeed provided that someone hasn't already marked the
+ * request as dirty (in which case we don't care).
+ */
+ spin_unlock(&inode->i_lock);
+ if (!nonblock)
+ ret = nfs_wait_on_request(req);
+ else
+ ret = -EAGAIN;
+ nfs_release_request(req);
+ if (ret != 0)
+ return ERR_PTR(ret);
+ spin_lock(&inode->i_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ return req;
+}
+
+/*
+ * Find an associated nfs write request, and prepare to flush it out.
+ * May return an error if the user signalled nfs_wait_on_request().
+ */
+static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
+ struct page *page, bool nonblock)
+{
+ struct nfs_page *req;
+ int ret = 0;
+
+ req = nfs_find_and_lock_request(page, nonblock);
+ if (!req)
+ goto out;
+ ret = PTR_ERR(req);
+ if (IS_ERR(req))
+ goto out;
+
+ ret = nfs_set_page_writeback(page);
+ BUG_ON(ret != 0);
+ BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
+
+ if (!nfs_pageio_add_request(pgio, req)) {
+ nfs_redirty_request(req);
+ ret = pgio->pg_error;
+ }
+out:
+ return ret;
+}
+
+static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
+{
+ struct inode *inode = page->mapping->host;
+ int ret;
+
+ nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
+ nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
+
+ nfs_pageio_cond_complete(pgio, page->index);
+ ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
+ if (ret == -EAGAIN) {
+ redirty_page_for_writepage(wbc, page);
+ ret = 0;
+ }
+ return ret;
+}
+
+/*
+ * Write an mmapped page to the server.
+ */
+static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
+{
+ struct nfs_pageio_descriptor pgio;
+ int err;
+
+ nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
+ err = nfs_do_writepage(page, wbc, &pgio);
+ nfs_pageio_complete(&pgio);
+ if (err < 0)
+ return err;
+ if (pgio.pg_error < 0)
+ return pgio.pg_error;
+ return 0;
+}
+
+int nfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+ int ret;
+
+ ret = nfs_writepage_locked(page, wbc);
+ unlock_page(page);
+ return ret;
+}
+
+static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
+{
+ int ret;
+
+ ret = nfs_do_writepage(page, wbc, data);
+ unlock_page(page);
+ return ret;
+}
+
+int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ unsigned long *bitlock = &NFS_I(inode)->flags;
+ struct nfs_pageio_descriptor pgio;
+ int err;
+
+ /* Stop dirtying of new pages while we sync */
+ err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
+ nfs_wait_bit_killable, TASK_KILLABLE);
+ if (err)
+ goto out_err;
+
+ nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
+
+ nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
+ err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
+ nfs_pageio_complete(&pgio);
+
+ clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
+ smp_mb__after_clear_bit();
+ wake_up_bit(bitlock, NFS_INO_FLUSHING);
+
+ if (err < 0)
+ goto out_err;
+ err = pgio.pg_error;
+ if (err < 0)
+ goto out_err;
+ return 0;
+out_err:
+ return err;
+}
+
+/*
+ * Insert a write request into an inode
+ */
+static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ /* Lock the request! */
+ nfs_lock_request_dontget(req);
+
+ spin_lock(&inode->i_lock);
+ if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
+ inode->i_version++;
+ set_bit(PG_MAPPED, &req->wb_flags);
+ SetPagePrivate(req->wb_page);
+ set_page_private(req->wb_page, (unsigned long)req);
+ nfsi->npages++;
+ kref_get(&req->wb_kref);
+ spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Remove a write request from an inode
+ */
+static void nfs_inode_remove_request(struct nfs_page *req)
+{
+ struct inode *inode = req->wb_context->dentry->d_inode;
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ BUG_ON(!NFS_WBACK_BUSY(req));
+
+ spin_lock(&inode->i_lock);
+ set_page_private(req->wb_page, 0);
+ ClearPagePrivate(req->wb_page);
+ clear_bit(PG_MAPPED, &req->wb_flags);
+ nfsi->npages--;
+ spin_unlock(&inode->i_lock);
+ nfs_release_request(req);
+}
+
+static void
+nfs_mark_request_dirty(struct nfs_page *req)
+{
+ __set_page_dirty_nobuffers(req->wb_page);
+}
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+/**
+ * nfs_request_add_commit_list - add request to a commit list
+ * @req: pointer to a struct nfs_page
+ * @head: commit list head
+ *
+ * This sets the PG_CLEAN bit and updates the inode's count of
+ * outstanding requests requiring a commit, as well as the MM
+ * page stats.
+ *
+ * The caller must _not_ hold the inode->i_lock, but must be
+ * holding the nfs_page lock.
+ */
+void
+nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
+{
+ struct inode *inode = req->wb_context->dentry->d_inode;
+
+ set_bit(PG_CLEAN, &(req)->wb_flags);
+ spin_lock(&inode->i_lock);
+ nfs_list_add_request(req, head);
+ NFS_I(inode)->ncommit++;
+ spin_unlock(&inode->i_lock);
+ inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+}
+EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
+
+/**
+ * nfs_request_remove_commit_list - Remove request from a commit list
+ * @req: pointer to a nfs_page
+ *
+ * This clears the PG_CLEAN bit, and updates the inode global count of
+ * number of outstanding requests requiring a commit
+ * It does not update the MM page stats.
+ *
+ * The caller _must_ hold the inode->i_lock and the nfs_page lock.
+ */
+void
+nfs_request_remove_commit_list(struct nfs_page *req)
+{
+ struct inode *inode = req->wb_context->dentry->d_inode;
+
+ if (!test_and_clear_bit(PG_CLEAN, &req->wb_flags))
+ return;
+ nfs_list_remove_request(req);
+ NFS_I(inode)->ncommit--;
+}
+EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
+
+/*
+ * Add a request to the inode's commit list.
+ */
+static void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+ struct inode *inode = req->wb_context->dentry->d_inode;
+
+ if (pnfs_mark_request_commit(req, lseg))
+ return;
+ nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
+}
+
+static void
+nfs_clear_page_commit(struct page *page)
+{
+ dec_zone_page_state(page, NR_UNSTABLE_NFS);
+ dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+}
+
+static void
+nfs_clear_request_commit(struct nfs_page *req)
+{
+ if (test_bit(PG_CLEAN, &req->wb_flags)) {
+ struct inode *inode = req->wb_context->dentry->d_inode;
+
+ if (!pnfs_clear_request_commit(req)) {
+ spin_lock(&inode->i_lock);
+ nfs_request_remove_commit_list(req);
+ spin_unlock(&inode->i_lock);
+ }
+ nfs_clear_page_commit(req->wb_page);
+ }
+}
+
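+/*
+ * Decide whether a write reply requires a follow-up COMMIT: a
+ * FILE_SYNC reply never does, an UNSTABLE reply always does, and a
+ * DATA_SYNC reply does only when the write went through the MDS
+ * (data->lseg == NULL).
+ */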
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+ if (data->verf.committed == NFS_DATA_SYNC)
+ return data->lseg == NULL;
+ else
+ return data->verf.committed != NFS_FILE_SYNC;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req,
+ struct nfs_write_data *data)
+{
+ if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ nfs_mark_request_commit(req, data->lseg);
+ return 1;
+ }
+ if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+ nfs_mark_request_dirty(req);
+ return 1;
+ }
+ return 0;
+}
+#else
+static void
+nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
+{
+}
+
+static void
+nfs_clear_request_commit(struct nfs_page *req)
+{
+}
+
+static inline
+int nfs_write_need_commit(struct nfs_write_data *data)
+{
+ return 0;
+}
+
+static inline
+int nfs_reschedule_unstable_write(struct nfs_page *req,
+ struct nfs_write_data *data)
+{
+ return 0;
+}
+#endif
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+static int
+nfs_need_commit(struct nfs_inode *nfsi)
+{
+ return nfsi->ncommit > 0;
+}
+
+/* i_lock held by caller */
+static int
+nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
+ spinlock_t *lock)
+{
+ struct nfs_page *req, *tmp;
+ int ret = 0;
+
+ list_for_each_entry_safe(req, tmp, src, wb_list) {
+ if (!nfs_lock_request(req))
+ continue;
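+ /* cond_resched_lock() may drop @lock; if it did, the list may
+ * have changed, so re-fetch the next entry. */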
+ if (cond_resched_lock(lock))
+ list_safe_reset_next(req, tmp, wb_list);
+ nfs_request_remove_commit_list(req);
+ nfs_list_add_request(req, dst);
+ ret++;
+ if (ret == max)
+ break;
+ }
+ return ret;
+}
+
+/**
+ * nfs_scan_commit - Scan an inode for commit requests
+ * @inode: NFS inode to scan
+ * @dst: destination list
+ *
+ * Moves requests from the inode's 'commit' request list.
+ * The requests are *not* checked to ensure that they form a contiguous set.
+ */
+static int
+nfs_scan_commit(struct inode *inode, struct list_head *dst)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ int ret = 0;
+
+ spin_lock(&inode->i_lock);
+ if (nfsi->ncommit > 0) {
+ const int max = INT_MAX;
+
+ ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max,
+ &inode->i_lock);
+ ret += pnfs_scan_commit_lists(inode, max - ret,
+ &inode->i_lock);
+ }
+ spin_unlock(&inode->i_lock);
+ return ret;
+}
+
+#else
+static inline int nfs_need_commit(struct nfs_inode *nfsi)
+{
+ return 0;
+}
+
+static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Search for an existing write request, and attempt to update
+ * it to reflect a new dirty region on a given page.
+ *
+ * If the attempt fails, then the existing request is flushed out
+ * to disk.
+ */
+static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
+ struct page *page,
+ unsigned int offset,
+ unsigned int bytes)
+{
+ struct nfs_page *req;
+ unsigned int rqend;
+ unsigned int end;
+ int error;
+
+ if (!PagePrivate(page))
+ return NULL;
+
+ end = offset + bytes;
+ spin_lock(&inode->i_lock);
+
+ for (;;) {
+ req = nfs_page_find_request_locked(page);
+ if (req == NULL)
+ goto out_unlock;
+
+ rqend = req->wb_offset + req->wb_bytes;
+ /*
+ * Tell the caller to flush out the request if
+ * the offsets are non-contiguous.
+ * Note: nfs_flush_incompatible() will already
+ * have flushed out requests having wrong owners.
+ */
+ if (offset > rqend
+ || end < req->wb_offset)
+ goto out_flushme;
+
+ if (nfs_lock_request_dontget(req))
+ break;
+
+ /* The request is locked, so wait and then retry */
+ spin_unlock(&inode->i_lock);
+ error = nfs_wait_on_request(req);
+ nfs_release_request(req);
+ if (error != 0)
+ goto out_err;
+ spin_lock(&inode->i_lock);
+ }
+
+ /* Okay, the request matches. Update the region */
+ if (offset < req->wb_offset) {
+ req->wb_offset = offset;
+ req->wb_pgbase = offset;
+ }
+ if (end > rqend)
+ req->wb_bytes = end - req->wb_offset;
+ else
+ req->wb_bytes = rqend - req->wb_offset;
+out_unlock:
+ spin_unlock(&inode->i_lock);
+ if (req)
+ nfs_clear_request_commit(req);
+ return req;
+out_flushme:
+ spin_unlock(&inode->i_lock);
+ nfs_release_request(req);
+ error = nfs_wb_page(inode, page);
+out_err:
+ return ERR_PTR(error);
+}
+
+/*
+ * Try to update an existing write request, or create one if there is none.
+ *
+ * Note: Should always be called with the Page Lock held to prevent races
+ * if we have to add a new request. Also assumes that the caller has
+ * already called nfs_flush_incompatible() if necessary.
+ */
+static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
+ struct page *page, unsigned int offset, unsigned int bytes)
+{
+ struct inode *inode = page->mapping->host;
+ struct nfs_page *req;
+
+ req = nfs_try_to_update_request(inode, page, offset, bytes);
+ if (req != NULL)
+ goto out;
+ req = nfs_create_request(ctx, inode, page, offset, bytes);
+ if (IS_ERR(req))
+ goto out;
+ nfs_inode_add_request(inode, req);
+out:
+ return req;
+}
+
+static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
+ unsigned int offset, unsigned int count)
+{
+ struct nfs_page *req;
+
+ req = nfs_setup_write_request(ctx, page, offset, count);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ /* Update file length */
+ nfs_grow_file(page, offset, count);
+ nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
+ nfs_mark_request_dirty(req);
+ nfs_unlock_request(req);
+ return 0;
+}
+
+int nfs_flush_incompatible(struct file *file, struct page *page)
+{
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct nfs_page *req;
+ int do_flush, status;
+ /*
+ * Look for a request corresponding to this page. If there
+ * is one, and it belongs to another file, we flush it out
+ * before we try to copy anything into the page. Do this
+ * due to the lack of an ACCESS-type call in NFSv2.
+ * Also do the same if we find a request from an existing
+ * dropped page.
+ */
+ do {
+ req = nfs_page_find_request(page);
+ if (req == NULL)
+ return 0;
+ do_flush = req->wb_page != page || req->wb_context != ctx ||
+ req->wb_lock_context->lockowner != current->files ||
+ req->wb_lock_context->pid != current->tgid;
+ nfs_release_request(req);
+ if (!do_flush)
+ return 0;
+ status = nfs_wb_page(page->mapping->host, page);
+ } while (status == 0);
+ return status;
+}
+
+/*
+ * If the page cache is marked as unsafe or invalid, then we can't rely on
+ * the PageUptodate() flag. In this case, we will need to turn off
+ * write optimisations that depend on the page contents being correct.
+ */
+static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
+{
+ return PageUptodate(page) &&
+ !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
+}
+
+/*
+ * Update and possibly write a cached page of an NFS file.
+ *
+ * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
+ * things with a page scheduled for an RPC call (e.g. invalidate it).
+ */
+int nfs_updatepage(struct file *file, struct page *page,
+ unsigned int offset, unsigned int count)
+{
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct inode *inode = page->mapping->host;
+ int status = 0;
+
+ nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
+
+ dprintk("NFS: nfs_updatepage(%s/%s %d@%lld)\n",
+ file->f_path.dentry->d_parent->d_name.name,
+ file->f_path.dentry->d_name.name, count,
+ (long long)(page_offset(page) + offset));
+
+ /* If we're not using byte range locks, and we know the page
+ * is up to date, it may be more efficient to extend the write
+ * to cover the entire page in order to avoid fragmentation
+ * inefficiencies.
+ */
+ if (nfs_write_pageuptodate(page, inode) &&
+ inode->i_flock == NULL &&
+ !(file->f_flags & O_DSYNC)) {
+ count = max(count + offset, nfs_page_length(page));
+ offset = 0;
+ }
+
+ status = nfs_writepage_setup(ctx, page, offset, count);
+ if (status < 0)
+ nfs_set_pageerror(page);
+ else
+ __set_page_dirty_nobuffers(page);
+
+ dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
+ status, (long long)i_size_read(inode));
+ return status;
+}
+
+static void nfs_writepage_release(struct nfs_page *req,
+ struct nfs_write_data *data)
+{
+ struct page *page = req->wb_page;
+
+ if (PageError(page) || !nfs_reschedule_unstable_write(req, data))
+ nfs_inode_remove_request(req);
+ nfs_unlock_request(req);
+ nfs_end_page_writeback(page);
+}
+
+static int flush_task_priority(int how)
+{
+ switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
+ case FLUSH_HIGHPRI:
+ return RPC_PRIORITY_HIGH;
+ case FLUSH_LOWPRI:
+ return RPC_PRIORITY_LOW;
+ }
+ return RPC_PRIORITY_NORMAL;
+}
+
+int nfs_initiate_write(struct nfs_write_data *data,
+ struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how)
+{
+ struct inode *inode = data->inode;
+ int priority = flush_task_priority(how);
+ struct rpc_task *task;
+ struct rpc_message msg = {
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = data->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = clnt,
+ .task = &data->task,
+ .rpc_message = &msg,
+ .callback_ops = call_ops,
+ .callback_data = data,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ .priority = priority,
+ };
+ int ret = 0;
+
+ /* Set up the initial task struct. */
+ NFS_PROTO(inode)->write_setup(data, &msg);
+
+ dprintk("NFS: %5u initiated write call "
+ "(req %s/%lld, %u bytes @ offset %llu)\n",
+ data->task.tk_pid,
+ inode->i_sb->s_id,
+ (long long)NFS_FILEID(inode),
+ data->args.count,
+ (unsigned long long)data->args.offset);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task)) {
+ ret = PTR_ERR(task);
+ goto out;
+ }
+ if (how & FLUSH_SYNC) {
+ ret = rpc_wait_for_completion_task(task);
+ if (ret == 0)
+ ret = task->tk_status;
+ }
+ rpc_put_task(task);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nfs_initiate_write);
+
+/*
+ * Set up the argument/result storage required for the RPC call.
+ */
+static void nfs_write_rpcsetup(struct nfs_page *req,
+ struct nfs_write_data *data,
+ unsigned int count, unsigned int offset,
+ int how)
+{
+ struct inode *inode = req->wb_context->dentry->d_inode;
+
+ /* Set up the RPC argument and reply structs
+ * NB: take care not to mess about with data->commit et al. */
+
+ data->req = req;
+ data->inode = inode;
+ data->cred = req->wb_context->cred;
+
+ data->args.fh = NFS_FH(inode);
+ data->args.offset = req_offset(req) + offset;
+ /* pnfs_set_layoutcommit needs this */
+ data->mds_offset = data->args.offset;
+ data->args.pgbase = req->wb_pgbase + offset;
+ data->args.pages = data->pagevec;
+ data->args.count = count;
+ data->args.context = get_nfs_open_context(req->wb_context);
+ data->args.lock_context = req->wb_lock_context;
+ data->args.stable = NFS_UNSTABLE;
+ switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
+ case 0:
+ break;
+ case FLUSH_COND_STABLE:
+ if (nfs_need_commit(NFS_I(inode)))
+ break;
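+ /* fall through: nothing awaits commit, so ask for a stable write */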
+ default:
+ data->args.stable = NFS_FILE_SYNC;
+ }
+
+ data->res.fattr = &data->fattr;
+ data->res.count = count;
+ data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
+}
+
+static int nfs_do_write(struct nfs_write_data *data,
+ const struct rpc_call_ops *call_ops,
+ int how)
+{
+ struct inode *inode = data->args.context->dentry->d_inode;
+
+ return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
+}
+
+static int nfs_do_multiple_writes(struct list_head *head,
+ const struct rpc_call_ops *call_ops,
+ int how)
+{
+ struct nfs_write_data *data;
+ int ret = 0;
+
+ while (!list_empty(head)) {
+ int ret2;
+
+ data = list_entry(head->next, struct nfs_write_data, list);
+ list_del_init(&data->list);
+
+ ret2 = nfs_do_write(data, call_ops, how);
+ if (ret == 0)
+ ret = ret2;
+ }
+ return ret;
+}
+
+/* If an nfs_flush_* function fails, it should remove the requests from
+ * @head and call this on each one, which will prepare them to be
+ * retried on the next writeback using standard NFS.
+ */
+static void nfs_redirty_request(struct nfs_page *req)
+{
+ struct page *page = req->wb_page;
+
+ nfs_mark_request_dirty(req);
+ nfs_unlock_request(req);
+ nfs_end_page_writeback(page);
+}
+
+/*
+ * Generate multiple small requests to write out a single
+ * contiguous dirty area on one page.
+ */
+static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
+{
+ struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
+ struct page *page = req->wb_page;
+ struct nfs_write_data *data;
+ size_t wsize = desc->pg_bsize, nbytes;
+ unsigned int offset;
+ int requests = 0;
+ int ret = 0;
+
+ nfs_list_remove_request(req);
+
+ if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
+ (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
+ desc->pg_count > wsize))
+ desc->pg_ioflags &= ~FLUSH_COND_STABLE;
+
+ offset = 0;
+ nbytes = desc->pg_count;
+ do {
+ size_t len = min(nbytes, wsize);
+
+ data = nfs_writedata_alloc(1);
+ if (!data)
+ goto out_bad;
+ data->pagevec[0] = page;
+ nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
+ list_add(&data->list, res);
+ requests++;
+ nbytes -= len;
+ offset += len;
+ } while (nbytes != 0);
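+ /* wb_complete counts the outstanding sub-requests; the page is
+ * released only once the last of them completes. */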
+ atomic_set(&req->wb_complete, requests);
+ desc->pg_rpc_callops = &nfs_write_partial_ops;
+ return ret;
+
+out_bad:
+ while (!list_empty(res)) {
+ data = list_entry(res->next, struct nfs_write_data, list);
+ list_del(&data->list);
+ nfs_writedata_release(data);
+ }
+ nfs_redirty_request(req);
+ return -ENOMEM;
+}
+
+/*
+ * Create an RPC task for the given write request and kick it.
+ * The page must have been locked by the caller.
+ *
+ * It may happen that the page we're passed is not marked dirty.
+ * This is the case if nfs_updatepage detects a conflicting request
+ * that has been written but not committed.
+ */
+static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
+{
+ struct nfs_page *req;
+ struct page **pages;
+ struct nfs_write_data *data;
+ struct list_head *head = &desc->pg_list;
+ int ret = 0;
+
+ data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
+ desc->pg_count));
+ if (!data) {
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_redirty_request(req);
+ }
+ ret = -ENOMEM;
+ goto out;
+ }
+ pages = data->pagevec;
+ while (!list_empty(head)) {
+ req = nfs_list_entry(head->next);
+ nfs_list_remove_request(req);
+ nfs_list_add_request(req, &data->pages);
+ *pages++ = req->wb_page;
+ }
+ req = nfs_list_entry(data->pages.next);
+
+ if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
+ (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
+ desc->pg_ioflags &= ~FLUSH_COND_STABLE;
+
+ /* Set up the argument struct */
+ nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
+ list_add(&data->list, res);
+ desc->pg_rpc_callops = &nfs_write_full_ops;
+out:
+ return ret;
+}
+
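+/*
+ * When the negotiated block size is smaller than a page, the page must
+ * be split across several RPCs; otherwise one request per page will do.
+ */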
+int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
+{
+ if (desc->pg_bsize < PAGE_CACHE_SIZE)
+ return nfs_flush_multi(desc, head);
+ return nfs_flush_one(desc, head);
+}
+
+static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
+{
+ LIST_HEAD(head);
+ int ret;
+
+ ret = nfs_generic_flush(desc, &head);
+ if (ret == 0)
+ ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
+ desc->pg_ioflags);
+ return ret;
+}
+
+static const struct nfs_pageio_ops nfs_pageio_write_ops = {
+ .pg_test = nfs_generic_pg_test,
+ .pg_doio = nfs_generic_pg_writepages,
+};
+
+void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode, int ioflags)
+{
+ nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
+ NFS_SERVER(inode)->wsize, ioflags);
+}
+
+void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
+{
+ pgio->pg_ops = &nfs_pageio_write_ops;
+ pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
+}
+EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
+
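+/*
+ * Prefer the pNFS write path when a layout driver claims this inode;
+ * fall back to plain writes through the MDS otherwise.
+ */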
+static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
+ struct inode *inode, int ioflags)
+{
+ if (!pnfs_pageio_init_write(pgio, inode, ioflags))
+ nfs_pageio_init_write_mds(pgio, inode, ioflags);
+}
+
+/*
+ * Handle a write reply that flushed part of a page.
+ */
+static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+
+ dprintk("NFS: %5u write(%s/%lld %d@%lld)",
+ task->tk_pid,
+ data->req->wb_context->dentry->d_inode->i_sb->s_id,
+ (long long)
+ NFS_FILEID(data->req->wb_context->dentry->d_inode),
+ data->req->wb_bytes, (long long)req_offset(data->req));
+
+ nfs_writeback_done(task, data);
+}
+
+static void nfs_writeback_release_partial(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ struct nfs_page *req = data->req;
+ struct page *page = req->wb_page;
+ int status = data->task.tk_status;
+
+ if (status < 0) {
+ nfs_set_pageerror(page);
+ nfs_context_set_write_error(req->wb_context, status);
+ dprintk(", error = %d\n", status);
+ goto out;
+ }
+
+ if (nfs_write_need_commit(data)) {
+ struct inode *inode = page->mapping->host;
+
+ spin_lock(&inode->i_lock);
+ if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
+ /* Do nothing; we need to resend the writes */
+ } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
+ memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+ dprintk(" defer commit\n");
+ } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
+ set_bit(PG_NEED_RESCHED, &req->wb_flags);
+ clear_bit(PG_NEED_COMMIT, &req->wb_flags);
+ dprintk(" server reboot detected\n");
+ }
+ spin_unlock(&inode->i_lock);
+ } else
+ dprintk(" OK\n");
+
+out:
+ if (atomic_dec_and_test(&req->wb_complete))
+ nfs_writepage_release(req, data);
+ nfs_writedata_release(calldata);
+}
+
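+/*
+ * rpc_call_prepare callback: defer to the NFS version-specific setup
+ * (e.g. NFSv4 session sequencing) before the write RPC is sent.
+ */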
+void nfs_write_prepare(struct rpc_task *task, void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
+}
+
+static const struct rpc_call_ops nfs_write_partial_ops = {
+ .rpc_call_prepare = nfs_write_prepare,
+ .rpc_call_done = nfs_writeback_done_partial,
+ .rpc_release = nfs_writeback_release_partial,
+};
+
+/*
+ * Handle a write reply that flushes a whole page.
+ *
+ * FIXME: There is an inherent race with invalidate_inode_pages and
+ * writebacks since the page->count is kept > 1 for as long
+ * as the page has a write request pending.
+ */
+static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+
+ nfs_writeback_done(task, data);
+}
+
+static void nfs_writeback_release_full(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+ int status = data->task.tk_status;
+
+ /* Update attributes as result of writeback. */
+ while (!list_empty(&data->pages)) {
+ struct nfs_page *req = nfs_list_entry(data->pages.next);
+ struct page *page = req->wb_page;
+
+ nfs_list_remove_request(req);
+
+ dprintk("NFS: %5u write (%s/%lld %d@%lld)",
+ data->task.tk_pid,
+ req->wb_context->dentry->d_inode->i_sb->s_id,
+ (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+ req->wb_bytes,
+ (long long)req_offset(req));
+
+ if (status < 0) {
+ nfs_set_pageerror(page);
+ nfs_context_set_write_error(req->wb_context, status);
+ dprintk(", error = %d\n", status);
+ goto remove_request;
+ }
+
+ if (nfs_write_need_commit(data)) {
+ memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
+ nfs_mark_request_commit(req, data->lseg);
+ dprintk(" marked for commit\n");
+ goto next;
+ }
+ dprintk(" OK\n");
+remove_request:
+ nfs_inode_remove_request(req);
+ next:
+ nfs_unlock_request(req);
+ nfs_end_page_writeback(page);
+ }
+ nfs_writedata_release(calldata);
+}
+
+static const struct rpc_call_ops nfs_write_full_ops = {
+ .rpc_call_prepare = nfs_write_prepare,
+ .rpc_call_done = nfs_writeback_done_full,
+ .rpc_release = nfs_writeback_release_full,
+};
+
+/*
+ * This function is called when the WRITE call is complete.
+ */
+void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
+{
+ struct nfs_writeargs *argp = &data->args;
+ struct nfs_writeres *resp = &data->res;
+ int status;
+
+ dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
+ task->tk_pid, task->tk_status);
+
+ /*
+ * ->write_done will attempt to use post-op attributes to detect
+ * conflicting writes by other clients. A strict interpretation
+ * of close-to-open would allow us to continue caching even if
+ * another writer had changed the file, but some applications
+ * depend on tighter cache coherency when writing.
+ */
+ status = NFS_PROTO(data->inode)->write_done(task, data);
+ if (status != 0)
+ return;
+ nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
+ if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
+ /* We tried a write call, but the server did not
+ * commit data to stable storage even though we
+ * requested it.
+ * Note: There is a known bug in Tru64 < 5.0 in which
+ * the server reports NFS_DATA_SYNC, but performs
+ * NFS_FILE_SYNC. We therefore implement this checking
+ * as a dprintk() in order to avoid filling syslog.
+ */
+ static unsigned long complain;
+
+ /* Note this will print the MDS for a DS write */
+ if (time_before(complain, jiffies)) {
+ dprintk("NFS: faulty NFS server %s:"
+ " (committed = %d) != (stable = %d)\n",
+ NFS_SERVER(data->inode)->nfs_client->cl_hostname,
+ resp->verf->committed, argp->stable);
+ complain = jiffies + 300 * HZ;
+ }
+ }
+#endif
+ /* Is this a short write? */
+ if (task->tk_status >= 0 && resp->count < argp->count) {
+ static unsigned long complain;
+
+ nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
+
+ /* Has the server at least made some progress? */
+ if (resp->count != 0) {
+ /* Was this an NFSv2 write or an NFSv3 stable write? */
+ if (resp->verf->committed != NFS_UNSTABLE) {
+ /* Resend from where the server left off */
+ data->mds_offset += resp->count;
+ argp->offset += resp->count;
+ argp->pgbase += resp->count;
+ argp->count -= resp->count;
+ } else {
+ /* Resend as a stable write in order to avoid
+ * headaches in the case of a server crash.
+ */
+ argp->stable = NFS_FILE_SYNC;
+ }
+ rpc_restart_call_prepare(task);
+ return;
+ }
+ if (time_before(complain, jiffies)) {
+ printk(KERN_WARNING
+ "NFS: Server wrote zero bytes, expected %u.\n",
+ argp->count);
+ complain = jiffies + 300 * HZ;
+ }
+ /* Can't do anything about it except throw an error. */
+ task->tk_status = -EIO;
+ }
+ return;
+}
+
+#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
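+/*
+ * Try to take the per-inode NFS_INO_COMMIT bit lock. Returns 1 when
+ * the lock was acquired, 0 when @may_wait is false and the lock is
+ * busy, or a negative errno if a fatal signal interrupts the wait.
+ */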
+static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
+{
+ int ret;
+
+ if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
+ return 1;
+ if (!may_wait)
+ return 0;
+ ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
+ NFS_INO_COMMIT,
+ nfs_wait_bit_killable,
+ TASK_KILLABLE);
+ return (ret < 0) ? ret : 1;
+}
+
+void nfs_commit_clear_lock(struct nfs_inode *nfsi)
+{
+ clear_bit(NFS_INO_COMMIT, &nfsi->flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
+}
+EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
+
+void nfs_commitdata_release(void *data)
+{
+ struct nfs_write_data *wdata = data;
+
+ put_nfs_open_context(wdata->args.context);
+ nfs_commit_free(wdata);
+}
+EXPORT_SYMBOL_GPL(nfs_commitdata_release);
+
+int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
+ const struct rpc_call_ops *call_ops,
+ int how)
+{
+ struct rpc_task *task;
+ int priority = flush_task_priority(how);
+ struct rpc_message msg = {
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = data->cred,
+ };
+ struct rpc_task_setup task_setup_data = {
+ .task = &data->task,
+ .rpc_client = clnt,
+ .rpc_message = &msg,
+ .callback_ops = call_ops,
+ .callback_data = data,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
+ .priority = priority,
+ };
+ /* Set up the initial task struct. */
+ NFS_PROTO(data->inode)->commit_setup(data, &msg);
+
+ dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
+
+ task = rpc_run_task(&task_setup_data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+ if (how & FLUSH_SYNC)
+ rpc_wait_for_completion_task(task);
+ rpc_put_task(task);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nfs_initiate_commit);
+
+/*
+ * Set up the argument/result storage required for the RPC call.
+ */
+void nfs_init_commit(struct nfs_write_data *data,
+ struct list_head *head,
+ struct pnfs_layout_segment *lseg)
+{
+ struct nfs_page *first = nfs_list_entry(head->next);
+ struct inode *inode = first->wb_context->dentry->d_inode;
+
+ /* Set up the RPC argument and reply structs
+ * NB: take care not to mess about with data->commit et al. */
+
+ list_splice_init(head, &data->pages);
+
+ data->inode = inode;
+ data->cred = first->wb_context->cred;
+ data->lseg = lseg; /* reference transferred */
+ data->mds_ops = &nfs_commit_ops;
+
+ data->args.fh = NFS_FH(data->inode);
+ /* Note: we always request a commit of the entire inode */
+ data->args.offset = 0;
+ data->args.count = 0;
+ data->args.context = get_nfs_open_context(first->wb_context);
+ data->res.count = 0;
+ data->res.fattr = &data->fattr;
+ data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
+}
+EXPORT_SYMBOL_GPL(nfs_init_commit);
+
+void nfs_retry_commit(struct list_head *page_list,
+ struct pnfs_layout_segment *lseg)
+{
+ struct nfs_page *req;
+
+ while (!list_empty(page_list)) {
+ req = nfs_list_entry(page_list->next);
+ nfs_list_remove_request(req);
+ nfs_mark_request_commit(req, lseg);
+ dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
+ dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
+ BDI_RECLAIMABLE);
+ nfs_unlock_request(req);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_retry_commit);
+
+/*
+ * Commit dirty pages
+ */
+static int
+nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+{
+ struct nfs_write_data *data;
+
+ data = nfs_commitdata_alloc();
+
+ if (!data)
+ goto out_bad;
+
+ /* Set up the argument struct */
+ nfs_init_commit(data, head, NULL);
+ return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
+ out_bad:
+ nfs_retry_commit(head, NULL);
+ nfs_commit_clear_lock(NFS_I(inode));
+ return -ENOMEM;
+}
+
+/*
+ * COMMIT call returned
+ */
+static void nfs_commit_done(struct rpc_task *task, void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+
+ dprintk("NFS: %5u nfs_commit_done (status %d)\n",
+ task->tk_pid, task->tk_status);
+
+ /* Call the NFS version-specific code */
+ NFS_PROTO(data->inode)->commit_done(task, data);
+}
+
+void nfs_commit_release_pages(struct nfs_write_data *data)
+{
+ struct nfs_page *req;
+ int status = data->task.tk_status;
+
+ while (!list_empty(&data->pages)) {
+ req = nfs_list_entry(data->pages.next);
+ nfs_list_remove_request(req);
+ nfs_clear_page_commit(req->wb_page);
+
+ dprintk("NFS: commit (%s/%lld %d@%lld)",
+ req->wb_context->dentry->d_sb->s_id,
+ (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+ req->wb_bytes,
+ (long long)req_offset(req));
+ if (status < 0) {
+ nfs_context_set_write_error(req->wb_context, status);
+ nfs_inode_remove_request(req);
+ dprintk(", error = %d\n", status);
+ goto next;
+ }
+
+ /* Okay, COMMIT succeeded, apparently. Check the verifier
+ * returned by the server against all stored verfs. */
+ if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
+ /* We have a match */
+ nfs_inode_remove_request(req);
+ dprintk(" OK\n");
+ goto next;
+ }
+ /* We have a mismatch. Write the page again */
+ dprintk(" mismatch\n");
+ nfs_mark_request_dirty(req);
+ next:
+ nfs_unlock_request(req);
+ }
+}
+EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
+
+static void nfs_commit_release(void *calldata)
+{
+ struct nfs_write_data *data = calldata;
+
+ nfs_commit_release_pages(data);
+ nfs_commit_clear_lock(NFS_I(data->inode));
+ nfs_commitdata_release(calldata);
+}
+
+static const struct rpc_call_ops nfs_commit_ops = {
+ .rpc_call_prepare = nfs_write_prepare,
+ .rpc_call_done = nfs_commit_done,
+ .rpc_release = nfs_commit_release,
+};
+
+int nfs_commit_inode(struct inode *inode, int how)
+{
+ LIST_HEAD(head);
+ int may_wait = how & FLUSH_SYNC;
+ int res;
+
+ res = nfs_commit_set_lock(NFS_I(inode), may_wait);
+ if (res <= 0)
+ goto out_mark_dirty;
+ res = nfs_scan_commit(inode, &head);
+ if (res) {
+ int error;
+
+ error = pnfs_commit_list(inode, &head, how);
+ if (error == PNFS_NOT_ATTEMPTED)
+ error = nfs_commit_list(inode, &head, how);
+ if (error < 0)
+ return error;
+ if (!may_wait)
+ goto out_mark_dirty;
+ error = wait_on_bit(&NFS_I(inode)->flags,
+ NFS_INO_COMMIT,
+ nfs_wait_bit_killable,
+ TASK_KILLABLE);
+ if (error < 0)
+ return error;
+ } else
+ nfs_commit_clear_lock(NFS_I(inode));
+ return res;
+ /* Note: If we exit without ensuring that the commit is complete,
+ * we must mark the inode as dirty. Otherwise, future calls to
+ * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
+ * that the data is on the disk.
+ */
+out_mark_dirty:
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ return res;
+}
+
+static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ int flags = FLUSH_SYNC;
+ int ret = 0;
+
+ /* no commits means nothing needs to be done */
+ if (!nfsi->ncommit)
+ return ret;
+
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ /* Don't commit yet if this is a non-blocking flush and there
+ * are a lot of outstanding writes for this mapping.
+ */
+ if (nfsi->ncommit <= (nfsi->npages >> 1))
+ goto out_mark_dirty;
+
+ /* don't wait for the COMMIT response */
+ flags = 0;
+ }
+
+ ret = nfs_commit_inode(inode, flags);
+ if (ret >= 0) {
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ if (ret < wbc->nr_to_write)
+ wbc->nr_to_write -= ret;
+ else
+ wbc->nr_to_write = 0;
+ }
+ return 0;
+ }
+out_mark_dirty:
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ return ret;
+}
+#else
+static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
+{
+ return 0;
+}
+#endif
+
+int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ int ret;
+
+ ret = nfs_commit_unstable_pages(inode, wbc);
+ if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
+ int status;
+ bool sync = true;
+
+ if (wbc->sync_mode == WB_SYNC_NONE)
+ sync = false;
+
+ status = pnfs_layoutcommit_inode(inode, sync);
+ if (status < 0)
+ return status;
+ }
+ return ret;
+}
+
+/*
+ * flush the inode to disk.
+ */
+int nfs_wb_all(struct inode *inode)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = LONG_MAX,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
+ };
+
+ return sync_inode(inode, &wbc);
+}
+
+int nfs_wb_page_cancel(struct inode *inode, struct page *page)
+{
+ struct nfs_page *req;
+ int ret = 0;
+
+ BUG_ON(!PageLocked(page));
+ for (;;) {
+ wait_on_page_writeback(page);
+ req = nfs_page_find_request(page);
+ if (req == NULL)
+ break;
+ if (nfs_lock_request_dontget(req)) {
+ nfs_clear_request_commit(req);
+ nfs_inode_remove_request(req);
+ /*
+ * In case nfs_inode_remove_request has marked the
+ * page as being dirty
+ */
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
+ nfs_unlock_request(req);
+ break;
+ }
+ ret = nfs_wait_on_request(req);
+ nfs_release_request(req);
+ if (ret < 0)
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page *page)
+{
+ loff_t range_start = page_offset(page);
+ loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL,
+ .nr_to_write = 0,
+ .range_start = range_start,
+ .range_end = range_end,
+ };
+ int ret;
+
+ for (;;) {
+ wait_on_page_writeback(page);
+ if (clear_page_dirty_for_io(page)) {
+ ret = nfs_writepage_locked(page, &wbc);
+ if (ret < 0)
+ goto out_error;
+ continue;
+ }
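+ /* No attached nfs_page request means the page is clean and
+ * fully committed, so we are done. */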
+ if (!PagePrivate(page))
+ break;
+ ret = nfs_commit_inode(inode, FLUSH_SYNC);
+ if (ret < 0)
+ goto out_error;
+ }
+ return 0;
+out_error:
+ return ret;
+}
+
+#ifdef CONFIG_MIGRATION
+int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode)
+{
+ /*
+ * If PagePrivate is set, then the page is currently associated with
+ * an in-progress read or write request. Don't try to migrate it.
+ *
+ * FIXME: we could do this in principle, but we'll need a way to ensure
+ * that we can safely release the inode reference while holding
+ * the page lock.
+ */
+ if (PagePrivate(page))
+ return -EBUSY;
+
+ nfs_fscache_release_page(page, GFP_KERNEL);
+
+ return migrate_page(mapping, newpage, page, mode);
+}
+#endif
+
+int __init nfs_init_writepagecache(void)
+{
+ nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
+ sizeof(struct nfs_write_data),
+ 0, SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (nfs_wdata_cachep == NULL)
+ return -ENOMEM;
+
+ nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
+ nfs_wdata_cachep);
+ if (nfs_wdata_mempool == NULL)
+ return -ENOMEM;
+
+ nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
+ nfs_wdata_cachep);
+ if (nfs_commit_mempool == NULL)
+ return -ENOMEM;
+
+ /*
+ * NFS congestion size, scale with available memory.
+ *
+ * 64MB: 8192k
+ * 128MB: 11585k
+ * 256MB: 16384k
+ * 512MB: 23170k
+ * 1GB: 32768k
+ * 2GB: 46340k
+ * 4GB: 65536k
+ * 8GB: 92681k
+ * 16GB: 131072k
+ *
+ * This allows larger machines to have larger/more transfers.
+ * Limit the default to 256M
+ */
+ nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
+ if (nfs_congestion_kb > 256*1024)
+ nfs_congestion_kb = 256*1024;
+
+ return 0;
+}
+
+void nfs_destroy_writepagecache(void)
+{
+ mempool_destroy(nfs_commit_mempool);
+ mempool_destroy(nfs_wdata_mempool);
+ kmem_cache_destroy(nfs_wdata_cachep);
+}
+