Merge tag 'nfs-for-7.0-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
 "New Features:
   - Use an LRU list for returning unused delegations
   - Introduce a Kconfig option to disable NFS v4.0 and make NFS v4.1
     the default

  Bugfixes:
   - NFS/localio:
       - Handle short writes by retrying
       - Prevent direct reclaim recursion into NFS via nfs_writepages
       - Use GFP_NOIO and non-memreclaim workqueue in nfs_local_commit
       - Remove -EAGAIN handling in nfs_local_doio()
   - pNFS: fix a missing wake up while waiting on NFS_LAYOUT_DRAIN
   - fs/nfs: Fix a readdir slow-start regression
   - SUNRPC: fix gss_auth kref leak in gss_alloc_msg error path

  Other cleanups and improvements:
   - A few other NFS/localio cleanups
   - Various other delegation handling cleanups from Christoph
   - Unify security_inode_listsecurity() calls
   - Improvements to NFSv4 lease handling
   - Clean up SUNRPC *_debug fields when CONFIG_SUNRPC_DEBUG is not set"

* tag 'nfs-for-7.0-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (60 commits)
  SUNRPC: fix gss_auth kref leak in gss_alloc_msg error path
  nfs: nfs4proc: Convert comma to semicolon
  SUNRPC: Change list definition method
  sunrpc: rpc_debug and others are defined even if CONFIG_SUNRPC_DEBUG unset
  NFSv4: limit lease period in nfs4_set_lease_period()
  NFSv4: pass lease period in seconds to nfs4_set_lease_period()
  nfs: unify security_inode_listsecurity() calls
  fs/nfs: Fix readdir slow-start regression
  pNFS: fix a missing wake up while waiting on NFS_LAYOUT_DRAIN
  NFS: fix delayed delegation return handling
  NFS: simplify error handling in nfs_end_delegation_return
  NFS: fold nfs_abort_delegation_return into nfs_end_delegation_return
  NFS: remove the delegation == NULL check in nfs_end_delegation_return
  NFS: use bool for the issync argument to nfs_end_delegation_return
  NFS: return void from ->return_delegation
  NFS: return void from nfs4_inode_make_writeable
  NFS: Merge CONFIG_NFS_V4_1 with CONFIG_NFS_V4
  NFS: Add a way to disable NFS v4.0 via KConfig
  NFS: Move sequence slot operations into minorversion operations
  NFS: Pass a struct nfs_client to nfs4_init_sequence()
  ...
This commit is contained in:
Linus Torvalds
2026-02-12 17:49:33 -08:00
41 changed files with 1374 additions and 1558 deletions

View File

@@ -78,9 +78,10 @@ config NFS_V4
tristate "NFS client support for NFS version 4"
depends on NFS_FS
select KEYS
select SUNRPC_BACKCHANNEL
help
This option enables support for version 4 of the NFS protocol
(RFC 3530) in the kernel's NFS client.
This option enables support for version 4.1 of the NFS protocol
(RFC 5661) in the kernel's NFS client.
To mount NFS servers using NFSv4, you also need to install user
space programs which can be found in the Linux nfs-utils package,
@@ -96,19 +97,18 @@ config NFS_SWAP
help
This option enables swapon to work on files located on NFS mounts.
config NFS_V4_1
bool "NFS client support for NFSv4.1"
config NFS_V4_0
bool "NFS client support for NFSv4.0"
depends on NFS_V4
select SUNRPC_BACKCHANNEL
help
This option enables support for minor version 1 of the NFSv4 protocol
(RFC 5661) in the kernel's NFS client.
This option enables support for minor version 0 of the NFSv4 protocol
(RFC 3530) in the kernel's NFS client.
If unsure, say N.
config NFS_V4_2
bool "NFS client support for NFSv4.2"
depends on NFS_V4_1
depends on NFS_V4
help
This option enables support for minor version 2 of the NFSv4 protocol
in the kernel's NFS client.
@@ -117,22 +117,22 @@ config NFS_V4_2
config PNFS_FILE_LAYOUT
tristate
depends on NFS_V4_1
depends on NFS_V4
default NFS_V4
config PNFS_BLOCK
tristate
depends on NFS_V4_1 && BLK_DEV_DM
depends on NFS_V4 && BLK_DEV_DM
default NFS_V4
config PNFS_FLEXFILE_LAYOUT
tristate
depends on NFS_V4_1
depends on NFS_V4
default NFS_V4
config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
string "NFSv4.1 Implementation ID Domain"
depends on NFS_V4_1
depends on NFS_V4
default "kernel.org"
help
This option defines the domain portion of the implementation ID that
@@ -144,7 +144,7 @@ config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
config NFS_V4_1_MIGRATION
bool "NFSv4.1 client support for migration"
depends on NFS_V4_1
depends on NFS_V4
default n
help
This option makes the NFS client advertise to NFSv4.1 servers that

View File

@@ -27,10 +27,10 @@ CFLAGS_nfs4trace.o += -I$(src)
nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \
delegation.o nfs4idmap.o callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o nfs4getroot.o nfs4client.o nfs4session.o \
dns_resolve.o nfs4trace.o
dns_resolve.o nfs4trace.o pnfs.o pnfs_dev.o pnfs_nfs.o
nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o
nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o
nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o pnfs_nfs.o
nfsv4-$(CONFIG_NFS_V4_0) += nfs40client.o nfs40proc.o
nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o nfs42xattr.o
obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/

View File

@@ -87,7 +87,6 @@ nfs4_callback_svc(void *vrqstp)
return 0;
}
#if defined(CONFIG_NFS_V4_1)
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct svc_serv *serv)
{
@@ -98,12 +97,6 @@ static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
*/
xprt->bc_serv = serv;
}
#else
static inline void nfs_callback_bc_serv(u32 minorversion, struct rpc_xprt *xprt,
struct svc_serv *serv)
{
}
#endif /* CONFIG_NFS_V4_1 */
static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
struct svc_serv *serv)
@@ -157,7 +150,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
}
ret = 0;
if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
if (minorversion == 0)
ret = nfs4_callback_up_net(serv, net);
else if (xprt->ops->bc_setup)
set_bc_enabled(serv);
@@ -198,10 +191,6 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
cb_info->users);
threadfn = nfs4_callback_svc;
#if !defined(CONFIG_NFS_V4_1)
if (minorversion)
return ERR_PTR(-ENOTSUPP);
#endif
serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE,
threadfn);
if (!serv) {

View File

@@ -65,8 +65,6 @@ struct cb_recallargs {
uint32_t truncate;
};
#if defined(CONFIG_NFS_V4_1)
struct referring_call {
uint32_t rc_sequenceid;
uint32_t rc_slotid;
@@ -168,7 +166,6 @@ struct cb_notify_lock_args {
extern __be32 nfs4_callback_notify_lock(void *argp, void *resp,
struct cb_process_state *cps);
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
struct cb_offloadargs {
struct nfs_fh coa_fh;

View File

@@ -51,12 +51,18 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
-ntohl(res->status));
goto out;
}
rcu_read_lock();
delegation = nfs4_get_valid_delegation(inode);
if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
if (!delegation)
goto out_iput;
res->size = i_size_read(inode);
if ((delegation->type & FMODE_WRITE) == 0) {
nfs_put_delegation(delegation);
goto out_iput;
}
res->change_attr = delegation->change_attr;
nfs_put_delegation(delegation);
res->size = i_size_read(inode);
if (nfs_have_writebacks(inode))
res->change_attr++;
res->atime = inode_get_atime(inode);
@@ -71,7 +77,6 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
FATTR4_WORD2_TIME_DELEG_MODIFY) & args->bitmap[2];
res->status = 0;
out_iput:
rcu_read_unlock();
trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
nfs_iput_and_deactive(inode);
out:
@@ -121,8 +126,6 @@ out:
return res;
}
#if defined(CONFIG_NFS_V4_1)
/*
* Lookup a layout inode by stateid
*
@@ -693,7 +696,6 @@ __be32 nfs4_callback_notify_lock(void *argp, void *resp,
return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
struct cb_offloadargs *args)

View File

@@ -30,7 +30,6 @@
(2 + 2 + 3 + 3 + 3 + 3 + 3) * 4)
#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#if defined(CONFIG_NFS_V4_1)
#define CB_OP_LAYOUTRECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#define CB_OP_DEVICENOTIFY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#define CB_OP_SEQUENCE_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
@@ -39,7 +38,6 @@
#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#define CB_OP_NOTIFY_LOCK_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
#define CB_OP_OFFLOAD_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#endif /* CONFIG_NFS_V4_2 */
@@ -205,7 +203,6 @@ static __be32 decode_recall_args(struct svc_rqst *rqstp,
return decode_fh(xdr, &args->fh);
}
#if defined(CONFIG_NFS_V4_1)
static __be32 decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
stateid->type = NFS4_LAYOUT_STATEID_TYPE;
@@ -521,7 +518,6 @@ static __be32 decode_notify_lock_args(struct svc_rqst *rqstp,
return decode_lockowner(xdr, args);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static __be32 decode_write_response(struct xdr_stream *xdr,
struct cb_offloadargs *args)
@@ -747,8 +743,6 @@ out:
return status;
}
#if defined(CONFIG_NFS_V4_1)
static __be32 encode_sessionid(struct xdr_stream *xdr,
const struct nfs4_sessionid *sid)
{
@@ -846,19 +840,6 @@ static void nfs4_cb_free_slot(struct cb_process_state *cps)
}
}
#else /* CONFIG_NFS_V4_1 */
static __be32
preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
{
return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
static void nfs4_cb_free_slot(struct cb_process_state *cps)
{
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static __be32
preprocess_nfs42_op(int nop, unsigned int op_nr, struct callback_op **op)
@@ -1051,7 +1032,6 @@ static struct callback_op callback_ops[] = {
.decode_args = decode_recall_args,
.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
},
#if defined(CONFIG_NFS_V4_1)
[OP_CB_LAYOUTRECALL] = {
.process_op = nfs4_callback_layoutrecall,
.decode_args = decode_layoutrecall_args,
@@ -1083,7 +1063,6 @@ static struct callback_op callback_ops[] = {
.decode_args = decode_notify_lock_args,
.res_maxsize = CB_OP_NOTIFY_LOCK_RES_MAXSZ,
},
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
[OP_CB_OFFLOAD] = {
.process_op = nfs4_callback_offload,

View File

@@ -1060,6 +1060,10 @@ struct nfs_server *nfs_alloc_server(void)
INIT_LIST_HEAD(&server->client_link);
INIT_LIST_HEAD(&server->master_link);
INIT_LIST_HEAD(&server->delegations);
spin_lock_init(&server->delegations_lock);
INIT_LIST_HEAD(&server->delegations_return);
INIT_LIST_HEAD(&server->delegations_lru);
INIT_LIST_HEAD(&server->delegations_delayed);
INIT_LIST_HEAD(&server->layouts);
INIT_LIST_HEAD(&server->state_owners_lru);
INIT_LIST_HEAD(&server->ss_copies);
@@ -1263,11 +1267,9 @@ void nfs_clients_init(struct net *net)
INIT_LIST_HEAD(&nn->nfs_volume_list);
#if IS_ENABLED(CONFIG_NFS_V4)
idr_init(&nn->cb_ident_idr);
#endif
#if IS_ENABLED(CONFIG_NFS_V4_1)
INIT_LIST_HEAD(&nn->nfs4_data_server_cache);
spin_lock_init(&nn->nfs4_data_server_lock);
#endif
#endif /* CONFIG_NFS_V4 */
spin_lock_init(&nn->nfs_client_lock);
nn->boot_time = ktime_get_real();
memset(&nn->rpcstats, 0, sizeof(nn->rpcstats));
@@ -1284,9 +1286,9 @@ void nfs_clients_exit(struct net *net)
nfs_cleanup_cb_ident_idr(net);
WARN_ON_ONCE(!list_empty(&nn->nfs_client_list));
WARN_ON_ONCE(!list_empty(&nn->nfs_volume_list));
#if IS_ENABLED(CONFIG_NFS_V4_1)
#if IS_ENABLED(CONFIG_NFS_V4)
WARN_ON_ONCE(!list_empty(&nn->nfs4_data_server_cache));
#endif
#endif /* CONFIG_NFS_V4 */
}
#ifdef CONFIG_PROC_FS

View File

@@ -52,33 +52,33 @@ static void __nfs_free_delegation(struct nfs_delegation *delegation)
static void nfs_mark_delegation_revoked(struct nfs_server *server,
struct nfs_delegation *delegation)
{
if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
atomic_long_dec(&server->nr_active_delegations);
if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
nfs_clear_verifier_delegated(delegation->inode);
bool put_ref = false;
if (test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
return;
delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
atomic_long_dec(&server->nr_active_delegations);
if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
nfs_clear_verifier_delegated(delegation->inode);
spin_lock(&server->delegations_lock);
if (!list_empty(&delegation->entry)) {
list_del_init(&delegation->entry);
put_ref = true;
}
spin_unlock(&server->delegations_lock);
if (put_ref)
nfs_put_delegation(delegation);
}
static struct nfs_delegation *nfs_get_delegation(struct nfs_delegation *delegation)
{
refcount_inc(&delegation->refcount);
return delegation;
}
static void nfs_put_delegation(struct nfs_delegation *delegation)
void nfs_put_delegation(struct nfs_delegation *delegation)
{
if (refcount_dec_and_test(&delegation->refcount))
__nfs_free_delegation(delegation);
}
static void nfs_free_delegation(struct nfs_server *server,
struct nfs_delegation *delegation)
{
nfs_mark_delegation_revoked(server, delegation);
nfs_put_delegation(delegation);
}
/**
* nfs_mark_delegation_referenced - set delegation's REFERENCED flag
* @delegation: delegation to process
@@ -92,8 +92,12 @@ void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
static void nfs_mark_return_delegation(struct nfs_server *server,
struct nfs_delegation *delegation)
{
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
spin_lock(&server->delegations_lock);
if (list_empty(&delegation->entry))
refcount_inc(&delegation->refcount);
list_move_tail(&delegation->entry, &server->delegations_return);
spin_unlock(&server->delegations_lock);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}
@@ -111,10 +115,14 @@ struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
{
struct nfs_delegation *delegation;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
if (nfs4_is_valid_delegation(delegation, 0))
return delegation;
return NULL;
if (!nfs4_is_valid_delegation(delegation, 0) ||
!refcount_inc_not_zero(&delegation->refcount))
delegation = NULL;
rcu_read_unlock();
return delegation;
}
static int nfs4_do_check_delegation(struct inode *inode, fmode_t type,
@@ -308,78 +316,51 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
spin_lock(&delegation->lock);
if (delegation->inode != NULL)
inode = igrab(delegation->inode);
if (!inode)
set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
spin_unlock(&delegation->lock);
return inode;
}
static struct nfs_delegation *
nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
{
struct nfs_delegation *ret = NULL;
struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
if (delegation->inode &&
!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);
}
spin_unlock(&delegation->lock);
if (ret)
nfs_clear_verifier_delegated(&nfsi->vfs_inode);
out:
return ret;
}
static struct nfs_delegation *
nfs_start_delegation_return(struct nfs_inode *nfsi)
{
struct nfs_delegation *delegation;
bool return_now = false;
rcu_read_lock();
delegation = nfs_start_delegation_return_locked(nfsi);
delegation = rcu_dereference(nfsi->delegation);
if (!delegation || !refcount_inc_not_zero(&delegation->refcount)) {
rcu_read_unlock();
return NULL;
}
rcu_read_unlock();
spin_lock(&delegation->lock);
if (delegation->inode &&
!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
return_now = true;
spin_unlock(&delegation->lock);
if (!return_now) {
nfs_put_delegation(delegation);
return NULL;
}
nfs_clear_verifier_delegated(&nfsi->vfs_inode);
return delegation;
}
static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
struct nfs_server *server, int err)
{
spin_lock(&delegation->lock);
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
if (err == -EAGAIN) {
set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
set_bit(NFS4SERV_DELEGRETURN_DELAYED,
&server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN_DELAYED,
&server->nfs_client->cl_state);
}
spin_unlock(&delegation->lock);
}
static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
static bool
nfs_detach_delegations_locked(struct nfs_inode *nfsi,
struct nfs_delegation *delegation,
struct nfs_client *clp)
{
struct nfs_delegation *deleg_cur =
rcu_dereference_protected(nfsi->delegation,
lockdep_is_held(&clp->cl_lock));
lockdep_assert_held(&clp->cl_lock);
trace_nfs4_detach_delegation(&nfsi->vfs_inode, delegation->type);
if (deleg_cur == NULL || delegation != deleg_cur)
return NULL;
spin_lock(&delegation->lock);
if (!delegation->inode) {
spin_unlock(&delegation->lock);
return NULL;
return false;
}
hlist_del_init_rcu(&delegation->hash);
list_del_rcu(&delegation->super_list);
@@ -387,34 +368,24 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi,
rcu_assign_pointer(nfsi->delegation, NULL);
spin_unlock(&delegation->lock);
clear_bit(NFS_INO_REQ_DIR_DELEG, &nfsi->flags);
return delegation;
return true;
}
static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
static bool nfs_detach_delegation(struct nfs_inode *nfsi,
struct nfs_delegation *delegation,
struct nfs_server *server)
{
struct nfs_client *clp = server->nfs_client;
struct nfs_delegation *deleg_cur;
bool ret = false;
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(nfsi, delegation, clp);
deleg_cur = rcu_dereference_protected(nfsi->delegation,
lockdep_is_held(&clp->cl_lock));
if (delegation == deleg_cur)
ret = nfs_detach_delegations_locked(nfsi, delegation, clp);
spin_unlock(&clp->cl_lock);
return delegation;
}
static struct nfs_delegation *
nfs_inode_detach_delegation(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_delegation *delegation;
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
if (delegation != NULL)
delegation = nfs_detach_delegation(nfsi, delegation, server);
rcu_read_unlock();
return delegation;
return ret;
}
static void
@@ -482,6 +453,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
delegation->cred = get_cred(cred);
delegation->inode = inode;
delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
INIT_LIST_HEAD(&delegation->entry);
switch (deleg_type) {
case NFS4_OPEN_DELEGATE_READ_ATTRS_DELEG:
case NFS4_OPEN_DELEGATE_WRITE_ATTRS_DELEG:
@@ -524,9 +496,9 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
&old_delegation->flags))
goto out;
}
freeme = nfs_detach_delegation_locked(nfsi, old_delegation, clp);
if (freeme == NULL)
if (!nfs_detach_delegations_locked(nfsi, old_delegation, clp))
goto out;
freeme = old_delegation;
add_new:
/*
* If we didn't revalidate the change attribute before setting
@@ -564,23 +536,24 @@ out:
__nfs_free_delegation(delegation);
if (freeme != NULL) {
nfs_do_return_delegation(inode, freeme, 0);
nfs_free_delegation(server, freeme);
nfs_mark_delegation_revoked(server, freeme);
nfs_put_delegation(freeme);
}
return status;
}
/*
* Basic procedure for returning a delegation to the server
* Basic procedure for returning a delegation to the server.
* If @issync is set, wait until state recovery has finished. Otherwise
* return -EAGAIN to the caller if we need more time.
*/
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
static int nfs_end_delegation_return(struct inode *inode,
struct nfs_delegation *delegation, bool issync)
{
struct nfs_server *server = NFS_SERVER(inode);
unsigned int mode = O_WRONLY | O_RDWR;
int err = 0;
if (delegation == NULL)
return 0;
/* Directory delegations don't require any state recovery */
if (!S_ISREG(inode->i_mode))
goto out_return;
@@ -595,133 +568,154 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
break;
err = nfs_delegation_claim_opens(inode, &delegation->stateid,
delegation->type);
if (!issync || err != -EAGAIN)
if (!err)
break;
if (err != -EAGAIN)
goto abort;
if (!issync)
goto delay;
/*
* Guard against state recovery
*/
err = nfs4_wait_clnt_recover(server->nfs_client);
}
if (err) {
nfs_abort_delegation_return(delegation, server, err);
goto out;
}
out_return:
err = nfs_do_return_delegation(inode, delegation, issync);
out:
/* Refcount matched in nfs_start_delegation_return_locked() */
nfs_put_delegation(delegation);
return nfs_do_return_delegation(inode, delegation, issync);
delay:
spin_lock(&server->delegations_lock);
if (list_empty(&delegation->entry))
refcount_inc(&delegation->refcount);
list_move_tail(&delegation->entry, &server->delegations_return);
spin_unlock(&server->delegations_lock);
set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &server->nfs_client->cl_state);
abort:
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
return err;
}
static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
static int nfs_return_one_delegation(struct nfs_server *server)
{
bool ret = false;
struct nfs_delegation *delegation;
struct inode *inode;
int err = 0;
trace_nfs_delegation_need_return(delegation);
spin_lock(&server->delegations_lock);
delegation = list_first_entry_or_null(&server->delegations_return,
struct nfs_delegation, entry);
if (!delegation) {
spin_unlock(&server->delegations_lock);
return 0; /* no more delegations */
}
list_del_init(&delegation->entry);
spin_unlock(&server->delegations_lock);
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
ret = true;
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
ret = false;
spin_lock(&delegation->lock);
inode = delegation->inode;
if (!inode || !igrab(inode)) {
spin_unlock(&delegation->lock);
goto out_put_delegation;
}
if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
spin_unlock(&delegation->lock);
goto out_put_inode;
}
spin_unlock(&delegation->lock);
return ret;
nfs_clear_verifier_delegated(inode);
err = nfs_end_delegation_return(inode, delegation, false);
out_put_inode:
iput(inode);
out_put_delegation:
nfs_put_delegation(delegation);
if (err)
return err;
return 1; /* keep going */
}
static int nfs_server_return_marked_delegations(struct nfs_server *server,
void __always_unused *data)
{
struct nfs_delegation *delegation;
struct nfs_delegation *prev;
struct inode *inode;
struct inode *place_holder = NULL;
struct nfs_delegation *place_holder_deleg = NULL;
int err = 0;
int err;
if (!test_and_clear_bit(NFS4SERV_DELEGRETURN,
&server->delegation_flags))
return 0;
restart:
/*
* To avoid quadratic looping we hold a reference
* to an inode place_holder. Each time we restart, we
* list delegation in the server from the delegations
* of that inode.
* prev is an RCU-protected pointer to a delegation which
* wasn't marked for return and might be a good choice for
* the next place_holder.
*/
prev = NULL;
delegation = NULL;
rcu_read_lock();
if (place_holder)
delegation = rcu_dereference(NFS_I(place_holder)->delegation);
if (!delegation || delegation != place_holder_deleg)
delegation = list_entry_rcu(server->delegations.next,
struct nfs_delegation, super_list);
list_for_each_entry_from_rcu(delegation, &server->delegations, super_list) {
struct inode *to_put = NULL;
if (test_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags))
continue;
if (!nfs_delegation_need_return(delegation)) {
if (nfs4_is_valid_delegation(delegation, 0))
prev = delegation;
continue;
}
inode = nfs_delegation_grab_inode(delegation);
if (inode == NULL)
continue;
if (prev) {
struct inode *tmp = nfs_delegation_grab_inode(prev);
if (tmp) {
to_put = place_holder;
place_holder = tmp;
place_holder_deleg = prev;
}
}
delegation = nfs_start_delegation_return_locked(NFS_I(inode));
rcu_read_unlock();
iput(to_put);
err = nfs_end_delegation_return(inode, delegation, 0);
iput(inode);
while ((err = nfs_return_one_delegation(server)) > 0)
cond_resched();
if (!err)
goto restart;
set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
goto out;
}
rcu_read_unlock();
out:
iput(place_holder);
return err;
}
static inline bool nfs_delegations_over_limit(struct nfs_server *server)
{
return !list_empty_careful(&server->delegations_lru) &&
atomic_long_read(&server->nr_active_delegations) >
nfs_delegation_watermark;
}
static void nfs_delegations_return_from_lru(struct nfs_server *server)
{
struct nfs_delegation *d, *n;
unsigned int pass = 0;
bool moved = false;
retry:
spin_lock(&server->delegations_lock);
list_for_each_entry_safe(d, n, &server->delegations_lru, entry) {
if (!nfs_delegations_over_limit(server))
break;
if (pass == 0 && test_bit(NFS_DELEGATION_REFERENCED, &d->flags))
continue;
list_move_tail(&d->entry, &server->delegations_return);
moved = true;
}
spin_unlock(&server->delegations_lock);
/*
* If we are still over the limit, try to reclaim referenced delegations
* as well.
*/
if (pass == 0 && nfs_delegations_over_limit(server)) {
pass++;
goto retry;
}
if (moved) {
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
nfs4_schedule_state_manager(server->nfs_client);
}
}
static void nfs_delegation_add_lru(struct nfs_server *server,
struct nfs_delegation *delegation)
{
spin_lock(&server->delegations_lock);
if (list_empty(&delegation->entry)) {
list_add_tail(&delegation->entry, &server->delegations_lru);
refcount_inc(&delegation->refcount);
}
spin_unlock(&server->delegations_lock);
if (nfs_delegations_over_limit(server))
nfs_delegations_return_from_lru(server);
}
static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
{
struct nfs_delegation *d;
bool ret = false;
if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED,
&server->delegation_flags))
goto out;
list_for_each_entry_rcu (d, &server->delegations, super_list) {
if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
continue;
nfs_mark_return_delegation(server, d);
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
if (list_empty_careful(&server->delegations_delayed))
return false;
spin_lock(&server->delegations_lock);
if (!list_empty(&server->delegations_delayed)) {
list_splice_tail_init(&server->delegations_delayed,
&server->delegations_return);
ret = true;
}
out:
spin_unlock(&server->delegations_lock);
return ret;
}
@@ -731,14 +725,17 @@ static bool nfs_client_clear_delayed_delegations(struct nfs_client *clp)
bool ret = false;
if (!test_and_clear_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state))
goto out;
return false;
rcu_read_lock();
list_for_each_entry_rcu (server, &clp->cl_superblocks, client_link) {
if (nfs_server_clear_delayed_delegations(server))
ret = true;
}
rcu_read_unlock();
out:
if (ret)
set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
return ret;
}
@@ -774,15 +771,23 @@ int nfs_client_return_marked_delegations(struct nfs_client *clp)
*/
void nfs_inode_evict_delegation(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_delegation *delegation;
delegation = nfs_inode_detach_delegation(inode);
if (delegation != NULL) {
set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
nfs_do_return_delegation(inode, delegation, 1);
nfs_free_delegation(NFS_SERVER(inode), delegation);
}
rcu_read_lock();
delegation = rcu_dereference(nfsi->delegation);
if (delegation && !nfs_detach_delegation(nfsi, delegation, server))
delegation = NULL;
rcu_read_unlock();
if (!delegation)
return;
set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
nfs_do_return_delegation(inode, delegation, 1);
nfs_mark_delegation_revoked(server, delegation);
nfs_put_delegation(delegation);
}
/**
@@ -795,20 +800,21 @@ void nfs_inode_evict_delegation(struct inode *inode)
*
* Returns zero on success, or a negative errno value.
*/
int nfs4_inode_return_delegation(struct inode *inode)
void nfs4_inode_return_delegation(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
delegation = nfs_start_delegation_return(nfsi);
if (delegation != NULL) {
/* Synchronous recall of any application leases */
break_lease(inode, O_WRONLY | O_RDWR);
if (S_ISREG(inode->i_mode))
nfs_wb_all(inode);
return nfs_end_delegation_return(inode, delegation, 1);
}
return 0;
if (!delegation)
return;
/* Synchronous recall of any application leases */
break_lease(inode, O_WRONLY | O_RDWR);
if (S_ISREG(inode->i_mode))
nfs_wb_all(inode);
nfs_end_delegation_return(inode, delegation, true);
nfs_put_delegation(delegation);
}
/**
@@ -822,30 +828,30 @@ int nfs4_inode_return_delegation(struct inode *inode)
void nfs4_inode_set_return_delegation_on_close(struct inode *inode)
{
struct nfs_delegation *delegation;
struct nfs_delegation *ret = NULL;
bool return_now = false;
if (!inode)
return;
rcu_read_lock();
delegation = nfs4_get_valid_delegation(inode);
if (!delegation)
goto out;
return;
spin_lock(&delegation->lock);
if (!delegation->inode)
goto out_unlock;
if (list_empty(&NFS_I(inode)->open_files) &&
!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);
} else
!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
return_now = true;
else
set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
out_unlock:
spin_unlock(&delegation->lock);
if (ret)
if (return_now) {
nfs_clear_verifier_delegated(inode);
out:
rcu_read_unlock();
nfs_end_delegation_return(inode, ret, 0);
nfs_end_delegation_return(inode, delegation, false);
}
nfs_put_delegation(delegation);
}
/**
@@ -857,33 +863,32 @@ out:
*/
void nfs4_inode_return_delegation_on_close(struct inode *inode)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_delegation *delegation;
struct nfs_delegation *ret = NULL;
bool return_now = false;
if (!inode)
return;
rcu_read_lock();
delegation = nfs4_get_valid_delegation(inode);
if (!delegation)
goto out;
if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) ||
atomic_long_read(&NFS_SERVER(inode)->nr_active_delegations) >=
nfs_delegation_watermark) {
return;
if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
spin_lock(&delegation->lock);
if (delegation->inode &&
list_empty(&NFS_I(inode)->open_files) &&
!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);
return_now = true;
}
spin_unlock(&delegation->lock);
if (ret)
nfs_clear_verifier_delegated(inode);
}
out:
rcu_read_unlock();
nfs_end_delegation_return(inode, ret, 0);
if (return_now) {
nfs_clear_verifier_delegated(inode);
nfs_end_delegation_return(inode, delegation, false);
} else {
nfs_delegation_add_lru(server, delegation);
}
nfs_put_delegation(delegation);
}
/**
@@ -891,23 +896,19 @@ out:
* @inode: pointer to inode
*
* Make the inode writeable by returning the delegation if necessary
*
* Returns zero on success, or a negative errno value.
*/
int nfs4_inode_make_writeable(struct inode *inode)
void nfs4_inode_make_writeable(struct inode *inode)
{
struct nfs_delegation *delegation;
rcu_read_lock();
delegation = nfs4_get_valid_delegation(inode);
if (delegation == NULL ||
(nfs4_has_session(NFS_SERVER(inode)->nfs_client) &&
(delegation->type & FMODE_WRITE))) {
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
return nfs4_inode_return_delegation(inode);
if (!delegation)
return;
if (!nfs4_has_session(NFS_SERVER(inode)->nfs_client) ||
!(delegation->type & FMODE_WRITE))
nfs4_inode_return_delegation(inode);
nfs_put_delegation(delegation);
}
static void
@@ -916,7 +917,7 @@ nfs_mark_return_if_closed_delegation(struct nfs_server *server,
{
struct inode *inode;
if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) ||
if (!list_empty_careful(&server->delegations_return) ||
test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags))
return;
spin_lock(&delegation->lock);
@@ -943,16 +944,6 @@ static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
return ret;
}
static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
struct nfs_server *server;
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
nfs_server_mark_return_all_delegations(server);
rcu_read_unlock();
}
static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
@@ -966,7 +957,13 @@ static void nfs_delegation_run_state_manager(struct nfs_client *clp)
*/
void nfs_expire_all_delegations(struct nfs_client *clp)
{
nfs_client_mark_return_all_delegations(clp);
struct nfs_server *server;
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
nfs_server_mark_return_all_delegations(server);
rcu_read_unlock();
nfs_delegation_run_state_manager(clp);
}
@@ -1006,8 +1003,7 @@ static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
}
}
static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
fmode_t flags)
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
{
struct nfs_server *server;
@@ -1015,6 +1011,8 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
nfs_mark_return_unused_delegation_types(server, flags);
rcu_read_unlock();
nfs_delegation_run_state_manager(clp);
}
static void nfs_revoke_delegation(struct inode *inode,
@@ -1111,27 +1109,21 @@ void nfs_remove_bad_delegation(struct inode *inode,
}
EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);
/**
* nfs_expire_unused_delegation_types
* @clp: client to process
* @flags: delegation types to expire
*
*/
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
static bool nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
nfs_client_mark_return_unused_delegation_types(clp, flags);
nfs_delegation_run_state_manager(clp);
}
struct nfs_delegation *d, *n;
bool marked = false;
static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
struct nfs_delegation *delegation;
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
spin_lock(&server->delegations_lock);
list_for_each_entry_safe(d, n, &server->delegations_lru, entry) {
if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &d->flags))
continue;
nfs_mark_return_if_closed_delegation(server, delegation);
list_move_tail(&d->entry, &server->delegations_return);
marked = true;
}
spin_unlock(&server->delegations_lock);
return marked;
}
/**
@@ -1142,13 +1134,17 @@ static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
struct nfs_server *server;
bool marked = false;
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
nfs_mark_return_unreferenced_delegations(server);
marked |= nfs_mark_return_unreferenced_delegations(server);
rcu_read_unlock();
nfs_delegation_run_state_manager(clp);
if (marked) {
set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
nfs4_schedule_state_manager(clp);
}
}
/**
@@ -1165,24 +1161,24 @@ int nfs_async_inode_return_delegation(struct inode *inode,
struct nfs_client *clp = server->nfs_client;
struct nfs_delegation *delegation;
rcu_read_lock();
delegation = nfs4_get_valid_delegation(inode);
if (delegation == NULL)
goto out_enoent;
if (!delegation)
return -ENOENT;
if (stateid != NULL &&
!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
goto out_enoent;
!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
nfs_put_delegation(delegation);
return -ENOENT;
}
nfs_mark_return_delegation(server, delegation);
rcu_read_unlock();
nfs_put_delegation(delegation);
/* If there are any application leases or delegations, recall them */
break_lease(inode, O_WRONLY | O_RDWR | O_NONBLOCK);
nfs_delegation_run_state_manager(clp);
return 0;
out_enoent:
rcu_read_unlock();
return -ENOENT;
}
static struct inode *
@@ -1282,9 +1278,7 @@ static int nfs_server_reap_unclaimed_delegations(struct nfs_server *server,
restart:
rcu_read_lock();
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
if (test_bit(NFS_DELEGATION_INODE_FREEING,
&delegation->flags) ||
test_bit(NFS_DELEGATION_RETURNING,
if (test_bit(NFS_DELEGATION_RETURNING,
&delegation->flags) ||
test_bit(NFS_DELEGATION_NEED_RECLAIM,
&delegation->flags) == 0)
@@ -1292,13 +1286,15 @@ restart:
inode = nfs_delegation_grab_inode(delegation);
if (inode == NULL)
continue;
delegation = nfs_start_delegation_return_locked(NFS_I(inode));
delegation = nfs_start_delegation_return(NFS_I(inode));
rcu_read_unlock();
if (delegation != NULL) {
if (nfs_detach_delegation(NFS_I(inode), delegation,
server) != NULL)
nfs_free_delegation(server, delegation);
/* Match nfs_start_delegation_return_locked */
server)) {
nfs_mark_delegation_revoked(server, delegation);
nfs_put_delegation(delegation);
}
/* Match nfs_start_delegation_return */
nfs_put_delegation(delegation);
}
iput(inode);
@@ -1419,9 +1415,7 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
restart:
rcu_read_lock();
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
if (test_bit(NFS_DELEGATION_INODE_FREEING,
&delegation->flags) ||
test_bit(NFS_DELEGATION_RETURNING,
if (test_bit(NFS_DELEGATION_RETURNING,
&delegation->flags) ||
test_bit(NFS_DELEGATION_TEST_EXPIRED,
&delegation->flags) == 0 ||
@@ -1504,7 +1498,7 @@ int nfs_delegations_present(struct nfs_client *clp)
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
if (!list_empty(&server->delegations)) {
if (atomic_long_read(&server->nr_active_delegations) > 0) {
ret = 1;
break;
}

View File

@@ -26,19 +26,17 @@ struct nfs_delegation {
unsigned long flags;
refcount_t refcount;
spinlock_t lock;
struct list_head entry;
struct rcu_head rcu;
};
enum {
NFS_DELEGATION_NEED_RECLAIM = 0,
NFS_DELEGATION_RETURN,
NFS_DELEGATION_RETURN_IF_CLOSED,
NFS_DELEGATION_REFERENCED,
NFS_DELEGATION_RETURNING,
NFS_DELEGATION_REVOKED,
NFS_DELEGATION_TEST_EXPIRED,
NFS_DELEGATION_INODE_FREEING,
NFS_DELEGATION_RETURN_DELAYED,
NFS_DELEGATION_DELEGTIME,
};
@@ -48,7 +46,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
fmode_t type, const nfs4_stateid *stateid,
unsigned long pagemod_limit, u32 deleg_type);
int nfs4_inode_return_delegation(struct inode *inode);
void nfs4_inode_return_delegation(struct inode *inode);
void nfs4_inode_return_delegation_on_close(struct inode *inode);
void nfs4_inode_set_return_delegation_on_close(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
@@ -81,13 +79,14 @@ bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_state
bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
void nfs_put_delegation(struct nfs_delegation *delegation);
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags);
int nfs4_check_delegation(struct inode *inode, fmode_t type);
bool nfs4_delegation_flush_on_close(const struct inode *inode);
void nfs_inode_find_delegation_state_and_recover(struct inode *inode,
const nfs4_stateid *stateid);
int nfs4_inode_make_writeable(struct inode *inode);
void nfs4_inode_make_writeable(struct inode *inode);
#endif

View File

@@ -72,7 +72,7 @@ const struct address_space_operations nfs_dir_aops = {
.free_folio = nfs_readdir_clear_array,
};
#define NFS_INIT_DTSIZE PAGE_SIZE
#define NFS_INIT_DTSIZE SZ_64K
static struct nfs_open_dir_context *
alloc_nfs_open_dir_context(struct inode *dir)
@@ -83,7 +83,7 @@ alloc_nfs_open_dir_context(struct inode *dir)
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
if (ctx != NULL) {
ctx->attr_gencount = nfsi->attr_gencount;
ctx->dtsize = NFS_INIT_DTSIZE;
ctx->dtsize = min(NFS_SERVER(dir)->dtsize, NFS_INIT_DTSIZE);
spin_lock(&dir->i_lock);
if (list_empty(&nfsi->open_files) &&
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))

View File

@@ -806,7 +806,8 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->mount_server.version = result.uint_32;
break;
case Opt_minorversion:
if (result.uint_32 > NFS4_MAX_MINOR_VERSION)
if (result.uint_32 < NFS4_MIN_MINOR_VERSION ||
result.uint_32 > NFS4_MAX_MINOR_VERSION)
goto out_of_bounds;
ctx->minorversion = result.uint_32;
break;

View File

@@ -334,17 +334,13 @@ extern int nfs3_decode_dirent(struct xdr_stream *,
#if IS_ENABLED(CONFIG_NFS_V4)
extern int nfs4_decode_dirent(struct xdr_stream *,
struct nfs_entry *, bool);
#endif
#ifdef CONFIG_NFS_V4_1
extern const u32 nfs41_maxread_overhead;
extern const u32 nfs41_maxwrite_overhead;
extern const u32 nfs41_maxgetdevinfo_overhead;
#endif
/* nfs4proc.c */
#if IS_ENABLED(CONFIG_NFS_V4)
extern const struct rpc_procinfo nfs4_procedures[];
#endif
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
@@ -639,7 +635,7 @@ void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio);
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
#ifdef CONFIG_NFS_V4_1
#ifdef CONFIG_NFS_V4
static inline void
pnfs_bucket_clear_pnfs_ds_commit_verifiers(struct pnfs_commit_bucket *buckets,
unsigned int nbuckets)
@@ -660,12 +656,12 @@ void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
array->nbuckets);
rcu_read_unlock();
}
#else
#else /* CONFIG_NFS_V4 */
static inline
void nfs_clear_pnfs_ds_commit_verifiers(struct pnfs_ds_commit_info *cinfo)
{
}
#endif
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_MIGRATION
int nfs_migrate_folio(struct address_space *, struct folio *dst,
@@ -739,9 +735,6 @@ extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset);
/* nfs4proc.c */
extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
const struct nfs_client_initdata *);
extern int nfs40_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
const struct cred *cred);
extern int nfs41_walk_client_list(struct nfs_client *clp,
struct nfs_client **result,
const struct cred *cred);

View File

@@ -58,6 +58,11 @@ struct nfs_local_fsync_ctx {
static bool localio_enabled __read_mostly = true;
module_param(localio_enabled, bool, 0644);
static void nfs_local_do_read(struct nfs_local_kiocb *iocb,
const struct rpc_call_ops *call_ops);
static void nfs_local_do_write(struct nfs_local_kiocb *iocb,
const struct rpc_call_ops *call_ops);
static inline bool nfs_client_is_local(const struct nfs_client *clp)
{
return !!rcu_access_pointer(clp->cl_uuid.net);
@@ -286,6 +291,18 @@ nfs_local_open_fh(struct nfs_client *clp, const struct cred *cred,
}
EXPORT_SYMBOL_GPL(nfs_local_open_fh);
/*
* Ensure all page cache allocations are done from GFP_NOFS context to
* prevent direct reclaim recursion back into NFS via nfs_writepages.
*/
static void
nfs_local_mapping_set_gfp_nofs_context(struct address_space *m)
{
gfp_t gfp_mask = mapping_gfp_mask(m);
mapping_set_gfp_mask(m, (gfp_mask & ~(__GFP_FS)));
}
static void
nfs_local_iocb_free(struct nfs_local_kiocb *iocb)
{
@@ -310,6 +327,7 @@ nfs_local_iocb_alloc(struct nfs_pgio_header *hdr,
return NULL;
}
nfs_local_mapping_set_gfp_nofs_context(file->f_mapping);
init_sync_kiocb(&iocb->kiocb, file);
iocb->hdr = hdr;
@@ -512,8 +530,7 @@ nfs_local_pgio_init(struct nfs_pgio_header *hdr,
hdr->task.tk_start = ktime_get();
}
static bool
nfs_local_pgio_done(struct nfs_local_kiocb *iocb, long status, bool force)
static bool nfs_local_pgio_done(struct nfs_local_kiocb *iocb, long status)
{
struct nfs_pgio_header *hdr = iocb->hdr;
@@ -528,9 +545,6 @@ nfs_local_pgio_done(struct nfs_local_kiocb *iocb, long status, bool force)
hdr->task.tk_status = status;
}
if (force)
return true;
BUG_ON(atomic_read(&iocb->n_iters) <= 0);
return atomic_dec_and_test(&iocb->n_iters);
}
@@ -542,13 +556,50 @@ nfs_local_iocb_release(struct nfs_local_kiocb *iocb)
nfs_local_iocb_free(iocb);
}
static void
nfs_local_pgio_release(struct nfs_local_kiocb *iocb)
static void nfs_local_pgio_restart(struct nfs_local_kiocb *iocb,
struct nfs_pgio_header *hdr)
{
int status = 0;
iocb->kiocb.ki_pos = hdr->args.offset;
iocb->kiocb.ki_flags &= ~(IOCB_DSYNC | IOCB_SYNC | IOCB_DIRECT);
iocb->kiocb.ki_complete = NULL;
iocb->aio_complete_work = NULL;
iocb->end_iter_index = -1;
switch (hdr->rw_mode) {
case FMODE_READ:
nfs_local_iters_init(iocb, ITER_DEST);
nfs_local_do_read(iocb, hdr->task.tk_ops);
break;
case FMODE_WRITE:
nfs_local_iters_init(iocb, ITER_SOURCE);
nfs_local_do_write(iocb, hdr->task.tk_ops);
break;
default:
status = -EOPNOTSUPP;
}
if (unlikely(status != 0)) {
nfs_local_iocb_release(iocb);
hdr->task.tk_status = status;
nfs_local_hdr_release(hdr, hdr->task.tk_ops);
}
}
static void nfs_local_pgio_release(struct nfs_local_kiocb *iocb)
{
struct nfs_pgio_header *hdr = iocb->hdr;
struct rpc_task *task = &hdr->task;
nfs_local_iocb_release(iocb);
nfs_local_hdr_release(hdr, hdr->task.tk_ops);
task->tk_action = NULL;
task->tk_ops->rpc_call_done(task, hdr);
if (task->tk_action == NULL) {
nfs_local_iocb_release(iocb);
task->tk_ops->rpc_release(hdr);
} else
nfs_local_pgio_restart(iocb, hdr);
}
/*
@@ -609,7 +660,7 @@ static void nfs_local_read_aio_complete(struct kiocb *kiocb, long ret)
container_of(kiocb, struct nfs_local_kiocb, kiocb);
/* AIO completion of DIO read should always be last to complete */
if (unlikely(!nfs_local_pgio_done(iocb, ret, false)))
if (unlikely(!nfs_local_pgio_done(iocb, ret)))
return;
nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_read_aio_complete_work */
@@ -641,7 +692,7 @@ static void nfs_local_call_read(struct work_struct *work)
if (status == -EIOCBQUEUED)
continue;
/* Break on completion, errors, or short reads */
if (nfs_local_pgio_done(iocb, status, false) || status < 0 ||
if (nfs_local_pgio_done(iocb, status) || status < 0 ||
(size_t)status < iov_iter_count(&iocb->iters[i])) {
nfs_local_read_iocb_done(iocb);
break;
@@ -649,9 +700,8 @@ static void nfs_local_call_read(struct work_struct *work)
}
}
static int
nfs_local_do_read(struct nfs_local_kiocb *iocb,
const struct rpc_call_ops *call_ops)
static void nfs_local_do_read(struct nfs_local_kiocb *iocb,
const struct rpc_call_ops *call_ops)
{
struct nfs_pgio_header *hdr = iocb->hdr;
@@ -663,8 +713,6 @@ nfs_local_do_read(struct nfs_local_kiocb *iocb,
INIT_WORK(&iocb->work, nfs_local_call_read);
queue_work(nfslocaliod_workqueue, &iocb->work);
return 0;
}
static void
@@ -773,19 +821,7 @@ static void nfs_local_write_done(struct nfs_local_kiocb *iocb)
pr_info_ratelimited("nfs: Unexpected direct I/O write alignment failure\n");
}
/* Handle short writes as if they are ENOSPC */
status = hdr->res.count;
if (status > 0 && status < hdr->args.count) {
hdr->mds_offset += status;
hdr->args.offset += status;
hdr->args.pgbase += status;
hdr->args.count -= status;
nfs_set_pgio_error(hdr, -ENOSPC, hdr->args.offset);
status = -ENOSPC;
/* record -ENOSPC in terms of nfs_local_pgio_done */
(void) nfs_local_pgio_done(iocb, status, true);
}
if (hdr->task.tk_status < 0)
if (status < 0)
nfs_reset_boot_verifier(hdr->inode);
}
@@ -810,7 +846,7 @@ static void nfs_local_write_aio_complete(struct kiocb *kiocb, long ret)
container_of(kiocb, struct nfs_local_kiocb, kiocb);
/* AIO completion of DIO write should always be last to complete */
if (unlikely(!nfs_local_pgio_done(iocb, ret, false)))
if (unlikely(!nfs_local_pgio_done(iocb, ret)))
return;
nfs_local_pgio_aio_complete(iocb); /* Calls nfs_local_write_aio_complete_work */
@@ -846,7 +882,7 @@ static void nfs_local_call_write(struct work_struct *work)
if (status == -EIOCBQUEUED)
continue;
/* Break on completion, errors, or short writes */
if (nfs_local_pgio_done(iocb, status, false) || status < 0 ||
if (nfs_local_pgio_done(iocb, status) || status < 0 ||
(size_t)status < iov_iter_count(&iocb->iters[i])) {
nfs_local_write_iocb_done(iocb);
break;
@@ -857,9 +893,8 @@ static void nfs_local_call_write(struct work_struct *work)
current->flags = old_flags;
}
static int
nfs_local_do_write(struct nfs_local_kiocb *iocb,
const struct rpc_call_ops *call_ops)
static void nfs_local_do_write(struct nfs_local_kiocb *iocb,
const struct rpc_call_ops *call_ops)
{
struct nfs_pgio_header *hdr = iocb->hdr;
@@ -883,8 +918,6 @@ nfs_local_do_write(struct nfs_local_kiocb *iocb,
INIT_WORK(&iocb->work, nfs_local_call_write);
queue_work(nfslocaliod_workqueue, &iocb->work);
return 0;
}
static struct nfs_local_kiocb *
@@ -934,10 +967,10 @@ int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
switch (hdr->rw_mode) {
case FMODE_READ:
status = nfs_local_do_read(iocb, call_ops);
nfs_local_do_read(iocb, call_ops);
break;
case FMODE_WRITE:
status = nfs_local_do_write(iocb, call_ops);
nfs_local_do_write(iocb, call_ops);
break;
default:
dprintk("%s: invalid mode: %d\n", __func__,
@@ -945,9 +978,7 @@ int nfs_local_doio(struct nfs_client *clp, struct nfsd_file *localio,
status = -EOPNOTSUPP;
}
if (status != 0) {
if (status == -EAGAIN)
nfs_localio_disable_client(clp);
if (unlikely(status != 0)) {
nfs_local_iocb_release(iocb);
hdr->task.tk_status = status;
nfs_local_hdr_release(hdr, call_ops);
@@ -974,6 +1005,8 @@ nfs_local_run_commit(struct file *filp, struct nfs_commit_data *data)
end = LLONG_MAX;
}
nfs_local_mapping_set_gfp_nofs_context(filp->f_mapping);
dprintk("%s: commit %llu - %llu\n", __func__, start, end);
return vfs_fsync_range(filp, start, end, 0);
}
@@ -1015,17 +1048,22 @@ nfs_local_fsync_ctx_free(struct nfs_local_fsync_ctx *ctx)
static void
nfs_local_fsync_work(struct work_struct *work)
{
unsigned long old_flags = current->flags;
struct nfs_local_fsync_ctx *ctx;
int status;
ctx = container_of(work, struct nfs_local_fsync_ctx, work);
current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
status = nfs_local_run_commit(nfs_to->nfsd_file_file(ctx->localio),
ctx->data);
nfs_local_commit_done(ctx->data, status);
if (ctx->done != NULL)
complete(ctx->done);
nfs_local_fsync_ctx_free(ctx);
current->flags = old_flags;
}
static struct nfs_local_fsync_ctx *
@@ -1049,7 +1087,7 @@ int nfs_local_commit(struct nfsd_file *localio,
{
struct nfs_local_fsync_ctx *ctx;
ctx = nfs_local_fsync_ctx_alloc(data, localio, GFP_KERNEL);
ctx = nfs_local_fsync_ctx_alloc(data, localio, GFP_NOIO);
if (!ctx) {
nfs_local_commit_done(data, -ENOMEM);
nfs_local_release_commit_data(localio, data, call_ops);
@@ -1061,10 +1099,10 @@ int nfs_local_commit(struct nfsd_file *localio,
if (how & FLUSH_SYNC) {
DECLARE_COMPLETION_ONSTACK(done);
ctx->done = &done;
queue_work(nfsiod_workqueue, &ctx->work);
queue_work(nfslocaliod_workqueue, &ctx->work);
wait_for_completion(&done);
} else
queue_work(nfsiod_workqueue, &ctx->work);
queue_work(nfslocaliod_workqueue, &ctx->work);
return 0;
}

View File

@@ -31,11 +31,9 @@ struct nfs_net {
unsigned short nfs_callback_tcpport;
unsigned short nfs_callback_tcpport6;
int cb_users[NFS4_MAX_MINOR_VERSION + 1];
#endif /* CONFIG_NFS_V4 */
#if IS_ENABLED(CONFIG_NFS_V4_1)
struct list_head nfs4_data_server_cache;
spinlock_t nfs4_data_server_lock;
#endif /* CONFIG_NFS_V4_1 */
#endif /* CONFIG_NFS_V4 */
struct nfs_netns_client *nfs_client;
spinlock_t nfs_client_lock;
ktime_t boot_time;

View File

@@ -1027,11 +1027,10 @@ static int nfs3_have_delegation(struct inode *inode, fmode_t type, int flags)
return 0;
}
static int nfs3_return_delegation(struct inode *inode)
static void nfs3_return_delegation(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
nfs_wb_all(inode);
return 0;
}
static const struct inode_operations nfs3_dir_inode_operations = {

19
fs/nfs/nfs40.h Normal file
View File

@@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Declarations shared by the NFSv4.0-only portions of the NFS client
 * (nfs40client.c, nfs40proc.c, nfs40state.c).
 */
#ifndef __LINUX_FS_NFS_NFS4_0_H
#define __LINUX_FS_NFS_NFS4_0_H

/* nfs40client.c */
void nfs40_shutdown_client(struct nfs_client *);
int nfs40_init_client(struct nfs_client *);
void nfs40_handle_cb_pathdown(struct nfs_client *clp);

/* nfs40proc.c */
extern const struct nfs4_minor_version_ops nfs_v4_0_minor_ops;

/* nfs40state.c */
int nfs40_discover_server_trunking(struct nfs_client *clp,
				   struct nfs_client **result,
				   const struct cred *cred);
#endif /* __LINUX_FS_NFS_NFS4_0_H */

245
fs/nfs/nfs40client.c Normal file
View File

@@ -0,0 +1,245 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "nfs4session.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "netns.h"
#include "nfs40.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
/*
* SETCLIENTID just did a callback update with the callback ident in
* "drop," but server trunking discovery claims "drop" and "keep" are
* actually the same server. Swap the callback IDs so that "keep"
* will continue to use the callback ident the server now knows about,
* and so that "keep"'s original callback ident is destroyed when
* "drop" is freed.
*/
static void nfs4_swap_callback_idents(struct nfs_client *keep,
				      struct nfs_client *drop)
{
	struct nfs_net *nn = net_generic(keep->cl_net, nfs_net_id);
	unsigned int save = keep->cl_cb_ident;

	/* Nothing to do if both clients already share the same ident. */
	if (keep->cl_cb_ident == drop->cl_cb_ident)
		return;
	dprintk("%s: keeping callback ident %u and dropping ident %u\n",
		__func__, keep->cl_cb_ident, drop->cl_cb_ident);
	/* Swap the two IDR slots and the cached idents under the per-net
	 * client lock so a concurrent ident lookup never observes a
	 * half-updated mapping. */
	spin_lock(&nn->nfs_client_lock);
	idr_replace(&nn->cb_ident_idr, keep, drop->cl_cb_ident);
	keep->cl_cb_ident = drop->cl_cb_ident;
	idr_replace(&nn->cb_ident_idr, drop, save);
	drop->cl_cb_ident = save;
	spin_unlock(&nn->nfs_client_lock);
}
/* Return true when the two SETCLIENTID verifiers carry identical bytes. */
static bool nfs4_same_verifier(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return !memcmp(v1->data, v2->data, sizeof(v1->data));
}
/*
 * Tear down the NFSv4.0 transport slot table on client shutdown.
 * A client that never finished nfs40_init_client() has no table.
 */
void nfs40_shutdown_client(struct nfs_client *clp)
{
	struct nfs4_slot_table *tbl = clp->cl_slot_tbl;

	if (!tbl)
		return;
	nfs4_shutdown_slot_table(tbl);
	kfree(tbl);
}
/**
* nfs40_init_client - nfs_client initialization tasks for NFSv4.0
* @clp: nfs_client to initialize
*
* Returns zero on success, or a negative errno if some error occurred.
*/
/*
 * Allocate and set up the single NFSv4.0 transport slot table for @clp.
 * Returns 0 on success or a negative errno; on failure no state is left
 * attached to the client.
 */
int nfs40_init_client(struct nfs_client *clp)
{
	struct nfs4_slot_table *tbl;
	int err;

	tbl = kzalloc(sizeof(*tbl), GFP_NOFS);
	if (!tbl)
		return -ENOMEM;

	err = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,
				    "NFSv4.0 transport Slot table");
	if (err != 0) {
		/* Undo the partial setup before freeing the table. */
		nfs4_shutdown_slot_table(tbl);
		kfree(tbl);
		return err;
	}

	clp->cl_slot_tbl = tbl;
	return 0;
}
/*
* nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
* @clp: client to process
*
* Set the NFS4CLNT_LEASE_EXPIRED state in order to force a
* resend of the SETCLIENTID and hence re-establish the
* callback channel. Then return all existing delegations.
*/
void nfs40_handle_cb_pathdown(struct nfs_client *clp)
{
	/* Force a SETCLIENTID resend (re-establishing the callback channel),
	 * then return all delegations, since we can no longer be recalled. */
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	nfs_expire_all_delegations(clp);
	dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
		clp->cl_hostname);
}
/* Mark the callback path down and kick the state manager to recover it. */
void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
{
	nfs40_handle_cb_pathdown(clp);
	nfs4_schedule_state_manager(clp);
}
/**
* nfs40_walk_client_list - Find server that recognizes a client ID
*
* @new: nfs_client with client ID to test
* @result: OUT: found nfs_client, or new
* @cred: credential to use for trunking test
*
* Returns zero, a negative errno, or a negative NFS4ERR status.
* If zero is returned, an nfs_client pointer is planted in "result."
*
* NB: nfs40_walk_client_list() relies on the new nfs_client being
* the last nfs_client on the list.
*/
static int nfs40_walk_client_list(struct nfs_client *new,
				  struct nfs_client **result,
				  const struct cred *cred)
{
	struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
	struct nfs_client *pos, *prev = NULL;
	struct nfs4_setclientid_res clid = {
		.clientid = new->cl_clientid,
		.confirm = new->cl_confirm,
	};
	int status = -NFS4ERR_STALE_CLIENTID;

	spin_lock(&nn->nfs_client_lock);
	list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
		/* "new" is last on the list, so reaching it means no earlier
		 * client matched; confirm the clientid against "new" itself. */
		if (pos == new)
			goto found;

		status = nfs4_match_client(pos, new, &prev, nn);
		if (status < 0)
			goto out_unlock;
		if (status != 0)
			continue;
		/*
		 * We just sent a new SETCLIENTID, which should have
		 * caused the server to return a new cl_confirm.  So if
		 * cl_confirm is the same, then this is a different
		 * server that just returned the same cl_confirm by
		 * coincidence:
		 */
		if ((new != pos) && nfs4_same_verifier(&pos->cl_confirm,
						       &new->cl_confirm))
			continue;
		/*
		 * But if the cl_confirm's are different, then the only
		 * way that a SETCLIENTID_CONFIRM to pos can succeed is
		 * if new and pos point to the same server:
		 */
found:
		/* Pin "pos" before dropping the lock; "prev" keeps the
		 * previous candidate alive so list iteration stays valid
		 * when the lock is re-taken below. */
		refcount_inc(&pos->cl_count);
		spin_unlock(&nn->nfs_client_lock);
		nfs_put_client(prev);
		prev = pos;

		/* RPC is issued without the client lock held. */
		status = nfs4_proc_setclientid_confirm(pos, &clid, cred);
		switch (status) {
		case -NFS4ERR_STALE_CLIENTID:
			break;
		case 0:
			/* "pos" and "new" are the same server: keep "pos"'s
			 * callback ident and mark it usable. */
			nfs4_swap_callback_idents(pos, new);
			pos->cl_confirm = new->cl_confirm;
			nfs_mark_client_ready(pos, NFS_CS_READY);
			prev = NULL;	/* reference transferred to *result */
			*result = pos;
			goto out;
		case -ERESTARTSYS:
		case -ETIMEDOUT:
			/* The callback path may have been inadvertently
			 * changed. Schedule recovery!
			 */
			nfs4_schedule_path_down_recovery(pos);
			goto out;
		default:
			goto out;
		}

		/* Re-take the lock and continue scanning after "pos". */
		spin_lock(&nn->nfs_client_lock);
	}
out_unlock:
	spin_unlock(&nn->nfs_client_lock);

	/* No match found. The server lost our clientid */
out:
	nfs_put_client(prev);
	return status;
}
/**
* nfs40_discover_server_trunking - Detect server IP address trunking (mv0)
*
* @clp: nfs_client under test
* @result: OUT: found nfs_client, or clp
* @cred: credential to use for trunking test
*
* Returns zero, a negative errno, or a negative NFS4ERR status.
* If zero is returned, an nfs_client pointer is planted in
* "result".
*
* Note: The returned client may not yet be marked ready.
*/
int nfs40_discover_server_trunking(struct nfs_client *clp,
				   struct nfs_client **result,
				   const struct cred *cred)
{
	struct nfs4_setclientid_res clid = {
		.clientid = clp->cl_clientid,
		.confirm = clp->cl_confirm,
	};
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
	unsigned short port;
	int status;

	/* Advertise the callback port that matches the address family. */
	port = nn->nfs_callback_tcpport;
	if (clp->cl_addr.ss_family == AF_INET6)
		port = nn->nfs_callback_tcpport6;

	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
	if (status != 0)
		goto out;

	/* Record the server-assigned clientid before the trunking walk. */
	clp->cl_clientid = clid.clientid;
	clp->cl_confirm = clid.confirm;

	status = nfs40_walk_client_list(clp, result, cred);
	if (status == 0) {
		/* Sustain the lease, even if it's empty.  If the clientid4
		 * goes stale it's of no use for trunking discovery. */
		nfs4_schedule_state_renewal(*result);

		/* If the client state need to recover, do it. */
		if (clp->cl_state)
			nfs4_schedule_state_manager(clp);
	}
out:
	return status;
}

395
fs/nfs/nfs40proc.c Normal file
View File

@@ -0,0 +1,395 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/nfs4.h>
#include <linux/nfs.h>
#include <linux/sunrpc/sched.h>
#include <linux/nfs_fs.h>
#include "internal.h"
#include "nfs4_fs.h"
#include "nfs40.h"
#include "nfs4session.h"
#include "nfs4trace.h"
/* ->rpc_call_prepare: claim an NFSv4.0 transport slot before transmitting. */
static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_setup_sequence(data->seq_server->nfs_client,
			    data->seq_args, data->seq_res, task);
}
/* ->rpc_call_done: release the slot claimed in nfs40_call_sync_prepare(). */
static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_call_sync_data *data = calldata;
	nfs4_sequence_done(task, data->seq_res);
}
static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
{
	struct nfs4_slot *slot = res->sr_slot;
	struct nfs4_slot_table *tbl;

	tbl = slot->table;
	spin_lock(&tbl->slot_tbl_lock);
	/* Hand the slot straight to a queued waiter if there is one;
	 * otherwise return it to the table's free pool. */
	if (!nfs41_wake_and_assign_slot(tbl, slot))
		nfs4_free_slot(tbl, slot);
	spin_unlock(&tbl->slot_tbl_lock);
	res->sr_slot = NULL;
}
/*
 * NFSv4.0 sequence completion: release the transport slot if one is still
 * attached to the result.  Always reports the sequence as complete (1).
 */
static int nfs40_sequence_done(struct rpc_task *task,
			       struct nfs4_sequence_res *res)
{
	if (res->sr_slot)
		nfs40_sequence_free_slot(res);
	return 1;
}
/* Drop any cached delegation stateid held for the inode backing @state. */
static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
{
	if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
		nfs_finish_clear_delegation_stateid(state, NULL);
}
/* Recover an open state whose lease has expired under NFSv4.0. */
static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
{
	/* NFSv4.0 doesn't allow for delegation recovery on open expire */
	nfs40_clear_delegation_stateid(state);
	nfs_state_clear_open_state_flags(state);
	return nfs4_open_expired(sp, state);
}
/* Per-call context for an asynchronous RENEW request. */
struct nfs4_renewdata {
	struct nfs_client *client;	/* holds a cl_count reference */
	unsigned long timestamp;	/* jiffies when the RENEW was queued */
};
/*
* nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
* standalone procedure for queueing an asynchronous RENEW.
*/
/* ->rpc_release for the async RENEW: drop the client ref taken at queue time. */
static void nfs4_renew_release(void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;

	/* Re-arm the renewal timer unless ours is the last reference,
	 * i.e. unless the client is being torn down. */
	if (refcount_read(&clp->cl_count) > 1)
		nfs4_schedule_state_renewal(clp);
	nfs_put_client(clp);
	kfree(data);
}
/*
 * Completion callback for the asynchronous RENEW.  On success the lease
 * timestamp is refreshed; on error the appropriate recovery is scheduled.
 */
static void nfs4_renew_done(struct rpc_task *task, void *calldata)
{
	struct nfs4_renewdata *data = calldata;
	struct nfs_client *clp = data->client;
	unsigned long timestamp = data->timestamp;

	trace_nfs4_renew_async(clp, task->tk_status);
	switch (task->tk_status) {
	case 0:
		break;
	case -NFS4ERR_LEASE_MOVED:
		nfs4_schedule_lease_moved_recovery(clp);
		break;
	default:
		/* Unless we're shutting down, schedule state recovery! */
		if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
			return;
		/*
		 * tk_status carries negated NFS4ERR codes (see the
		 * -NFS4ERR_LEASE_MOVED case above).  Comparing against the
		 * positive constant could never match, which left the
		 * CB_PATH_DOWN recovery branch below unreachable.
		 */
		if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
			nfs4_schedule_lease_recovery(clp);
			return;
		}
		nfs4_schedule_path_down_recovery(clp);
	}
	/* Lease was renewed (possibly implicitly); record when it was sent. */
	do_renew_lease(clp, timestamp);
}
/* RPC callbacks for the standalone async RENEW (see nfs4_proc_async_renew). */
static const struct rpc_call_ops nfs4_renew_ops = {
	.rpc_call_done = nfs4_renew_done,
	.rpc_release = nfs4_renew_release,
};
/*
 * Queue an asynchronous RENEW for @clp.  Returns 0 if nothing needed doing
 * or the call was queued, or a negative errno on failure.
 */
static int nfs4_proc_async_renew(struct nfs_client *clp, const struct cred *cred, unsigned renew_flags)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	struct nfs4_renewdata *data;

	if (renew_flags == 0)
		return 0;
	/* Pin the client for the lifetime of the async call; the reference
	 * is dropped in nfs4_renew_release(). */
	if (!refcount_inc_not_zero(&clp->cl_count))
		return -EIO;
	data = kmalloc(sizeof(*data), GFP_NOFS);
	if (data == NULL) {
		nfs_put_client(clp);
		return -ENOMEM;
	}
	data->client = clp;
	data->timestamp = jiffies;	/* lease renewal is dated from here */
	return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
			      &nfs4_renew_ops, data);
}
/*
 * Synchronous RENEW: refresh the client's lease and record the time the
 * request was issued.  Returns 0 on success or a negative errno/NFS4ERR.
 */
static int nfs4_proc_renew(struct nfs_client *clp, const struct cred *cred)
{
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
		.rpc_argp = clp,
		.rpc_cred = cred,
	};
	unsigned long now = jiffies;	/* sampled before the RPC round-trip */
	int status;

	status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
	if (status < 0)
		return status;
	do_renew_lease(clp, now);
	return 0;
}
/*
 * NFSv4.0 has no TEST_STATEID/FREE_STATEID operations, so an expired
 * stateid can never be revalidated; report it as bad unconditionally.
 */
static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
					       nfs4_stateid *stateid,
					       const struct cred *cred)
{
	return -NFS4ERR_BAD_STATEID;
}
/*
* This operation also signals the server that this client is
* performing migration recovery. The server can stop returning
* NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
* appended to this compound to identify the client ID which is
* performing recovery.
*/
static int _nfs40_proc_get_locations(struct nfs_server *server,
				     struct nfs_fh *fhandle,
				     struct nfs4_fs_locations *locations,
				     struct page *page, const struct cred *cred)
{
	struct rpc_clnt *clnt = server->client;
	struct nfs_client *clp = server->nfs_client;
	u32 bitmask[2] = {
		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
	};
	struct nfs4_fs_locations_arg args = {
		.clientid = clp->cl_clientid,
		.fh = fhandle,
		.page = page,
		.bitmask = bitmask,
		.migration = 1, /* skip LOOKUP */
		.renew = 1, /* append RENEW */
	};
	struct nfs4_fs_locations_res res = {
		.fs_locations = locations,
		.migration = 1,
		.renew = 1,
	};
	struct rpc_message msg = {
		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
		.rpc_argp = &args,
		.rpc_resp = &res,
		.rpc_cred = cred,
	};
	unsigned long now = jiffies;	/* lease is dated from before the call */
	int status;

	/* Start from a clean result before the server fills it in. */
	nfs_fattr_init(locations->fattr);
	locations->server = server;
	locations->nlocations = 0;

	nfs4_init_sequence(clp, &args.seq_args, &res.seq_res, 0, 1);
	status = nfs4_call_sync_sequence(clnt, server, &msg,
					 &args.seq_args, &res.seq_res);
	if (status)
		return status;

	/* The appended RENEW succeeded, so the lease was refreshed too. */
	renew_lease(server, now);
	return 0;
}
/*
 * This operation also signals the server that this client is
 * performing "lease moved" recovery. The server can stop
 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
 * is appended to this compound to identify the client ID which is
 * performing recovery.
 */
static int _nfs40_proc_fsid_present(struct inode *inode, const struct cred *cred)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct rpc_clnt *clnt = server->client;
struct nfs4_fsid_present_arg args = {
.fh = NFS_FH(inode),
.clientid = clp->cl_clientid,
.renew = 1, /* append RENEW */
};
struct nfs4_fsid_present_res res = {
.renew = 1,
};
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
.rpc_argp = &args,
.rpc_resp = &res,
.rpc_cred = cred,
};
/* Sampled before the RPC so do_renew_lease() below uses the send time */
unsigned long now = jiffies;
int status;
/* Scratch filehandle for the reply; released again below */
res.fh = nfs_alloc_fhandle();
if (res.fh == NULL)
return -ENOMEM;
nfs4_init_sequence(clp, &args.seq_args, &res.seq_res, 0, 1);
status = nfs4_call_sync_sequence(clnt, server, &msg,
&args.seq_args, &res.seq_res);
/* Only the status matters; the returned filehandle is not examined */
nfs_free_fhandle(res.fh);
if (status)
return status;
/* The appended RENEW succeeded: refresh the client's lease */
do_renew_lease(clp, now);
return 0;
}
/* Call data for an asynchronous RELEASE_LOCKOWNER request */
struct nfs_release_lockowner_data {
struct nfs4_lock_state *lsp; /* freed in the rpc_release callback */
struct nfs_server *server;
struct nfs_release_lockowner_args args;
struct nfs_release_lockowner_res res;
unsigned long timestamp; /* send time, used to renew the lease on success */
};
static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
{
struct nfs_release_lockowner_data *data = calldata;
struct nfs_server *server = data->server;
nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
&data->res.seq_res, task);
data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
data->timestamp = jiffies;
}
/*
 * rpc_call_done callback for RELEASE_LOCKOWNER: renew the lease on
 * success, schedule recovery or a retry for specific error statuses.
 */
static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
{
struct nfs_release_lockowner_data *data = calldata;
struct nfs_server *server = data->server;
nfs40_sequence_done(task, &data->res.seq_res);
switch (task->tk_status) {
case 0:
/* Renew from the send time recorded in ->rpc_call_prepare */
renew_lease(server, data->timestamp);
break;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_EXPIRED:
/* Our clientid/lease is gone: kick off lease recovery */
nfs4_schedule_lease_recovery(server->nfs_client);
break;
case -NFS4ERR_LEASE_MOVED:
case -NFS4ERR_DELAY:
/* Transient conditions: let the generic handler decide
 * whether the call should be resent. All other errors
 * are ignored. */
if (nfs4_async_handle_error(task, server,
NULL, NULL) == -EAGAIN)
rpc_restart_call_prepare(task);
}
}
/*
 * rpc_release callback: free the lock state held for the async
 * RELEASE_LOCKOWNER call, then free the call data itself.
 */
static void nfs4_release_lockowner_release(void *calldata)
{
	struct nfs_release_lockowner_data *d = calldata;

	nfs4_free_lock_state(d->server, d->lsp);
	kfree(d);
}
/* Async callbacks for the RELEASE_LOCKOWNER RPC */
static const struct rpc_call_ops nfs4_release_lockowner_ops = {
.rpc_call_prepare = nfs4_release_lockowner_prepare,
.rpc_call_done = nfs4_release_lockowner_done,
.rpc_release = nfs4_release_lockowner_release,
};
/*
 * Fire-and-forget: send an asynchronous RELEASE_LOCKOWNER so the server
 * can discard state held for this lock owner. The lock state and call
 * data are freed by the rpc_release callback once the call completes.
 * Best effort — no status is reported to the caller.
 * NOTE(review): on kmalloc failure the lock state is not freed here;
 * confirm that is the intended behavior for the ->free_lock_state hook.
 */
static void
nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
{
struct nfs_release_lockowner_data *data;
struct nfs_client *clp = server->nfs_client;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
};
/* RELEASE_LOCKOWNER is only sent on NFSv4.0 mounts */
if (clp->cl_mvops->minor_version != 0)
return;
data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return;
data->lsp = lsp;
data->server = server;
data->args.lock_owner.clientid = clp->cl_clientid;
data->args.lock_owner.id = lsp->ls_seqid.owner_id;
data->args.lock_owner.s_dev = server->s_dev;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
nfs4_init_sequence(clp, &data->args.seq_args, &data->res.seq_res, 0, 0);
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
}
/* rpc_call_ops used for synchronous NFSv4.0 requests */
static const struct rpc_call_ops nfs40_call_sync_ops = {
.rpc_call_prepare = nfs40_call_sync_prepare,
.rpc_call_done = nfs40_call_sync_done,
};
/* Slot/sequence bookkeeping callbacks for NFSv4.0 */
static const struct nfs4_sequence_slot_ops nfs40_sequence_slot_ops = {
.process = nfs40_sequence_done,
.done = nfs40_sequence_done,
.free_slot = nfs40_sequence_free_slot,
};
/* NFSv4.0 operations for reclaiming state after a server reboot */
static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
.establish_clid = nfs4_init_clientid,
.detect_trunking = nfs40_discover_server_trunking,
};
/* NFSv4.0 operations for recovering state outside a grace period */
static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
.owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs40_open_expired,
.recover_lock = nfs4_lock_expired,
.establish_clid = nfs4_init_clientid,
};
/* Lease renewal operations for NFSv4.0 (RENEW-based) */
static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
.sched_state_renewal = nfs4_proc_async_renew,
.get_state_renewal_cred = nfs4_get_renew_cred,
.renew_lease = nfs4_proc_renew,
};
/* Migration ("lease moved") recovery operations for NFSv4.0 */
static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
.get_locations = _nfs40_proc_get_locations,
.fsid_present = _nfs40_proc_fsid_present,
};
/*
 * Minor-version dispatch table for NFSv4.0: wires the v4.0-specific
 * implementations above into the generic NFSv4 client code.
 */
const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
.minor_version = 0,
.init_caps = NFS_CAP_READDIRPLUS
| NFS_CAP_ATOMIC_OPEN
| NFS_CAP_POSIX_LOCK,
.init_client = nfs40_init_client,
.shutdown_client = nfs40_shutdown_client,
.match_stateid = nfs4_match_stateid,
.find_root_sec = nfs4_find_root_sec,
.free_lock_state = nfs4_release_lockowner,
.test_and_free_expired = nfs40_test_and_free_expired_stateid,
.alloc_seqid = nfs_alloc_seqid,
.call_sync_ops = &nfs40_call_sync_ops,
.sequence_slot_ops = &nfs40_sequence_slot_ops,
.reboot_recovery_ops = &nfs40_reboot_recovery_ops,
.nograce_recovery_ops = &nfs40_nograce_recovery_ops,
.state_renewal_ops = &nfs40_state_renewal_ops,
.mig_recovery_ops = &nfs40_mig_recovery_ops,
};

View File

@@ -670,8 +670,8 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
task_setup_data.callback_data = data;
nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
1, 0);
nfs4_init_sequence(dst_server->nfs_client, &data->args.osa_seq_args,
&data->res.osr_seq_res, 1, 0);
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -1072,7 +1072,8 @@ int nfs42_proc_layoutstats_generic(struct nfs_server *server,
nfs42_layoutstat_release(data);
return -EAGAIN;
}
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
nfs4_init_sequence(server->nfs_client, &data->args.seq_args,
&data->res.seq_res, 0, 0);
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -1210,6 +1211,7 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
const struct nfs42_layout_error *errors, size_t n)
{
struct inode *inode = lseg->pls_layout->plh_inode;
struct nfs_server *server = NFS_SERVER(inode);
struct nfs42_layouterror_data *data;
struct rpc_task *task;
struct rpc_message msg = {
@@ -1237,8 +1239,9 @@ int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
task_setup.callback_data = data;
task_setup.rpc_client = NFS_SERVER(inode)->client;
nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
task_setup.rpc_client = server->client;
nfs4_init_sequence(server->nfs_client, &data->args.seq_args,
&data->res.seq_res, 0, 0);
task = rpc_run_task(&task_setup);
if (IS_ERR(task))
return PTR_ERR(task);

View File

@@ -10,12 +10,16 @@
#ifndef __LINUX_FS_NFS_NFS4_FS_H
#define __LINUX_FS_NFS_NFS4_FS_H
#if defined(CONFIG_NFS_V4_0)
#define NFS4_MIN_MINOR_VERSION 0
#else
#define NFS4_MIN_MINOR_VERSION 1
#endif
#if defined(CONFIG_NFS_V4_2)
#define NFS4_MAX_MINOR_VERSION 2
#elif defined(CONFIG_NFS_V4_1)
#define NFS4_MAX_MINOR_VERSION 1
#else
#define NFS4_MAX_MINOR_VERSION 0
#define NFS4_MAX_MINOR_VERSION 1
#endif
#if IS_ENABLED(CONFIG_NFS_V4)
@@ -73,6 +77,7 @@ struct nfs4_minor_version_ops {
void (*session_trunk)(struct rpc_clnt *clnt,
struct rpc_xprt *xprt, void *data);
const struct rpc_call_ops *call_sync_ops;
const struct nfs4_sequence_slot_ops *sequence_slot_ops;
const struct nfs4_state_recovery_ops *reboot_recovery_ops;
const struct nfs4_state_recovery_ops *nograce_recovery_ops;
const struct nfs4_state_maintenance_ops *state_renewal_ops;
@@ -256,6 +261,12 @@ struct nfs4_add_xprt_data {
const struct cred *cred;
};
struct nfs4_sequence_slot_ops {
int (*process)(struct rpc_task *, struct nfs4_sequence_res *);
int (*done)(struct rpc_task *, struct nfs4_sequence_res *);
void (*free_slot)(struct nfs4_sequence_res *);
};
struct nfs4_state_maintenance_ops {
int (*sched_state_renewal)(struct nfs_client *, const struct cred *, unsigned);
const struct cred * (*get_state_renewal_cred)(struct nfs_client *);
@@ -277,6 +288,11 @@ int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
/* fs_context.c */
extern struct file_system_type nfs4_fs_type;
/* nfs4client.c */
struct nfs_net;
int nfs4_match_client(struct nfs_client *pos, struct nfs_client *new,
struct nfs_client **prev, struct nfs_net *nn);
/* nfs4namespace.c */
struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *,
const struct qstr *);
@@ -286,6 +302,12 @@ int nfs4_replace_transport(struct nfs_server *server,
size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr_storage *ss,
size_t salen, struct net *net, int port);
/* nfs4proc.c */
struct nfs4_call_sync_data {
const struct nfs_server *seq_server;
struct nfs4_sequence_args *seq_args;
struct nfs4_sequence_res *seq_res;
};
extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception *);
extern int nfs4_async_handle_error(struct rpc_task *task,
struct nfs_server *server,
@@ -293,9 +315,16 @@ extern int nfs4_async_handle_error(struct rpc_task *task,
extern int nfs4_call_sync(struct rpc_clnt *, struct nfs_server *,
struct rpc_message *, struct nfs4_sequence_args *,
struct nfs4_sequence_res *, int);
extern void nfs4_init_sequence(struct nfs4_sequence_args *, struct nfs4_sequence_res *, int, int);
extern int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
struct nfs_server *server,
struct rpc_message *msg,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res);
extern void nfs4_init_sequence(struct nfs_client *clp, struct nfs4_sequence_args *,
struct nfs4_sequence_res *, int, int);
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, const struct cred *, struct nfs4_setclientid_res *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, const struct cred *);
extern void renew_lease(const struct nfs_server *server, unsigned long timestamp);
extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, bool);
extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, const struct cred *cred);
@@ -339,8 +368,19 @@ extern void nfs4_update_changeattr(struct inode *dir,
unsigned long cache_validity);
extern int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen,
struct page **pages);
extern int nfs4_open_reclaim(struct nfs4_state_owner *, struct nfs4_state *);
extern int nfs4_open_expired(struct nfs4_state_owner *, struct nfs4_state *);
extern int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request);
extern int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request);
extern void nfs_state_clear_delegation(struct nfs4_state *state);
extern void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
const nfs4_stateid *stateid);
extern void nfs_state_clear_open_state_flags(struct nfs4_state *state);
extern void do_renew_lease(struct nfs_client *clp, unsigned long timestamp);
extern bool nfs4_match_stateid(const nfs4_stateid *s1, const nfs4_stateid *s2);
extern int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fattr *fattr);
#if defined(CONFIG_NFS_V4_1)
extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *);
extern int nfs4_proc_create_session(struct nfs_client *, const struct cred *);
extern int nfs4_proc_destroy_session(struct nfs4_session *, const struct cred *);
@@ -418,31 +458,6 @@ nfs4_state_protect_write(struct nfs_client *clp, struct rpc_clnt **clntp,
!test_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags))
hdr->args.stable = NFS_FILE_SYNC;
}
#else /* CONFIG_NFS_v4_1 */
static inline bool
is_ds_only_client(struct nfs_client *clp)
{
return false;
}
static inline bool
is_ds_client(struct nfs_client *clp)
{
return false;
}
static inline void
nfs4_state_protect(struct nfs_client *clp, unsigned long sp4_flags,
struct rpc_clnt **clntp, struct rpc_message *msg)
{
}
static inline void
nfs4_state_protect_write(struct nfs_client *clp, struct rpc_clnt **clntp,
struct rpc_message *msg, struct nfs_pgio_header *hdr)
{
}
#endif /* CONFIG_NFS_V4_1 */
extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
@@ -452,9 +467,7 @@ extern const u32 nfs4_pathconf_bitmap[3];
extern const u32 nfs4_fsinfo_bitmap[3];
extern const u32 nfs4_fs_locations_bitmap[3];
void nfs40_shutdown_client(struct nfs_client *);
void nfs41_shutdown_client(struct nfs_client *);
int nfs40_init_client(struct nfs_client *);
int nfs41_init_client(struct nfs_client *);
void nfs4_free_client(struct nfs_client *);
@@ -464,8 +477,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *);
extern void nfs4_schedule_state_renewal(struct nfs_client *);
extern void nfs4_kill_renewd(struct nfs_client *);
extern void nfs4_renew_state(struct work_struct *);
extern void nfs4_set_lease_period(struct nfs_client *clp, unsigned long lease);
extern void nfs4_set_lease_period(struct nfs_client *clp, u32 period);
/* nfs4state.c */
extern const nfs4_stateid current_stateid;
@@ -477,18 +489,12 @@ int nfs4_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **);
int nfs40_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **, const struct cred *);
#if defined(CONFIG_NFS_V4_1)
int nfs41_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **, const struct cred *);
extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
extern void nfs41_notify_server(struct nfs_client *);
bool nfs4_check_serverowner_major_id(struct nfs41_server_owner *o1,
struct nfs41_server_owner *o2);
#else
static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
{
}
#endif /* CONFIG_NFS_V4_1 */
extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);

View File

@@ -44,7 +44,6 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
return ret < 0 ? ret : 0;
}
#ifdef CONFIG_NFS_V4_1
/*
* Per auth flavor data server rpc clients
*/
@@ -187,15 +186,6 @@ void nfs41_shutdown_client(struct nfs_client *clp)
}
}
#endif /* CONFIG_NFS_V4_1 */
void nfs40_shutdown_client(struct nfs_client *clp)
{
if (clp->cl_slot_tbl) {
nfs4_shutdown_slot_table(clp->cl_slot_tbl);
kfree(clp->cl_slot_tbl);
}
}
struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
{
@@ -211,7 +201,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
if (err)
goto error;
if (cl_init->minorversion > NFS4_MAX_MINOR_VERSION) {
if (cl_init->minorversion < NFS4_MIN_MINOR_VERSION ||
cl_init->minorversion > NFS4_MAX_MINOR_VERSION) {
err = -EINVAL;
goto error;
}
@@ -224,9 +215,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
clp->cl_mig_gen = 1;
clp->cl_last_renewal = jiffies;
#if IS_ENABLED(CONFIG_NFS_V4_1)
init_waitqueue_head(&clp->cl_lock_waitq);
#endif
INIT_LIST_HEAD(&clp->pending_cb_stateids);
if (cl_init->minorversion != 0)
@@ -339,35 +328,6 @@ static int nfs4_init_callback(struct nfs_client *clp)
return 0;
}
/**
* nfs40_init_client - nfs_client initialization tasks for NFSv4.0
* @clp: nfs_client to initialize
*
* Returns zero on success, or a negative errno if some error occurred.
*/
int nfs40_init_client(struct nfs_client *clp)
{
struct nfs4_slot_table *tbl;
int ret;
tbl = kzalloc(sizeof(*tbl), GFP_NOFS);
if (tbl == NULL)
return -ENOMEM;
ret = nfs4_setup_slot_table(tbl, NFS4_MAX_SLOT_TABLE,
"NFSv4.0 transport Slot table");
if (ret) {
nfs4_shutdown_slot_table(tbl);
kfree(tbl);
return ret;
}
clp->cl_slot_tbl = tbl;
return 0;
}
#if defined(CONFIG_NFS_V4_1)
/**
* nfs41_init_client - nfs_client initialization tasks for NFSv4.1+
* @clp: nfs_client to initialize
@@ -399,8 +359,6 @@ int nfs41_init_client(struct nfs_client *clp)
return 0;
}
#endif /* CONFIG_NFS_V4_1 */
/*
* Initialize the minor version specific parts of an NFS4 client record
*/
@@ -490,37 +448,6 @@ error:
return ERR_PTR(error);
}
/*
* SETCLIENTID just did a callback update with the callback ident in
* "drop," but server trunking discovery claims "drop" and "keep" are
* actually the same server. Swap the callback IDs so that "keep"
* will continue to use the callback ident the server now knows about,
* and so that "keep"'s original callback ident is destroyed when
* "drop" is freed.
*/
static void nfs4_swap_callback_idents(struct nfs_client *keep,
struct nfs_client *drop)
{
struct nfs_net *nn = net_generic(keep->cl_net, nfs_net_id);
unsigned int save = keep->cl_cb_ident;
if (keep->cl_cb_ident == drop->cl_cb_ident)
return;
dprintk("%s: keeping callback ident %u and dropping ident %u\n",
__func__, keep->cl_cb_ident, drop->cl_cb_ident);
spin_lock(&nn->nfs_client_lock);
idr_replace(&nn->cb_ident_idr, keep, drop->cl_cb_ident);
keep->cl_cb_ident = drop->cl_cb_ident;
idr_replace(&nn->cb_ident_idr, drop, save);
drop->cl_cb_ident = save;
spin_unlock(&nn->nfs_client_lock);
}
static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
const struct nfs_client *clp2)
{
@@ -529,13 +456,8 @@ static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
}
static bool nfs4_same_verifier(nfs4_verifier *v1, nfs4_verifier *v2)
{
return memcmp(v1->data, v2->data, sizeof(v1->data)) == 0;
}
static int nfs4_match_client(struct nfs_client *pos, struct nfs_client *new,
struct nfs_client **prev, struct nfs_net *nn)
int nfs4_match_client(struct nfs_client *pos, struct nfs_client *new,
struct nfs_client **prev, struct nfs_net *nn)
{
int status;
@@ -578,99 +500,6 @@ static int nfs4_match_client(struct nfs_client *pos, struct nfs_client *new,
return 0;
}
/**
* nfs40_walk_client_list - Find server that recognizes a client ID
*
* @new: nfs_client with client ID to test
* @result: OUT: found nfs_client, or new
* @cred: credential to use for trunking test
*
* Returns zero, a negative errno, or a negative NFS4ERR status.
* If zero is returned, an nfs_client pointer is planted in "result."
*
* NB: nfs40_walk_client_list() relies on the new nfs_client being
* the last nfs_client on the list.
*/
int nfs40_walk_client_list(struct nfs_client *new,
struct nfs_client **result,
const struct cred *cred)
{
struct nfs_net *nn = net_generic(new->cl_net, nfs_net_id);
struct nfs_client *pos, *prev = NULL;
struct nfs4_setclientid_res clid = {
.clientid = new->cl_clientid,
.confirm = new->cl_confirm,
};
int status = -NFS4ERR_STALE_CLIENTID;
spin_lock(&nn->nfs_client_lock);
list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) {
if (pos == new)
goto found;
status = nfs4_match_client(pos, new, &prev, nn);
if (status < 0)
goto out_unlock;
if (status != 0)
continue;
/*
* We just sent a new SETCLIENTID, which should have
* caused the server to return a new cl_confirm. So if
* cl_confirm is the same, then this is a different
* server that just returned the same cl_confirm by
* coincidence:
*/
if ((new != pos) && nfs4_same_verifier(&pos->cl_confirm,
&new->cl_confirm))
continue;
/*
* But if the cl_confirm's are different, then the only
* way that a SETCLIENTID_CONFIRM to pos can succeed is
* if new and pos point to the same server:
*/
found:
refcount_inc(&pos->cl_count);
spin_unlock(&nn->nfs_client_lock);
nfs_put_client(prev);
prev = pos;
status = nfs4_proc_setclientid_confirm(pos, &clid, cred);
switch (status) {
case -NFS4ERR_STALE_CLIENTID:
break;
case 0:
nfs4_swap_callback_idents(pos, new);
pos->cl_confirm = new->cl_confirm;
nfs_mark_client_ready(pos, NFS_CS_READY);
prev = NULL;
*result = pos;
goto out;
case -ERESTARTSYS:
case -ETIMEDOUT:
/* The callback path may have been inadvertently
* changed. Schedule recovery!
*/
nfs4_schedule_path_down_recovery(pos);
goto out;
default:
goto out;
}
spin_lock(&nn->nfs_client_lock);
}
out_unlock:
spin_unlock(&nn->nfs_client_lock);
/* No match found. The server lost our clientid */
out:
nfs_put_client(prev);
return status;
}
#ifdef CONFIG_NFS_V4_1
/*
* Returns true if the server major ids match
*/
@@ -799,7 +628,6 @@ out:
nfs_put_client(prev);
return status;
}
#endif /* CONFIG_NFS_V4_1 */
static void nfs4_destroy_server(struct nfs_server *server)
{
@@ -831,7 +659,6 @@ nfs4_find_client_ident(struct net *net, int cb_ident)
return clp;
}
#if defined(CONFIG_NFS_V4_1)
/* Common match routine for v4.0 and v4.1 callback services */
static bool nfs4_cb_match_client(const struct sockaddr *addr,
struct nfs_client *clp, u32 minorversion)
@@ -889,16 +716,6 @@ nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
return NULL;
}
#else /* CONFIG_NFS_V4_1 */
struct nfs_client *
nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
struct nfs4_sessionid *sid, u32 minorversion)
{
return NULL;
}
#endif /* CONFIG_NFS_V4_1 */
/*
* Set up an NFS4 client
*/
@@ -1040,7 +857,6 @@ EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
*/
static void nfs4_session_limit_rwsize(struct nfs_server *server)
{
#ifdef CONFIG_NFS_V4_1
struct nfs4_session *sess;
u32 server_resp_sz;
u32 server_rqst_sz;
@@ -1057,7 +873,6 @@ static void nfs4_session_limit_rwsize(struct nfs_server *server)
server->rsize = server_resp_sz;
if (server->wsize > server_rqst_sz)
server->wsize = server_rqst_sz;
#endif /* CONFIG_NFS_V4_1 */
}
/*

File diff suppressed because it is too large Load Diff

View File

@@ -133,15 +133,24 @@ nfs4_kill_renewd(struct nfs_client *clp)
cancel_delayed_work_sync(&clp->cl_renewd);
}
#define MAX_LEASE_PERIOD (60 * 60) /* 1 hour */
/**
* nfs4_set_lease_period - Sets the lease period on a nfs_client
*
* @clp: pointer to nfs_client
* @lease: new value for lease period
* @period: new value for lease period (in seconds)
*/
void nfs4_set_lease_period(struct nfs_client *clp,
unsigned long lease)
void nfs4_set_lease_period(struct nfs_client *clp, u32 period)
{
unsigned long lease;
/* Limit the lease period */
if (period < MAX_LEASE_PERIOD)
lease = period * HZ;
else
lease = MAX_LEASE_PERIOD * HZ;
spin_lock(&clp->cl_lock);
clp->cl_lease_time = lease;
spin_unlock(&clp->cl_lock);

View File

@@ -408,8 +408,6 @@ void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
}
}
#if defined(CONFIG_NFS_V4_1)
static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl,
u32 target_highest_slotid)
{
@@ -653,5 +651,3 @@ int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
#endif /* defined(CONFIG_NFS_V4_1) */

View File

@@ -111,7 +111,6 @@ static inline struct nfs4_session *nfs4_get_session(const struct nfs_client *clp
return clp->cl_session;
}
#if defined(CONFIG_NFS_V4_1)
extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
u32 target_highest_slotid);
extern void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
@@ -154,28 +153,6 @@ static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
*/
#define nfs_session_id_hash(sess_id) \
(~crc32_le(0xFFFFFFFF, &(sess_id)->data[0], sizeof((sess_id)->data)))
#else /* defined(CONFIG_NFS_V4_1) */
static inline int nfs4_init_session(struct nfs_client *clp)
{
return 0;
}
/*
* Determine if sessions are in use.
*/
static inline int nfs4_has_session(const struct nfs_client *clp)
{
return 0;
}
static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
{
return 0;
}
#define nfs_session_id_hash(session) (0)
#endif /* defined(CONFIG_NFS_V4_1) */
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
#endif /* __LINUX_FS_NFS_NFS4SESSION_H */

View File

@@ -54,6 +54,7 @@
#include <linux/sunrpc/clnt.h>
#include "nfs4_fs.h"
#include "nfs40.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
@@ -103,7 +104,7 @@ static int nfs4_setup_state_renewal(struct nfs_client *clp)
status = nfs4_proc_get_lease_time(clp, &fsinfo);
if (status == 0) {
nfs4_set_lease_period(clp, fsinfo.lease_time * HZ);
nfs4_set_lease_period(clp, fsinfo.lease_time);
nfs4_schedule_state_renewal(clp);
}
@@ -142,55 +143,6 @@ out:
return status;
}
/**
* nfs40_discover_server_trunking - Detect server IP address trunking (mv0)
*
* @clp: nfs_client under test
* @result: OUT: found nfs_client, or clp
* @cred: credential to use for trunking test
*
* Returns zero, a negative errno, or a negative NFS4ERR status.
* If zero is returned, an nfs_client pointer is planted in
* "result".
*
* Note: The returned client may not yet be marked ready.
*/
int nfs40_discover_server_trunking(struct nfs_client *clp,
struct nfs_client **result,
const struct cred *cred)
{
struct nfs4_setclientid_res clid = {
.clientid = clp->cl_clientid,
.confirm = clp->cl_confirm,
};
struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
unsigned short port;
int status;
port = nn->nfs_callback_tcpport;
if (clp->cl_addr.ss_family == AF_INET6)
port = nn->nfs_callback_tcpport6;
status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred, &clid);
if (status != 0)
goto out;
clp->cl_clientid = clid.clientid;
clp->cl_confirm = clid.confirm;
status = nfs40_walk_client_list(clp, result, cred);
if (status == 0) {
/* Sustain the lease, even if it's empty. If the clientid4
* goes stale it's of no use for trunking discovery. */
nfs4_schedule_state_renewal(*result);
/* If the client state need to recover, do it. */
if (clp->cl_state)
nfs4_schedule_state_manager(clp);
}
out:
return status;
}
const struct cred *nfs4_get_machine_cred(struct nfs_client *clp)
{
return get_cred(rpc_machine_cred());
@@ -307,8 +259,6 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
return nfs4_drain_slot_tbl(&ses->fc_slot_table);
}
#if defined(CONFIG_NFS_V4_1)
static void nfs41_finish_session_reset(struct nfs_client *clp)
{
clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
@@ -387,8 +337,6 @@ int nfs41_discover_server_trunking(struct nfs_client *clp,
return status;
}
#endif /* CONFIG_NFS_V4_1 */
/**
* nfs4_get_clid_cred - Acquire credential for a setclientid operation
* @clp: client state handle
@@ -1343,28 +1291,6 @@ int nfs4_client_recover_expired_lease(struct nfs_client *clp)
return ret;
}
/*
* nfs40_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
* @clp: client to process
*
* Set the NFS4CLNT_LEASE_EXPIRED state in order to force a
* resend of the SETCLIENTID and hence re-establish the
* callback channel. Then return all existing delegations.
*/
static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
{
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs_expire_all_delegations(clp);
dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
clp->cl_hostname);
}
void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
{
nfs40_handle_cb_pathdown(clp);
nfs4_schedule_state_manager(clp);
}
static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
@@ -1874,9 +1800,11 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
switch (error) {
case 0:
break;
#if IS_ENABLED(CONFIG_NFS_V4_0)
case -NFS4ERR_CB_PATH_DOWN:
nfs40_handle_cb_pathdown(clp);
break;
#endif /* CONFIG_NFS_V4_0 */
case -NFS4ERR_NO_GRACE:
nfs4_state_end_reclaim_reboot(clp);
break;
@@ -2378,7 +2306,6 @@ out_unlock:
return status;
}
#ifdef CONFIG_NFS_V4_1
void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
{
struct nfs_client *clp = session->clp;
@@ -2585,18 +2512,6 @@ static void nfs4_layoutreturn_any_run(struct nfs_client *clp)
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
}
}
#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
static int nfs4_bind_conn_to_session(struct nfs_client *clp)
{
return 0;
}
static void nfs4_layoutreturn_any_run(struct nfs_client *clp)
{
}
#endif /* CONFIG_NFS_V4_1 */
static void nfs4_state_manager(struct nfs_client *clp)
{

View File

@@ -14,7 +14,6 @@
#define CREATE_TRACE_POINTS
#include "nfs4trace.h"
#ifdef CONFIG_NFS_V4_1
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs4_pnfs_read);
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs4_pnfs_write);
EXPORT_TRACEPOINT_SYMBOL_GPL(nfs4_pnfs_commit_ds);
@@ -39,4 +38,3 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_unreg);
EXPORT_TRACEPOINT_SYMBOL_GPL(bl_pr_key_unreg_err);
EXPORT_TRACEPOINT_SYMBOL_GPL(fl_getdevinfo);
#endif

View File

@@ -71,7 +71,6 @@ DEFINE_NFS4_CLIENTID_EVENT(nfs4_setclientid);
DEFINE_NFS4_CLIENTID_EVENT(nfs4_setclientid_confirm);
DEFINE_NFS4_CLIENTID_EVENT(nfs4_renew);
DEFINE_NFS4_CLIENTID_EVENT(nfs4_renew_async);
#ifdef CONFIG_NFS_V4_1
DEFINE_NFS4_CLIENTID_EVENT(nfs4_exchange_id);
DEFINE_NFS4_CLIENTID_EVENT(nfs4_create_session);
DEFINE_NFS4_CLIENTID_EVENT(nfs4_destroy_session);
@@ -302,8 +301,6 @@ TRACE_EVENT(pnfs_ds_connect,
)
);
#endif /* CONFIG_NFS_V4_1 */
TRACE_EVENT(nfs4_setup_sequence,
TP_PROTO(
const struct nfs4_session *session,
@@ -990,14 +987,11 @@ DEFINE_NFS4_SET_DELEGATION_EVENT(nfs4_detach_delegation);
#define show_delegation_flags(flags) \
__print_flags(flags, "|", \
{ BIT(NFS_DELEGATION_NEED_RECLAIM), "NEED_RECLAIM" }, \
{ BIT(NFS_DELEGATION_RETURN), "RETURN" }, \
{ BIT(NFS_DELEGATION_RETURN_IF_CLOSED), "RETURN_IF_CLOSED" }, \
{ BIT(NFS_DELEGATION_REFERENCED), "REFERENCED" }, \
{ BIT(NFS_DELEGATION_RETURNING), "RETURNING" }, \
{ BIT(NFS_DELEGATION_REVOKED), "REVOKED" }, \
{ BIT(NFS_DELEGATION_TEST_EXPIRED), "TEST_EXPIRED" }, \
{ BIT(NFS_DELEGATION_INODE_FREEING), "INODE_FREEING" }, \
{ BIT(NFS_DELEGATION_RETURN_DELAYED), "RETURN_DELAYED" })
{ BIT(NFS_DELEGATION_TEST_EXPIRED), "TEST_EXPIRED" })
DECLARE_EVENT_CLASS(nfs4_delegation_event,
TP_PROTO(
@@ -1070,7 +1064,6 @@ TRACE_EVENT(nfs4_delegreturn_exit,
)
);
#ifdef CONFIG_NFS_V4_1
DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
TP_PROTO(
const struct nfs4_state *state,
@@ -1125,7 +1118,6 @@ DECLARE_EVENT_CLASS(nfs4_test_stateid_event,
DEFINE_NFS4_TEST_STATEID_EVENT(nfs4_test_delegation_stateid);
DEFINE_NFS4_TEST_STATEID_EVENT(nfs4_test_open_stateid);
DEFINE_NFS4_TEST_STATEID_EVENT(nfs4_test_lock_stateid);
#endif /* CONFIG_NFS_V4_1 */
DECLARE_EVENT_CLASS(nfs4_lookup_event,
TP_PROTO(
@@ -1628,12 +1620,8 @@ DEFINE_NFS4_IDMAP_EVENT(nfs4_map_group_to_gid);
DEFINE_NFS4_IDMAP_EVENT(nfs4_map_uid_to_name);
DEFINE_NFS4_IDMAP_EVENT(nfs4_map_gid_to_group);
#ifdef CONFIG_NFS_V4_1
#define NFS4_LSEG_LAYOUT_STATEID_HASH(lseg) \
(lseg ? nfs_stateid_hash(&lseg->pls_layout->plh_stateid) : 0)
#else
#define NFS4_LSEG_LAYOUT_STATEID_HASH(lseg) (0)
#endif
DECLARE_EVENT_CLASS(nfs4_read_event,
TP_PROTO(
@@ -1705,9 +1693,7 @@ DECLARE_EVENT_CLASS(nfs4_read_event,
), \
TP_ARGS(hdr, error))
DEFINE_NFS4_READ_EVENT(nfs4_read);
#ifdef CONFIG_NFS_V4_1
DEFINE_NFS4_READ_EVENT(nfs4_pnfs_read);
#endif /* CONFIG_NFS_V4_1 */
DECLARE_EVENT_CLASS(nfs4_write_event,
TP_PROTO(
@@ -1780,9 +1766,7 @@ DECLARE_EVENT_CLASS(nfs4_write_event,
), \
TP_ARGS(hdr, error))
DEFINE_NFS4_WRITE_EVENT(nfs4_write);
#ifdef CONFIG_NFS_V4_1
DEFINE_NFS4_WRITE_EVENT(nfs4_pnfs_write);
#endif /* CONFIG_NFS_V4_1 */
DECLARE_EVENT_CLASS(nfs4_commit_event,
TP_PROTO(
@@ -1842,7 +1826,6 @@ DECLARE_EVENT_CLASS(nfs4_commit_event,
), \
TP_ARGS(data, error))
DEFINE_NFS4_COMMIT_EVENT(nfs4_commit);
#ifdef CONFIG_NFS_V4_1
DEFINE_NFS4_COMMIT_EVENT(nfs4_pnfs_commit_ds);
TRACE_EVENT(nfs4_layoutget,
@@ -2876,8 +2859,6 @@ DEFINE_NFS4_XATTR_EVENT(nfs4_removexattr);
DEFINE_NFS4_INODE_EVENT(nfs4_listxattr);
#endif /* CONFIG_NFS_V4_2 */
#endif /* CONFIG_NFS_V4_1 */
#endif /* _TRACE_NFS4_H */
#undef TRACE_INCLUDE_PATH

View File

@@ -308,7 +308,6 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_secinfo_maxsz (op_encode_hdr_maxsz + nfs4_name_maxsz)
#define decode_secinfo_maxsz (op_decode_hdr_maxsz + 1 + ((NFS_MAX_SECFLAVORS * (16 + GSS_OID_MAX_LEN)) / 4))
#if defined(CONFIG_NFS_V4_1)
#define NFS4_MAX_MACHINE_NAME_LEN (64)
#define IMPL_NAME_LIMIT (sizeof(utsname()->sysname) + sizeof(utsname()->release) + \
sizeof(utsname()->version) + sizeof(utsname()->machine) + 8)
@@ -455,16 +454,6 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_free_stateid_maxsz (op_encode_hdr_maxsz + 1 + \
XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_free_stateid_maxsz (op_decode_hdr_maxsz)
#else /* CONFIG_NFS_V4_1 */
#define encode_sequence_maxsz 0
#define decode_sequence_maxsz 0
#define encode_get_dir_deleg_maxsz 0
#define decode_get_dir_deleg_maxsz 0
#define encode_layoutreturn_maxsz 0
#define decode_layoutreturn_maxsz 0
#define encode_layoutget_maxsz 0
#define decode_layoutget_maxsz 0
#endif /* CONFIG_NFS_V4_1 */
#define NFS4_enc_compound_sz (1024) /* XXX: large enough? */
#define NFS4_dec_compound_sz (1024) /* XXX: large enough? */
@@ -838,7 +827,6 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
decode_putfh_maxsz + \
decode_getfh_maxsz + \
decode_renew_maxsz)
#if defined(CONFIG_NFS_V4_1)
#define NFS4_enc_bind_conn_to_session_sz \
(compound_encode_hdr_maxsz + \
encode_bind_conn_to_session_maxsz)
@@ -871,7 +859,6 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define NFS4_dec_sequence_sz \
(compound_decode_hdr_maxsz + \
decode_sequence_maxsz)
#endif
#define NFS4_enc_get_lease_time_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putrootfh_maxsz + \
@@ -880,7 +867,6 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
decode_sequence_maxsz + \
decode_putrootfh_maxsz + \
decode_fsinfo_maxsz)
#if defined(CONFIG_NFS_V4_1)
#define NFS4_enc_reclaim_complete_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_reclaim_complete_maxsz)
@@ -958,7 +944,6 @@ const u32 nfs41_maxgetdevinfo_overhead = ((RPC_MAX_REPHEADER_WITH_AUTH +
decode_sequence_maxsz) *
XDR_UNIT);
EXPORT_SYMBOL_GPL(nfs41_maxgetdevinfo_overhead);
#endif /* CONFIG_NFS_V4_1 */
static const umode_t nfs_type2fmt[] = {
[NF4BAD] = 0,
@@ -1399,11 +1384,13 @@ static void encode_locku(struct xdr_stream *xdr, const struct nfs_locku_args *ar
xdr_encode_hyper(p, nfs4_lock_length(args->fl));
}
#if defined(CONFIG_NFS_V4_0)
static void encode_release_lockowner(struct xdr_stream *xdr, const struct nfs_lowner *lowner, struct compound_hdr *hdr)
{
encode_op_hdr(xdr, OP_RELEASE_LOCKOWNER, decode_release_lockowner_maxsz, hdr);
encode_lockowner(xdr, lowner);
}
#endif /* CONFIG_NFS_V4_0 */
static void encode_lookup(struct xdr_stream *xdr, const struct qstr *name, struct compound_hdr *hdr)
{
@@ -1832,7 +1819,6 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
encode_string(xdr, name->len, name->name);
}
#if defined(CONFIG_NFS_V4_1)
/* NFSv4.1 operations */
static void encode_bind_conn_to_session(struct xdr_stream *xdr,
const struct nfs41_bind_conn_to_session_args *args,
@@ -1984,13 +1970,11 @@ static void encode_reclaim_complete(struct xdr_stream *xdr,
encode_op_hdr(xdr, OP_RECLAIM_COMPLETE, decode_reclaim_complete_maxsz, hdr);
encode_uint32(xdr, args->one_fs);
}
#endif /* CONFIG_NFS_V4_1 */
static void encode_sequence(struct xdr_stream *xdr,
const struct nfs4_sequence_args *args,
struct compound_hdr *hdr)
{
#if defined(CONFIG_NFS_V4_1)
struct nfs4_session *session;
struct nfs4_slot_table *tp;
struct nfs4_slot *slot = args->sa_slot;
@@ -2021,10 +2005,8 @@ static void encode_sequence(struct xdr_stream *xdr,
*p++ = cpu_to_be32(slot->slot_nr);
*p++ = cpu_to_be32(tp->highest_used_slotid);
*p = cpu_to_be32(args->sa_cache_this);
#endif /* CONFIG_NFS_V4_1 */
}
#ifdef CONFIG_NFS_V4_1
static void
encode_get_dir_delegation(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
@@ -2186,26 +2168,6 @@ static void encode_free_stateid(struct xdr_stream *xdr,
encode_op_hdr(xdr, OP_FREE_STATEID, decode_free_stateid_maxsz, hdr);
encode_nfs4_stateid(xdr, &args->stateid);
}
#else
static inline void
encode_get_dir_delegation(struct xdr_stream *xdr, struct compound_hdr *hdr)
{
}
static inline void
encode_layoutreturn(struct xdr_stream *xdr,
const struct nfs4_layoutreturn_args *args,
struct compound_hdr *hdr)
{
}
static void
encode_layoutget(struct xdr_stream *xdr,
const struct nfs4_layoutget_args *args,
struct compound_hdr *hdr)
{
}
#endif /* CONFIG_NFS_V4_1 */
/*
* END OF "GENERIC" ENCODE ROUTINES.
@@ -2213,11 +2175,9 @@ encode_layoutget(struct xdr_stream *xdr,
static u32 nfs4_xdr_minorversion(const struct nfs4_sequence_args *args)
{
#if defined(CONFIG_NFS_V4_1)
struct nfs4_session *session = args->sa_slot->table->session;
if (session)
return session->clp->cl_mvops->minor_version;
#endif /* CONFIG_NFS_V4_1 */
return 0;
}
@@ -2583,6 +2543,7 @@ static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_nops(&hdr);
}
#if defined(CONFIG_NFS_V4_0)
static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
struct xdr_stream *xdr,
const void *data)
@@ -2596,6 +2557,7 @@ static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
encode_release_lockowner(xdr, &args->lock_owner, &hdr);
encode_nops(&hdr);
}
#endif /* CONFIG_NFS_V4_0 */
/*
* Encode a READLINK request
@@ -2825,6 +2787,7 @@ static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
/*
* a RENEW request
*/
#if defined(CONFIG_NFS_V4_0)
static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
const void *data)
@@ -2838,6 +2801,7 @@ static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_renew(xdr, clp->cl_clientid, &hdr);
encode_nops(&hdr);
}
#endif /* CONFIG_NFS_V4_0 */
/*
* a SETCLIENTID request
@@ -2971,7 +2935,6 @@ static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
encode_nops(&hdr);
}
#if defined(CONFIG_NFS_V4_1)
/*
* BIND_CONN_TO_SESSION request
*/
@@ -3073,8 +3036,6 @@ static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
encode_nops(&hdr);
}
#endif
/*
* a GET_LEASE_TIME request
*/
@@ -3095,8 +3056,6 @@ static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
encode_nops(&hdr);
}
#ifdef CONFIG_NFS_V4_1
/*
* a RECLAIM_COMPLETE request
*/
@@ -3259,7 +3218,6 @@ static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req,
encode_free_stateid(xdr, args, &hdr);
encode_nops(&hdr);
}
#endif /* CONFIG_NFS_V4_1 */
static int decode_opaque_inline(struct xdr_stream *xdr, unsigned int *len, char **string)
{
@@ -5224,10 +5182,12 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
return status;
}
#if defined(CONFIG_NFS_V4_0)
static int decode_release_lockowner(struct xdr_stream *xdr)
{
return decode_op_hdr(xdr, OP_RELEASE_LOCKOWNER);
}
#endif /* CONFIG_NFS_V4_0 */
static int decode_lookup(struct xdr_stream *xdr)
{
@@ -5756,7 +5716,6 @@ static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
return decode_secinfo_common(xdr, res);
}
#if defined(CONFIG_NFS_V4_1)
static int decode_secinfo_no_name(struct xdr_stream *xdr, struct nfs4_secinfo_res *res)
{
int status = decode_op_hdr(xdr, OP_SECINFO_NO_NAME);
@@ -5968,13 +5927,11 @@ static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
{
return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
}
#endif /* CONFIG_NFS_V4_1 */
static int decode_sequence(struct xdr_stream *xdr,
struct nfs4_sequence_res *res,
struct rpc_rqst *rqstp)
{
#if defined(CONFIG_NFS_V4_1)
struct nfs4_session *session;
struct nfs4_sessionid id;
u32 dummy;
@@ -6034,12 +5991,8 @@ out_err:
out_overflow:
status = -EIO;
goto out_err;
#else /* CONFIG_NFS_V4_1 */
return 0;
#endif /* CONFIG_NFS_V4_1 */
}
#if defined(CONFIG_NFS_V4_1)
static int decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
stateid->type = NFS4_LAYOUT_STATEID_TYPE;
@@ -6302,27 +6255,6 @@ static int decode_free_stateid(struct xdr_stream *xdr,
res->status = decode_op_hdr(xdr, OP_FREE_STATEID);
return res->status;
}
#else
static int decode_get_dir_delegation(struct xdr_stream *xdr,
struct nfs4_getattr_res *res)
{
return 0;
}
static inline
int decode_layoutreturn(struct xdr_stream *xdr,
struct nfs4_layoutreturn_res *res)
{
return 0;
}
static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
struct nfs4_layoutget_res *res)
{
return 0;
}
#endif /* CONFIG_NFS_V4_1 */
/*
* END OF "GENERIC" DECODE ROUTINES.
@@ -6930,6 +6862,7 @@ out:
return status;
}
#if defined(CONFIG_NFS_V4_0)
static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
struct xdr_stream *xdr, void *dummy)
{
@@ -6941,6 +6874,7 @@ static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
status = decode_release_lockowner(xdr);
return status;
}
#endif /* CONFIG_NFS_V4_0 */
/*
* Decode READLINK response
@@ -7162,6 +7096,7 @@ out:
/*
* Decode RENEW response
*/
#if defined(CONFIG_NFS_V4_0)
static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
void *__unused)
{
@@ -7173,6 +7108,7 @@ static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
status = decode_renew(xdr);
return status;
}
#endif /* CONFIG_NFS_V4_0 */
/*
* Decode SETCLIENTID response
@@ -7347,7 +7283,6 @@ out:
return status;
}
#if defined(CONFIG_NFS_V4_1)
/*
* Decode BIND_CONN_TO_SESSION response
*/
@@ -7444,8 +7379,6 @@ static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
return status;
}
#endif
/*
* Decode GET_LEASE_TIME response
*/
@@ -7467,8 +7400,6 @@ static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
return status;
}
#ifdef CONFIG_NFS_V4_1
/*
* Decode RECLAIM_COMPLETE response
*/
@@ -7656,7 +7587,6 @@ static int nfs4_xdr_dec_free_stateid(struct rpc_rqst *rqstp,
out:
return status;
}
#endif /* CONFIG_NFS_V4_1 */
/**
* nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in
@@ -7754,13 +7684,16 @@ int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
.p_name = #proc, \
}
#if defined(CONFIG_NFS_V4_1)
#define PROC41(proc, argtype, restype) \
#if defined(CONFIG_NFS_V4_0)
#define PROC40(proc, argtype, restype) \
PROC(proc, argtype, restype)
#else
#define PROC41(proc, argtype, restype) \
#define PROC40(proc, argtype, restype) \
STUB(proc)
#endif
#endif /* CONFIG_NFS_V4_0 */
#define PROC41(proc, argtype, restype) \
PROC(proc, argtype, restype)
#if defined(CONFIG_NFS_V4_2)
#define PROC42(proc, argtype, restype) \
@@ -7781,7 +7714,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC(CLOSE, enc_close, dec_close),
PROC(SETATTR, enc_setattr, dec_setattr),
PROC(FSINFO, enc_fsinfo, dec_fsinfo),
PROC(RENEW, enc_renew, dec_renew),
PROC40(RENEW, enc_renew, dec_renew),
PROC(SETCLIENTID, enc_setclientid, dec_setclientid),
PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm),
PROC(LOCK, enc_lock, dec_lock),
@@ -7805,7 +7738,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC(GETACL, enc_getacl, dec_getacl),
PROC(SETACL, enc_setacl, dec_setacl),
PROC(FS_LOCATIONS, enc_fs_locations, dec_fs_locations),
PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
PROC40(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
PROC(SECINFO, enc_secinfo, dec_secinfo),
PROC(FSID_PRESENT, enc_fsid_present, dec_fsid_present),
PROC41(EXCHANGE_ID, enc_exchange_id, dec_exchange_id),

View File

@@ -463,7 +463,8 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
};
struct pnfs_layout_segment *lseg, *next;
set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
if (test_and_set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
return !list_empty(&lo->plh_segs);
clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(lo->plh_inode)->flags);
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
pnfs_clear_lseg_state(lseg, lseg_list);

View File

@@ -84,7 +84,7 @@ enum pnfs_try_status {
PNFS_TRY_AGAIN = 2,
};
#ifdef CONFIG_NFS_V4_1
#if IS_ENABLED(CONFIG_NFS_V4)
#define LAYOUT_NFSV4_1_MODULE_PREFIX "nfs-layouttype4"
@@ -704,7 +704,7 @@ static inline void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id)
}
#endif /* NFS_DEBUG */
#else /* CONFIG_NFS_V4_1 */
#else /* CONFIG_NFS_V4 */
static inline bool nfs_have_layout(struct inode *inode)
{
@@ -913,7 +913,7 @@ static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo)
return false;
}
#endif /* CONFIG_NFS_V4_1 */
#endif /* CONFIG_NFS_V4 */
#if IS_ENABLED(CONFIG_NFS_V4_2)
int pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags);

View File

@@ -697,11 +697,10 @@ static int nfs_have_delegation(struct inode *inode, fmode_t type, int flags)
return 0;
}
static int nfs_return_delegation(struct inode *inode)
static void nfs_return_delegation(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
nfs_wb_all(inode);
return 0;
}
static const struct inode_operations nfs_dir_inode_operations = {

View File

@@ -68,10 +68,10 @@ void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_server *server = NFS_SERVER(inode);
const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;
#ifdef CONFIG_NFS_V4_1
#if IS_ENABLED(CONFIG_NFS_V4)
if (server->pnfs_curr_ld && !force_mds)
pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
#endif /* CONFIG_NFS_V4 */
nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
server->rsize, 0);
}

View File

@@ -212,15 +212,14 @@ void nfs_sb_deactive(struct super_block *sb)
}
EXPORT_SYMBOL_GPL(nfs_sb_deactive);
static int __nfs_list_for_each_server(struct list_head *head,
int (*fn)(struct nfs_server *, void *),
void *data)
int nfs_client_for_each_server(struct nfs_client *clp,
int (*fn)(struct nfs_server *server, void *data), void *data)
{
struct nfs_server *server, *last = NULL;
int ret = 0;
rcu_read_lock();
list_for_each_entry_rcu(server, head, client_link) {
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
if (!(server->super && nfs_sb_active(server->super)))
continue;
rcu_read_unlock();
@@ -239,13 +238,6 @@ out:
nfs_sb_deactive(last->super);
return ret;
}
int nfs_client_for_each_server(struct nfs_client *clp,
int (*fn)(struct nfs_server *, void *),
void *data)
{
return __nfs_list_for_each_server(&clp->cl_superblocks, fn, data);
}
EXPORT_SYMBOL_GPL(nfs_client_for_each_server);
/*
@@ -597,18 +589,13 @@ static void show_lease(struct seq_file *m, struct nfs_server *server)
seq_printf(m, ",lease_expired=%ld",
time_after(expire, jiffies) ? 0 : (jiffies - expire) / HZ);
}
#ifdef CONFIG_NFS_V4_1
static void show_sessions(struct seq_file *m, struct nfs_server *server)
{
if (nfs4_has_session(server->nfs_client))
seq_puts(m, ",sessions");
}
#else
static void show_sessions(struct seq_file *m, struct nfs_server *server) {}
#endif
#endif
#ifdef CONFIG_NFS_V4_1
static void show_pnfs(struct seq_file *m, struct nfs_server *server)
{
seq_printf(m, ",pnfs=");
@@ -628,16 +615,11 @@ static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
impl_id->date.seconds, impl_id->date.nseconds);
}
}
#else
#if IS_ENABLED(CONFIG_NFS_V4)
static void show_pnfs(struct seq_file *m, struct nfs_server *server)
{
}
#endif
#else /* CONFIG_NFS_V4 */
static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
{
}
#endif
#endif /* CONFIG_NFS_V4 */
int nfs_show_devname(struct seq_file *m, struct dentry *root)
{

View File

@@ -293,7 +293,7 @@ out:
static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown);
#if IS_ENABLED(CONFIG_NFS_V4_1)
#if IS_ENABLED(CONFIG_NFS_V4)
static ssize_t
implid_domain_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
@@ -323,7 +323,7 @@ implid_name_show(struct kobject *kobj, struct kobj_attribute *attr,
static struct kobj_attribute nfs_sysfs_attr_implid_name = __ATTR_RO(implid_name);
#endif /* IS_ENABLED(CONFIG_NFS_V4_1) */
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
#define RPC_CLIENT_NAME_SIZE 64
@@ -362,7 +362,7 @@ static struct kobj_type nfs_sb_ktype = {
.child_ns_type = nfs_netns_object_child_ns_type,
};
#if IS_ENABLED(CONFIG_NFS_V4_1)
#if IS_ENABLED(CONFIG_NFS_V4)
static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
{
int ret;
@@ -382,11 +382,11 @@ static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
server->s_sysfs_id, ret);
}
#else /* CONFIG_NFS_V4_1 */
#else /* CONFIG_NFS_V4 */
static inline void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
{
}
#endif /* CONFIG_NFS_V4_1 */
#endif /* CONFIG_NFS_V4 */
#if IS_ENABLED(CONFIG_NFS_LOCALIO)

View File

@@ -1402,7 +1402,7 @@ void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_server *server = NFS_SERVER(inode);
const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;
#ifdef CONFIG_NFS_V4_1
#if IS_ENABLED(CONFIG_NFS_V4)
if (server->pnfs_curr_ld && !force_mds)
pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif

View File

@@ -115,9 +115,7 @@ struct nfs_client {
#define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */
#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
#define NFS_SP4_MACH_CRED_PNFS_CLEANUP 7 /* LAYOUTRETURN */
#if IS_ENABLED(CONFIG_NFS_V4_1)
wait_queue_head_t cl_lock_waitq;
#endif /* CONFIG_NFS_V4_1 */
#endif /* CONFIG_NFS_V4 */
/* Our own IP address, as a null-terminated string.
@@ -259,6 +257,10 @@ struct nfs_server {
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
spinlock_t delegations_lock;
struct list_head delegations_return;
struct list_head delegations_lru;
struct list_head delegations_delayed;
atomic_long_t nr_active_delegations;
unsigned int delegation_hash_mask;
struct hlist_head *delegation_hash_table;
@@ -266,9 +268,7 @@ struct nfs_server {
struct list_head ss_src_copies;
unsigned long delegation_flags;
#define NFS4SERV_DELEGRETURN (1)
#define NFS4SERV_DELEGATION_EXPIRED (2)
#define NFS4SERV_DELEGRETURN_DELAYED (3)
#define NFS4SERV_DELEGATION_EXPIRED (1)
unsigned long delegation_gen;
unsigned long mig_gen;
unsigned long mig_status;

View File

@@ -209,6 +209,7 @@ struct nfs4_sequence_args {
};
struct nfs4_sequence_res {
const struct nfs4_sequence_slot_ops *sr_slot_ops;
struct nfs4_slot *sr_slot; /* slot used to send request */
unsigned long sr_timestamp;
int sr_status; /* sequence operation status */
@@ -1323,10 +1324,6 @@ struct nfs4_fsid_present_res {
unsigned char renew:1;
};
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_V4_1
struct pnfs_commit_bucket {
struct list_head written;
struct list_head committing;
@@ -1466,7 +1463,7 @@ struct nfs41_free_stateid_res {
struct pnfs_ds_commit_info {
};
#endif /* CONFIG_NFS_V4_1 */
#endif /* CONFIG_NFS_V4 */
#ifdef CONFIG_NFS_V4_2
struct nfs42_falloc_args {
@@ -1849,7 +1846,7 @@ struct nfs_rpc_ops {
struct iattr *iattr,
int *);
int (*have_delegation)(struct inode *, fmode_t, int);
int (*return_delegation)(struct inode *);
void (*return_delegation)(struct inode *);
struct nfs_client *(*alloc_client) (const struct nfs_client_initdata *);
struct nfs_client *(*init_client) (struct nfs_client *,
const struct nfs_client_initdata *);

View File

@@ -14,12 +14,10 @@
/*
* Debugging macros etc
*/
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
extern unsigned int rpc_debug;
extern unsigned int nfs_debug;
extern unsigned int nfsd_debug;
extern unsigned int nlm_debug;
#endif
#define dprintk(fmt, ...) \
dfprintk(FACILITY, fmt, ##__VA_ARGS__)

View File

@@ -39,6 +39,8 @@ static const struct rpc_authops authgss_ops;
static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;
static void gss_free_callback(struct kref *kref);
#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
@@ -551,6 +553,7 @@ gss_alloc_msg(struct gss_auth *gss_auth,
}
return gss_msg;
err_put_pipe_version:
kref_put(&gss_auth->kref, gss_free_callback);
put_pipe_version(gss_auth->net);
err_free_msg:
kfree(gss_msg);

View File

@@ -147,7 +147,7 @@ EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
{
struct rpc_rqst *req;
struct list_head tmp_list;
LIST_HEAD(tmp_list);
int i;
dprintk("RPC: setup backchannel transport\n");
@@ -163,7 +163,6 @@ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
* lock is held on the rpc_xprt struct. It also makes cleanup
* easier in case of memory allocation errors.
*/
INIT_LIST_HEAD(&tmp_list);
for (i = 0; i < min_reqs; i++) {
/* Pre-allocate one backchannel rpc_rqst */
req = xprt_alloc_bc_req(xprt);