54 files changed, 969 insertions, 1799 deletions
diff --git a/Documentation/filesystems/nfs/exporting.rst b/Documentation/filesystems/nfs/exporting.rst index 3d97b8d8f735..4b30daee399a 100644 --- a/Documentation/filesystems/nfs/exporting.rst +++ b/Documentation/filesystems/nfs/exporting.rst @@ -215,3 +215,29 @@ following flags are defined: This flag causes nfsd to close any open files for this inode _before_ calling into the vfs to do an unlink or a rename that would replace an existing file. + + EXPORT_OP_REMOTE_FS - Backing storage for this filesystem is remote + PF_LOCAL_THROTTLE exists for loopback NFSD, where a thread needs to + write to one bdi (the final bdi) in order to free up writes queued + to another bdi (the client bdi). Such threads get a private balance + of dirty pages so that dirty pages for the client bdi do not impact + the daemon writing to the final bdi. For filesystems whose durable + storage is not local (such as exported NFS filesystems), this + constraint has negative consequences. EXPORT_OP_REMOTE_FS enables + an export to disable writeback throttling. + + EXPORT_OP_NOATOMIC_ATTR - Filesystem does not update attributes atomically + EXPORT_OP_NOATOMIC_ATTR indicates that the exported filesystem + cannot provide the semantics required by the "atomic" boolean in + NFSv4's change_info4. This boolean indicates to a client whether the + returned before and after change attributes were obtained atomically + with respect to the requested metadata operation (UNLINK, + OPEN/CREATE, MKDIR, etc). + + EXPORT_OP_FLUSH_ON_CLOSE - Filesystem flushes file data on close(2) + On most filesystems, inodes can remain under writeback after the + file is closed. NFSD relies on client activity or local flusher + threads to handle writeback. Certain filesystems, such as NFS, flush + all of an inode's dirty data on last close. Exports that behave this + way should set EXPORT_OP_FLUSH_ON_CLOSE so that NFSD knows to skip + waiting for writeback when closing such files.
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index d1dbe47c7975..c20704aa21b3 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -386,6 +386,7 @@ static int export_encode_fh(struct inode *inode, struct fid *fid, * @inode: the object to encode * @fid: where to store the file handle fragment * @max_len: maximum length to store there + * @parent: parent directory inode, if wanted * @flags: properties of the requested file handle * * Returns an enum fid_type or a negative errno.
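
[Editor's note] The three export flags documented above are advertised through the flags field of a filesystem's struct export_operations; NFSD consults these bits when deciding whether to throttle writeback, assert the "atomic" flag in change_info4, and wait for writeback before closing a file. A minimal sketch of an export declaring all three follows. It is loosely modeled on the NFS client's own export ops; the example_export_ops name and the exact flag combination are illustrative, not taken from this series:

#include <linux/exportfs.h>

static const struct export_operations example_export_ops = {
	/* .encode_fh, .fh_to_dentry, etc. omitted for brevity */
	.flags	= EXPORT_OP_REMOTE_FS |		/* durable storage is remote   */
		  EXPORT_OP_NOATOMIC_ATTR |	/* pre/post attrs not atomic   */
		  EXPORT_OP_FLUSH_ON_CLOSE,	/* dirty data flushed on close */
};
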
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c index 1d9488cf0534..87a0f207df0b 100644 --- a/fs/lockd/mon.c +++ b/fs/lockd/mon.c @@ -276,6 +276,9 @@ static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap, { struct nsm_handle *new; + if (!hostname) + return NULL; + new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL); if (unlikely(new == NULL)) return NULL; diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 22d3ff3818f5..6579948070a4 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -45,7 +45,6 @@ #define NLMDBG_FACILITY NLMDBG_SVC #define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE) -#define ALLOWED_SIGS (sigmask(SIGKILL)) static struct svc_program nlmsvc_program; @@ -57,6 +56,12 @@ static unsigned int nlmsvc_users; static struct svc_serv *nlmsvc_serv; unsigned long nlmsvc_timeout; +static void nlmsvc_request_retry(struct timer_list *tl) +{ + svc_wake_up(nlmsvc_serv); +} +DEFINE_TIMER(nlmsvc_retry, nlmsvc_request_retry); + unsigned int lockd_net_id; /* @@ -111,26 +116,12 @@ static void set_grace_period(struct net *net) schedule_delayed_work(&ln->grace_period_end, grace_period); } -static void restart_grace(void) -{ - if (nlmsvc_ops) { - struct net *net = &init_net; - struct lockd_net *ln = net_generic(net, lockd_net_id); - - cancel_delayed_work_sync(&ln->grace_period_end); - locks_end_grace(&ln->lockd_manager); - nlmsvc_invalidate_all(); - set_grace_period(net); - } -} - /* * This is the lockd kernel thread */ static int lockd(void *vrqstp) { - int err = 0; struct svc_rqst *rqstp = vrqstp; struct net *net = &init_net; struct lockd_net *ln = net_generic(net, lockd_net_id); @@ -138,9 +129,6 @@ lockd(void *vrqstp) /* try_to_freeze() is called from svc_recv() */ set_freezable(); - /* Allow SIGKILL to tell lockd to drop all of its locks */ - allow_signal(SIGKILL); - dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); /* @@ -148,33 +136,12 @@ lockd(void *vrqstp) * NFS mount or NFS daemon has gone away. */ while (!kthread_should_stop()) { - long timeout = MAX_SCHEDULE_TIMEOUT; - RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); - /* update sv_maxconn if it has changed */ rqstp->rq_server->sv_maxconn = nlm_max_connections; - if (signalled()) { - flush_signals(current); - restart_grace(); - continue; - } - - timeout = nlmsvc_retry_blocked(); - - /* - * Find a socket with data available and call its - * recvfrom routine. 
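
[Editor's note] The lockd changes above replace the old signal-driven retry scheme with a timer: nlmsvc_request_retry() wakes the service thread, and, in the nlmsvc_retry_blocked() hunk further down, the timer is armed with the earliest deadline found on the blocked-lock list. A condensed sketch of the mechanism; the example_arm_retry() wrapper is purely illustrative, since the real code calls mod_timer() directly at the end of nlmsvc_retry_blocked():

#include <linux/timer.h>
#include <linux/sched.h>		/* MAX_SCHEDULE_TIMEOUT */
#include <linux/sunrpc/svc.h>

static struct svc_serv *nlmsvc_serv;	/* set when lockd starts */

/* Timer callback: kick lockd out of svc_recv() so it re-runs
 * nlmsvc_retry_blocked(). */
static void nlmsvc_request_retry(struct timer_list *tl)
{
	svc_wake_up(nlmsvc_serv);
}
static DEFINE_TIMER(nlmsvc_retry, nlmsvc_request_retry);

/* Illustrative helper: arm the timer with the earliest retry deadline. */
static void example_arm_retry(unsigned long timeout)
{
	if (timeout < MAX_SCHEDULE_TIMEOUT)
		mod_timer(&nlmsvc_retry, jiffies + timeout);
}

On shutdown the timer is torn down with timer_delete_sync() in lockd_put(), as shown in the hunk below.
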
- */ - err = svc_recv(rqstp, timeout); - if (err == -EAGAIN || err == -EINTR) - continue; - dprintk("lockd: request from %s\n", - svc_print_addr(rqstp, buf, sizeof(buf))); - - svc_process(rqstp); + nlmsvc_retry_blocked(); + svc_recv(rqstp); } - flush_signals(current); if (nlmsvc_ops) nlmsvc_invalidate_all(); nlm_shutdown_hosts(); @@ -407,6 +374,7 @@ static void lockd_put(void) #endif svc_set_num_threads(nlmsvc_serv, NULL, 0); + timer_delete_sync(&nlmsvc_retry); nlmsvc_serv = NULL; dprintk("lockd_down: service destroyed\n"); } @@ -538,7 +506,7 @@ static inline int is_callback(u32 proc) } -static int lockd_authenticate(struct svc_rqst *rqstp) +static enum svc_auth_status lockd_authenticate(struct svc_rqst *rqstp) { rqstp->rq_client = NULL; switch (rqstp->rq_authop->flavour) { diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index c43ccdf28ed9..43aeba9de55c 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -131,12 +131,14 @@ static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when) static inline void nlmsvc_remove_block(struct nlm_block *block) { + spin_lock(&nlm_blocked_lock); if (!list_empty(&block->b_list)) { - spin_lock(&nlm_blocked_lock); list_del_init(&block->b_list); spin_unlock(&nlm_blocked_lock); nlmsvc_release_block(block); + return; } + spin_unlock(&nlm_blocked_lock); } /* @@ -152,6 +154,7 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) file, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end, lock->fl.fl_type); + spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { fl = &block->b_call->a_args.lock.fl; dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", @@ -161,9 +164,11 @@ nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) nlmdbg_cookie2a(&block->b_call->a_args.cookie)); if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { kref_get(&block->b_count); + spin_unlock(&nlm_blocked_lock); return block; } } + spin_unlock(&nlm_blocked_lock); return NULL; } @@ -185,16 +190,19 @@ nlmsvc_find_block(struct nlm_cookie *cookie) { struct nlm_block *block; + spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie)) goto found; } + spin_unlock(&nlm_blocked_lock); return NULL; found: dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block); kref_get(&block->b_count); + spin_unlock(&nlm_blocked_lock); return block; } @@ -317,6 +325,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *host, restart: mutex_lock(&file->f_mutex); + spin_lock(&nlm_blocked_lock); list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) { if (!match(block->b_host, host)) continue; @@ -325,11 +334,13 @@ restart: if (list_empty(&block->b_list)) continue; kref_get(&block->b_count); + spin_unlock(&nlm_blocked_lock); mutex_unlock(&file->f_mutex); nlmsvc_unlink_block(block); nlmsvc_release_block(block); goto restart; } + spin_unlock(&nlm_blocked_lock); mutex_unlock(&file->f_mutex); } @@ -1008,7 +1019,7 @@ retry_deferred_block(struct nlm_block *block) * picks up locks that can be granted, or grant notifications that must * be retransmitted. 
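
[Editor's note] The svclock.c hunks above all apply the same rule: hold nlm_blocked_lock across the entire walk of the blocked-lock lists, and take a reference on a matching block before the lock is dropped so the block cannot be freed under the caller. For readability, here is the nlmsvc_find_block() hunk consolidated into its post-patch form (dprintk and the goto-based flow omitted):

static struct nlm_block *nlmsvc_find_block(struct nlm_cookie *cookie)
{
	struct nlm_block *block;

	spin_lock(&nlm_blocked_lock);
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (nlm_cookie_match(&block->b_call->a_args.cookie, cookie)) {
			/* pin the block before releasing the list lock */
			kref_get(&block->b_count);
			spin_unlock(&nlm_blocked_lock);
			return block;
		}
	}
	spin_unlock(&nlm_blocked_lock);
	return NULL;
}
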
*/ -unsigned long +void nlmsvc_retry_blocked(void) { unsigned long timeout = MAX_SCHEDULE_TIMEOUT; @@ -1038,5 +1049,6 @@ nlmsvc_retry_blocked(void) } spin_unlock(&nlm_blocked_lock); - return timeout; + if (timeout < MAX_SCHEDULE_TIMEOUT) + mod_timer(&nlmsvc_retry, jiffies + timeout); } diff --git a/fs/locks.c b/fs/locks.c index a45efc16945d..76ad05f8070a 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1744,13 +1744,6 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri if (is_deleg && !inode_trylock(inode)) return -EAGAIN; - if (is_deleg && arg == F_WRLCK) { - /* Write delegations are not currently supported: */ - inode_unlock(inode); - WARN_ON_ONCE(1); - return -EINVAL; - } - percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 456af7d230cf..466ebf1d41b2 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -74,23 +74,12 @@ out_err: static int nfs4_callback_svc(void *vrqstp) { - int err; struct svc_rqst *rqstp = vrqstp; set_freezable(); - while (!kthread_freezable_should_stop(NULL)) { - - if (signal_pending(current)) - flush_signals(current); - /* - * Listen for a request on the socket - */ - err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT); - if (err == -EAGAIN || err == -EINTR) - continue; - svc_process(rqstp); - } + while (!kthread_freezable_should_stop(NULL)) + svc_recv(rqstp); svc_exit_thread(rqstp); return 0; @@ -112,11 +101,7 @@ nfs41_callback_svc(void *vrqstp) set_freezable(); while (!kthread_freezable_should_stop(NULL)) { - - if (signal_pending(current)) - flush_signals(current); - - prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); + prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_IDLE); spin_lock_bh(&serv->sv_cb_lock); if (!list_empty(&serv->sv_cb_list)) { req = list_first_entry(&serv->sv_cb_list, @@ -387,7 +372,7 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) * All other checking done after NFS decoding where the nfs_client can be * found in nfs4_callback_compound */ -static int nfs_callback_authenticate(struct svc_rqst *rqstp) +static enum svc_auth_status nfs_callback_authenticate(struct svc_rqst *rqstp) { rqstp->rq_auth_stat = rpc_autherr_badcred; diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c index 8e9c1a0f8d38..1ed2f691ebb9 100644 --- a/fs/nfsd/blocklayoutxdr.c +++ b/fs/nfsd/blocklayoutxdr.c @@ -83,6 +83,15 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr, int len = sizeof(__be32), ret, i; __be32 *p; + /* + * See paragraph 5 of RFC 8881 S18.40.3. + */ + if (!gdp->gd_maxcount) { + if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT) + return nfserr_resource; + return nfs_ok; + } + p = xdr_reserve_space(xdr, len + sizeof(__be32)); if (!p) return nfserr_resource; diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h index 4c9b87850ab1..929248c6ca84 100644 --- a/fs/nfsd/cache.h +++ b/fs/nfsd/cache.h @@ -19,7 +19,7 @@ * typical sockaddr_storage. This is for space reasons, since sockaddr_storage * is much larger than a sockaddr_in6. 
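
[Editor's note] With svc_recv() now waiting for work and dispatching it internally, both service threads touched above (lockd and the NFSv4.0 callback thread) reduce to the same shape: no signal handling, no return-code checks, just a loop that runs until the kthread is told to stop. A generic sketch of that shape; the names are illustrative, and lockd and nfs4_callback_svc differ only in which stop/freeze helper they poll:

#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sunrpc/svc.h>

static int example_svc_thread(void *vrqstp)
{
	struct svc_rqst *rqstp = vrqstp;

	set_freezable();
	while (!kthread_should_stop())
		svc_recv(rqstp);	/* waits for a request and processes it */

	svc_exit_thread(rqstp);
	return 0;
}
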
*/ -struct svc_cacherep { +struct nfsd_cacherep { struct { /* Keep often-read xid, csum in the same cache line: */ __be32 k_xid; @@ -84,8 +84,10 @@ int nfsd_net_reply_cache_init(struct nfsd_net *nn); void nfsd_net_reply_cache_destroy(struct nfsd_net *nn); int nfsd_reply_cache_init(struct nfsd_net *); void nfsd_reply_cache_shutdown(struct nfsd_net *); -int nfsd_cache_lookup(struct svc_rqst *); -void nfsd_cache_update(struct svc_rqst *, int, __be32 *); +int nfsd_cache_lookup(struct svc_rqst *rqstp, + struct nfsd_cacherep **cacherep); +void nfsd_cache_update(struct svc_rqst *rqstp, struct nfsd_cacherep *rp, + int cachetype, __be32 *statp); int nfsd_reply_cache_stats_show(struct seq_file *m, void *v); #endif /* NFSCACHE_H */ diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c index e81d2a5cf381..bb205328e043 100644 --- a/fs/nfsd/flexfilelayoutxdr.c +++ b/fs/nfsd/flexfilelayoutxdr.c @@ -85,6 +85,15 @@ nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr, int addr_len; __be32 *p; + /* + * See paragraph 5 of RFC 8881 S18.40.3. + */ + if (!gdp->gd_maxcount) { + if (xdr_stream_encode_u32(xdr, 0) != XDR_UNIT) + return nfserr_resource; + return nfs_ok; + } + /* len + padding for two strings */ addr_len = 16 + da->netaddr.netid_len + da->netaddr.addr_len; ver_len = 20; diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index fc8d5b7db9f8..268ef57751c4 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -307,7 +307,9 @@ nfsd3_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp, if (!IS_POSIXACL(inode)) iap->ia_mode &= ~current_umask(); - fh_fill_pre_attrs(fhp); + status = fh_fill_pre_attrs(fhp); + if (status != nfs_ok) + goto out; host_err = vfs_create(&nop_mnt_idmap, inode, child, iap->ia_mode, true); if (host_err < 0) { status = nfserrno(host_err); diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c index 518203821790..96e786b5e544 100644 --- a/fs/nfsd/nfs4acl.c +++ b/fs/nfsd/nfs4acl.c @@ -441,7 +441,7 @@ struct posix_ace_state_array { * calculated so far: */ struct posix_acl_state { - int empty; + unsigned char valid; struct posix_ace_state owner; struct posix_ace_state group; struct posix_ace_state other; @@ -457,7 +457,6 @@ init_state(struct posix_acl_state *state, int cnt) int alloc; memset(state, 0, sizeof(struct posix_acl_state)); - state->empty = 1; /* * In the worst case, each individual acl could be for a distinct * named user or group, but we don't know which, so we allocate @@ -500,7 +499,7 @@ posix_state_to_acl(struct posix_acl_state *state, unsigned int flags) * and effective cases: when there are no inheritable ACEs, * calls ->set_acl with a NULL ACL structure. */ - if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) + if (!state->valid && (flags & NFS4_ACL_TYPE_DEFAULT)) return NULL; /* @@ -622,11 +621,12 @@ static void process_one_v4_ace(struct posix_acl_state *state, struct nfs4_ace *ace) { u32 mask = ace->access_mask; + short type = ace2type(ace); int i; - state->empty = 0; + state->valid |= type; - switch (ace2type(ace)) { + switch (type) { case ACL_USER_OBJ: if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->owner, mask); @@ -726,6 +726,30 @@ static int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, if (!(ace->flag & NFS4_ACE_INHERIT_ONLY_ACE)) process_one_v4_ace(&effective_acl_state, ace); } + + /* + * At this point, the default ACL may have zeroed-out entries for owner, + * group and other. That usually results in a non-sensical resulting ACL + * that denies all access except to any ACE that was explicitly added. 
+ * + * The setfacl command solves a similar problem with this logic: + * + * "If a Default ACL entry is created, and the Default ACL contains + * no owner, owning group, or others entry, a copy of the ACL + * owner, owning group, or others entry is added to the Default ACL." + * + * Copy any missing ACEs from the effective set, if any ACEs were + * explicitly set. + */ + if (default_acl_state.valid) { + if (!(default_acl_state.valid & ACL_USER_OBJ)) + default_acl_state.owner = effective_acl_state.owner; + if (!(default_acl_state.valid & ACL_GROUP_OBJ)) + default_acl_state.group = effective_acl_state.group; + if (!(default_acl_state.valid & ACL_OTHER)) + default_acl_state.other = effective_acl_state.other; + } + *pacl = posix_state_to_acl(&effective_acl_state, flags); if (IS_ERR(*pacl)) { ret = PTR_ERR(*pacl); diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 5ae670807449..5ca748309c26 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -297,12 +297,12 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp, } if (d_really_is_positive(child)) { - status = nfs_ok; - /* NFSv4 protocol requires change attributes even though * no change happened. */ - fh_fill_both_attrs(fhp); + status = fh_fill_both_attrs(fhp); + if (status != nfs_ok) + goto out; switch (open->op_createmode) { case NFS4_CREATE_UNCHECKED: @@ -345,7 +345,9 @@ nfsd4_create_file(struct svc_rqst *rqstp, struct svc_fh *fhp, if (!IS_POSIXACL(inode)) iap->ia_mode &= ~current_umask(); - fh_fill_pre_attrs(fhp); + status = fh_fill_pre_attrs(fhp); + if (status != nfs_ok) + goto out; status = nfsd4_vfs_create(fhp, child, open); if (status != nfs_ok) goto out; @@ -380,6 +382,38 @@ out: return status; } +/** + * set_change_info - set up the change_info4 for a reply + * @cinfo: pointer to nfsd4_change_info to be populated + * @fhp: pointer to svc_fh to use as source + * + * Many operations in NFSv4 require change_info4 in the reply. This function + * populates that from the info that we (should!) have already collected. In + * the event that we didn't get any pre-attrs, just zero out both. + */ +static void +set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) +{ + cinfo->atomic = (u32)(fhp->fh_pre_saved && fhp->fh_post_saved && !fhp->fh_no_atomic_attr); + cinfo->before_change = fhp->fh_pre_change; + cinfo->after_change = fhp->fh_post_change; + + /* + * If fetching the pre-change attributes failed, then we should + * have already failed the whole operation. We could have still + * failed to fetch post-change attributes however. + * + * If we didn't get post-op attrs, just zero-out the after + * field since we don't know what it should be. If the pre_saved + * field isn't set for some reason, throw warning and just copy + * whatever is in the after field. + */ + if (WARN_ON_ONCE(!fhp->fh_pre_saved)) + cinfo->before_change = 0; + if (!fhp->fh_post_saved) + cinfo->after_change = cinfo->before_change + 1; +} + static __be32 do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh **resfh) { @@ -424,11 +458,11 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru } else { status = nfsd_lookup(rqstp, current_fh, open->op_fname, open->op_fnamelen, *resfh); - if (!status) + if (status == nfs_ok) /* NFSv4 protocol requires change attributes even though * no change happened. 
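
[Editor's note] set_change_info() above consumes the pre- and post-operation attributes collected by fh_fill_pre_attrs() and fh_fill_post_attrs(), which is why this series makes callers check the pre-attr status instead of ignoring it. A hedged sketch of the resulting flow for a directory-modifying operation; example_dir_modify() is hypothetical and stands in for the real nfsd4_* handlers:

static __be32 example_dir_modify(struct svc_fh *fhp,
				 struct nfsd4_change_info *cinfo)
{
	__be32 status;

	status = fh_fill_pre_attrs(fhp);	/* "before" change attribute */
	if (status != nfs_ok)
		return status;

	/* ... perform the VFS change under fhp here ... */

	/* post-op attrs are best effort; set_change_info() compensates
	 * when they could not be saved */
	fh_fill_post_attrs(fhp);
	set_change_info(cinfo, fhp);		/* atomic only if both sides saved */
	return nfs_ok;
}
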
*/ - fh_fill_both_attrs(current_fh); + status = fh_fill_both_attrs(current_fh); } if (status) goto out; @@ -1313,12 +1347,11 @@ try_again: /* found a match */ if (ni->nsui_busy) { /* wait - and try again */ - prepare_to_wait(&nn->nfsd_ssc_waitq, &wait, - TASK_INTERRUPTIBLE); + prepare_to_wait(&nn->nfsd_ssc_waitq, &wait, TASK_IDLE); spin_unlock(&nn->nfsd_ssc_lock); /* allow 20secs for mount/unmount for now - revisit */ - if (signal_pending(current) || + if (kthread_should_stop() || (schedule_timeout(20*HZ) == 0)) { finish_wait(&nn->nfsd_ssc_waitq, &wait); kfree(work); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index daf305daa751..8534693eb6a4 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -649,6 +649,18 @@ find_readable_file(struct nfs4_file *f) return ret; } +static struct nfsd_file * +find_rw_file(struct nfs4_file *f) +{ + struct nfsd_file *ret; + + spin_lock(&f->fi_lock); + ret = nfsd_file_get(f->fi_fds[O_RDWR]); + spin_unlock(&f->fi_lock); + + return ret; +} + struct nfsd_file * find_any_file(struct nfs4_file *f) { @@ -1144,7 +1156,7 @@ static void block_delegations(struct knfsd_fh *fh) static struct nfs4_delegation * alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp, - struct nfs4_clnt_odstate *odstate) + struct nfs4_clnt_odstate *odstate, u32 dl_type) { struct nfs4_delegation *dp; long n; @@ -1170,7 +1182,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp, INIT_LIST_HEAD(&dp->dl_recall_lru); dp->dl_clnt_odstate = odstate; get_clnt_odstate(odstate); - dp->dl_type = NFS4_OPEN_DELEGATE_READ; + dp->dl_type = dl_type; dp->dl_retries = 1; dp->dl_recalled = false; nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, @@ -5449,8 +5461,9 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, struct nfs4_file *fp = stp->st_stid.sc_file; struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; struct nfs4_delegation *dp; - struct nfsd_file *nf; + struct nfsd_file *nf = NULL; struct file_lock *fl; + u32 dl_type; /* * The fi_had_conflict and nfs_get_existing_delegation checks @@ -5460,15 +5473,35 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, |
