Skip to content

Commit dbee1ad

Browse files
Alexander Aring authored and David Teigland committed
dlm: use fl_owner from lockd
This patch is changing the fl_owner value in case of an nfs lock request to not be the pid of lockd. Instead this patch changes it to be the owner value that nfs is giving us. Currently there exists proved problems with this behaviour. One nfsd server was created to export a gfs2 filesystem mount. Two nfs clients doing a nfs mount of this export. Those two clients should conflict each other operating on the same nfs file. A small test program was written: int main(int argc, const char *argv[]) { struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET, .l_start = 1L, .l_len = 1L, }; int fd; fd = open("filename", O_RDWR | O_CREAT, 0700); printf("try to lock...\n"); fcntl(fd, F_SETLKW, &fl); printf("locked!\n"); getc(stdin); return 0; } Running on both clients at the same time and don't interrupting by pressing any key. It will show that both clients are able to acquire the lock which shouldn't be the case. The issue is here that the fl_owner value is the same and the lock context of both clients should be separated. This patch lets lockd define how to deal with lock contexts and chose hopefully the right fl_owner value. A test after this patch was made and the locks conflicts each other which should be the case. Acked-by: Jeff Layton <jlayton@kernel.org> Signed-off-by: Alexander Aring <aahringo@redhat.com> Signed-off-by: David Teigland <teigland@redhat.com>
1 parent e9cdebb commit dbee1ad

1 file changed

Lines changed: 4 additions & 14 deletions

File tree

fs/dlm/plock.c

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -145,6 +145,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
145145
op->info.number = number;
146146
op->info.start = fl->fl_start;
147147
op->info.end = fl->fl_end;
148+
op->info.owner = (__u64)(long)fl->fl_owner;
148149
/* async handling */
149150
if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
150151
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
@@ -154,9 +155,6 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
154155
goto out;
155156
}
156157

157-
/* fl_owner is lockd which doesn't distinguish
158-
processes on the nfs client */
159-
op->info.owner = (__u64) fl->fl_pid;
160158
op_data->callback = fl->fl_lmops->lm_grant;
161159
locks_init_lock(&op_data->flc);
162160
locks_copy_lock(&op_data->flc, fl);
@@ -168,8 +166,6 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
168166
send_op(op);
169167
rv = FILE_LOCK_DEFERRED;
170168
goto out;
171-
} else {
172-
op->info.owner = (__u64)(long) fl->fl_owner;
173169
}
174170

175171
send_op(op);
@@ -326,10 +322,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
326322
op->info.number = number;
327323
op->info.start = fl->fl_start;
328324
op->info.end = fl->fl_end;
329-
if (fl->fl_lmops && fl->fl_lmops->lm_grant)
330-
op->info.owner = (__u64) fl->fl_pid;
331-
else
332-
op->info.owner = (__u64)(long) fl->fl_owner;
325+
op->info.owner = (__u64)(long)fl->fl_owner;
333326

334327
if (fl->fl_flags & FL_CLOSE) {
335328
op->info.flags |= DLM_PLOCK_FL_CLOSE;
@@ -389,7 +382,7 @@ int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file,
389382
info.number = number;
390383
info.start = fl->fl_start;
391384
info.end = fl->fl_end;
392-
info.owner = (__u64)fl->fl_pid;
385+
info.owner = (__u64)(long)fl->fl_owner;
393386

394387
rv = do_lock_cancel(&info);
395388
switch (rv) {
@@ -450,10 +443,7 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
450443
op->info.number = number;
451444
op->info.start = fl->fl_start;
452445
op->info.end = fl->fl_end;
453-
if (fl->fl_lmops && fl->fl_lmops->lm_grant)
454-
op->info.owner = (__u64) fl->fl_pid;
455-
else
456-
op->info.owner = (__u64)(long) fl->fl_owner;
446+
op->info.owner = (__u64)(long)fl->fl_owner;
457447

458448
send_op(op);
459449
wait_event(recv_wq, (op->done != 0));

0 commit comments

Comments (0)