Loading...
1/*
2 * fs/cifs/misc.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/slab.h>
23#include <linux/ctype.h>
24#include <linux/mempool.h>
25#include "cifspdu.h"
26#include "cifsglob.h"
27#include "cifsproto.h"
28#include "cifs_debug.h"
29#include "smberr.h"
30#include "nterr.h"
31#include "cifs_unicode.h"
32
33extern mempool_t *cifs_sm_req_poolp;
34extern mempool_t *cifs_req_poolp;
35
36/* The xid serves as a useful identifier for each incoming vfs request,
37 in a similar way to the mid which is useful to track each sent smb,
38 and CurrentXid can also provide a running counter (although it
39 will eventually wrap past zero) of the total vfs operations handled
40 since the cifs fs was mounted */
41
42unsigned int
43_GetXid(void)
44{
45 unsigned int xid;
46
47 spin_lock(&GlobalMid_Lock);
48 GlobalTotalActiveXid++;
49
50 /* keep high water mark for number of simultaneous ops in filesystem */
51 if (GlobalTotalActiveXid > GlobalMaxActiveXid)
52 GlobalMaxActiveXid = GlobalTotalActiveXid;
53 if (GlobalTotalActiveXid > 65000)
54 cFYI(1, "warning: more than 65000 requests active");
55 xid = GlobalCurrentXid++;
56 spin_unlock(&GlobalMid_Lock);
57 return xid;
58}
59
60void
61_FreeXid(unsigned int xid)
62{
63 spin_lock(&GlobalMid_Lock);
64 /* if (GlobalTotalActiveXid == 0)
65 BUG(); */
66 GlobalTotalActiveXid--;
67 spin_unlock(&GlobalMid_Lock);
68}
69
70struct cifs_ses *
71sesInfoAlloc(void)
72{
73 struct cifs_ses *ret_buf;
74
75 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
76 if (ret_buf) {
77 atomic_inc(&sesInfoAllocCount);
78 ret_buf->status = CifsNew;
79 ++ret_buf->ses_count;
80 INIT_LIST_HEAD(&ret_buf->smb_ses_list);
81 INIT_LIST_HEAD(&ret_buf->tcon_list);
82 mutex_init(&ret_buf->session_mutex);
83 }
84 return ret_buf;
85}
86
87void
88sesInfoFree(struct cifs_ses *buf_to_free)
89{
90 if (buf_to_free == NULL) {
91 cFYI(1, "Null buffer passed to sesInfoFree");
92 return;
93 }
94
95 atomic_dec(&sesInfoAllocCount);
96 kfree(buf_to_free->serverOS);
97 kfree(buf_to_free->serverDomain);
98 kfree(buf_to_free->serverNOS);
99 if (buf_to_free->password) {
100 memset(buf_to_free->password, 0, strlen(buf_to_free->password));
101 kfree(buf_to_free->password);
102 }
103 kfree(buf_to_free->user_name);
104 kfree(buf_to_free->domainName);
105 kfree(buf_to_free);
106}
107
108struct cifs_tcon *
109tconInfoAlloc(void)
110{
111 struct cifs_tcon *ret_buf;
112 ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
113 if (ret_buf) {
114 atomic_inc(&tconInfoAllocCount);
115 ret_buf->tidStatus = CifsNew;
116 ++ret_buf->tc_count;
117 INIT_LIST_HEAD(&ret_buf->openFileList);
118 INIT_LIST_HEAD(&ret_buf->tcon_list);
119#ifdef CONFIG_CIFS_STATS
120 spin_lock_init(&ret_buf->stat_lock);
121#endif
122 }
123 return ret_buf;
124}
125
126void
127tconInfoFree(struct cifs_tcon *buf_to_free)
128{
129 if (buf_to_free == NULL) {
130 cFYI(1, "Null buffer passed to tconInfoFree");
131 return;
132 }
133 atomic_dec(&tconInfoAllocCount);
134 kfree(buf_to_free->nativeFileSystem);
135 if (buf_to_free->password) {
136 memset(buf_to_free->password, 0, strlen(buf_to_free->password));
137 kfree(buf_to_free->password);
138 }
139 kfree(buf_to_free);
140}
141
142struct smb_hdr *
143cifs_buf_get(void)
144{
145 struct smb_hdr *ret_buf = NULL;
146
147/* We could use negotiated size instead of max_msgsize -
148 but it may be more efficient to always alloc same size
149 albeit slightly larger than necessary and maxbuffersize
150 defaults to this and can not be bigger */
151 ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
152
153 /* clear the first few header bytes */
154 /* for most paths, more is cleared in header_assemble */
155 if (ret_buf) {
156 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
157 atomic_inc(&bufAllocCount);
158#ifdef CONFIG_CIFS_STATS2
159 atomic_inc(&totBufAllocCount);
160#endif /* CONFIG_CIFS_STATS2 */
161 }
162
163 return ret_buf;
164}
165
166void
167cifs_buf_release(void *buf_to_free)
168{
169 if (buf_to_free == NULL) {
170 /* cFYI(1, "Null buffer passed to cifs_buf_release");*/
171 return;
172 }
173 mempool_free(buf_to_free, cifs_req_poolp);
174
175 atomic_dec(&bufAllocCount);
176 return;
177}
178
179struct smb_hdr *
180cifs_small_buf_get(void)
181{
182 struct smb_hdr *ret_buf = NULL;
183
184/* We could use negotiated size instead of max_msgsize -
185 but it may be more efficient to always alloc same size
186 albeit slightly larger than necessary and maxbuffersize
187 defaults to this and can not be bigger */
188 ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
189 if (ret_buf) {
190 /* No need to clear memory here, cleared in header assemble */
191 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
192 atomic_inc(&smBufAllocCount);
193#ifdef CONFIG_CIFS_STATS2
194 atomic_inc(&totSmBufAllocCount);
195#endif /* CONFIG_CIFS_STATS2 */
196
197 }
198 return ret_buf;
199}
200
201void
202cifs_small_buf_release(void *buf_to_free)
203{
204
205 if (buf_to_free == NULL) {
206 cFYI(1, "Null buffer passed to cifs_small_buf_release");
207 return;
208 }
209 mempool_free(buf_to_free, cifs_sm_req_poolp);
210
211 atomic_dec(&smBufAllocCount);
212 return;
213}
214
215/*
216 Find a free multiplex id (SMB mid). Otherwise there could be
217 mid collisions which might cause problems, demultiplexing the
218 wrong response to this request. Multiplex ids could collide if
219 one of a series requests takes much longer than the others, or
220 if a very large number of long lived requests (byte range
221 locks or FindNotify requests) are pending. No more than
222 64K-1 requests can be outstanding at one time. If no
223 mids are available, return zero. A future optimization
224 could make the combination of mids and uid the key we use
225 to demultiplex on (rather than mid alone).
226 In addition to the above check, the cifs demultiplex
227 code already used the command code as a secondary
228 check of the frame and if signing is negotiated the
229 response would be discarded if the mid were the same
230 but the signature was wrong. Since the mid is not put in the
231 pending queue until later (when it is about to be dispatched)
232 we do have to limit the number of outstanding requests
233 to somewhat less than 64K-1 although it is hard to imagine
234 so many threads being in the vfs at one time.
235*/
236__u16 GetNextMid(struct TCP_Server_Info *server)
237{
238 __u16 mid = 0;
239 __u16 last_mid;
240 bool collision;
241
242 spin_lock(&GlobalMid_Lock);
243 last_mid = server->CurrentMid; /* we do not want to loop forever */
244 server->CurrentMid++;
245 /* This nested loop looks more expensive than it is.
246 In practice the list of pending requests is short,
247 fewer than 50, and the mids are likely to be unique
248 on the first pass through the loop unless some request
249 takes longer than the 64 thousand requests before it
250 (and it would also have to have been a request that
251 did not time out) */
252 while (server->CurrentMid != last_mid) {
253 struct mid_q_entry *mid_entry;
254 unsigned int num_mids;
255
256 collision = false;
257 if (server->CurrentMid == 0)
258 server->CurrentMid++;
259
260 num_mids = 0;
261 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
262 ++num_mids;
263 if (mid_entry->mid == server->CurrentMid &&
264 mid_entry->midState == MID_REQUEST_SUBMITTED) {
265 /* This mid is in use, try a different one */
266 collision = true;
267 break;
268 }
269 }
270
271 /*
272 * if we have more than 32k mids in the list, then something
273 * is very wrong. Possibly a local user is trying to DoS the
274 * box by issuing long-running calls and SIGKILL'ing them. If
275 * we get to 2^16 mids then we're in big trouble as this
276 * function could loop forever.
277 *
278 * Go ahead and assign out the mid in this situation, but force
279 * an eventual reconnect to clean out the pending_mid_q.
280 */
281 if (num_mids > 32768)
282 server->tcpStatus = CifsNeedReconnect;
283
284 if (!collision) {
285 mid = server->CurrentMid;
286 break;
287 }
288 server->CurrentMid++;
289 }
290 spin_unlock(&GlobalMid_Lock);
291 return mid;
292}
293
294/* NB: MID can not be set if treeCon not passed in, in that
295 case it is responsbility of caller to set the mid */
296void
297header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
298 const struct cifs_tcon *treeCon, int word_count
299 /* length of fixed section (word count) in two byte units */)
300{
301 struct list_head *temp_item;
302 struct cifs_ses *ses;
303 char *temp = (char *) buffer;
304
305 memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
306
307 buffer->smb_buf_length = cpu_to_be32(
308 (2 * word_count) + sizeof(struct smb_hdr) -
309 4 /* RFC 1001 length field does not count */ +
310 2 /* for bcc field itself */) ;
311
312 buffer->Protocol[0] = 0xFF;
313 buffer->Protocol[1] = 'S';
314 buffer->Protocol[2] = 'M';
315 buffer->Protocol[3] = 'B';
316 buffer->Command = smb_command;
317 buffer->Flags = 0x00; /* case sensitive */
318 buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
319 buffer->Pid = cpu_to_le16((__u16)current->tgid);
320 buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
321 if (treeCon) {
322 buffer->Tid = treeCon->tid;
323 if (treeCon->ses) {
324 if (treeCon->ses->capabilities & CAP_UNICODE)
325 buffer->Flags2 |= SMBFLG2_UNICODE;
326 if (treeCon->ses->capabilities & CAP_STATUS32)
327 buffer->Flags2 |= SMBFLG2_ERR_STATUS;
328
329 /* Uid is not converted */
330 buffer->Uid = treeCon->ses->Suid;
331 buffer->Mid = GetNextMid(treeCon->ses->server);
332 if (multiuser_mount != 0) {
333 /* For the multiuser case, there are few obvious technically */
334 /* possible mechanisms to match the local linux user (uid) */
335 /* to a valid remote smb user (smb_uid): */
336 /* 1) Query Winbind (or other local pam/nss daemon */
337 /* for userid/password/logon_domain or credential */
338 /* 2) Query Winbind for uid to sid to username mapping */
339 /* and see if we have a matching password for existing*/
340 /* session for that user perhas getting password by */
341 /* adding a new pam_cifs module that stores passwords */
342 /* so that the cifs vfs can get at that for all logged*/
343 /* on users */
344 /* 3) (Which is the mechanism we have chosen) */
345 /* Search through sessions to the same server for a */
346 /* a match on the uid that was passed in on mount */
347 /* with the current processes uid (or euid?) and use */
348 /* that smb uid. If no existing smb session for */
349 /* that uid found, use the default smb session ie */
350 /* the smb session for the volume mounted which is */
351 /* the same as would be used if the multiuser mount */
352 /* flag were disabled. */
353
354 /* BB Add support for establishing new tCon and SMB Session */
355 /* with userid/password pairs found on the smb session */
356 /* for other target tcp/ip addresses BB */
357 if (current_fsuid() != treeCon->ses->linux_uid) {
358 cFYI(1, "Multiuser mode and UID "
359 "did not match tcon uid");
360 spin_lock(&cifs_tcp_ses_lock);
361 list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
362 ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
363 if (ses->linux_uid == current_fsuid()) {
364 if (ses->server == treeCon->ses->server) {
365 cFYI(1, "found matching uid substitute right smb_uid");
366 buffer->Uid = ses->Suid;
367 break;
368 } else {
369 /* BB eventually call cifs_setup_session here */
370 cFYI(1, "local UID found but no smb sess with this server exists");
371 }
372 }
373 }
374 spin_unlock(&cifs_tcp_ses_lock);
375 }
376 }
377 }
378 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
379 buffer->Flags2 |= SMBFLG2_DFS;
380 if (treeCon->nocase)
381 buffer->Flags |= SMBFLG_CASELESS;
382 if ((treeCon->ses) && (treeCon->ses->server))
383 if (treeCon->ses->server->sec_mode &
384 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
385 buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
386 }
387
388/* endian conversion of flags is now done just before sending */
389 buffer->WordCount = (char) word_count;
390 return;
391}
392
393static int
394check_smb_hdr(struct smb_hdr *smb, __u16 mid)
395{
396 /* does it have the right SMB "signature" ? */
397 if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
398 cERROR(1, "Bad protocol string signature header 0x%x",
399 *(unsigned int *)smb->Protocol);
400 return 1;
401 }
402
403 /* Make sure that message ids match */
404 if (mid != smb->Mid) {
405 cERROR(1, "Mids do not match. received=%u expected=%u",
406 smb->Mid, mid);
407 return 1;
408 }
409
410 /* if it's a response then accept */
411 if (smb->Flags & SMBFLG_RESPONSE)
412 return 0;
413
414 /* only one valid case where server sends us request */
415 if (smb->Command == SMB_COM_LOCKING_ANDX)
416 return 0;
417
418 cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
419 return 1;
420}
421
422int
423checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
424{
425 __u32 len = be32_to_cpu(smb->smb_buf_length);
426 __u32 clc_len; /* calculated length */
427 cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len);
428
429 if (length < 2 + sizeof(struct smb_hdr)) {
430 if ((length >= sizeof(struct smb_hdr) - 1)
431 && (smb->Status.CifsError != 0)) {
432 smb->WordCount = 0;
433 /* some error cases do not return wct and bcc */
434 return 0;
435 } else if ((length == sizeof(struct smb_hdr) + 1) &&
436 (smb->WordCount == 0)) {
437 char *tmp = (char *)smb;
438 /* Need to work around a bug in two servers here */
439 /* First, check if the part of bcc they sent was zero */
440 if (tmp[sizeof(struct smb_hdr)] == 0) {
441 /* some servers return only half of bcc
442 * on simple responses (wct, bcc both zero)
443 * in particular have seen this on
444 * ulogoffX and FindClose. This leaves
445 * one byte of bcc potentially unitialized
446 */
447 /* zero rest of bcc */
448 tmp[sizeof(struct smb_hdr)+1] = 0;
449 return 0;
450 }
451 cERROR(1, "rcvd invalid byte count (bcc)");
452 } else {
453 cERROR(1, "Length less than smb header size");
454 }
455 return 1;
456 }
457 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
458 cERROR(1, "smb length greater than MaxBufSize, mid=%d",
459 smb->Mid);
460 return 1;
461 }
462
463 if (check_smb_hdr(smb, mid))
464 return 1;
465 clc_len = smbCalcSize(smb);
466
467 if (4 + len != length) {
468 cERROR(1, "Length read does not match RFC1001 length %d",
469 len);
470 return 1;
471 }
472
473 if (4 + len != clc_len) {
474 /* check if bcc wrapped around for large read responses */
475 if ((len > 64 * 1024) && (len > clc_len)) {
476 /* check if lengths match mod 64K */
477 if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
478 return 0; /* bcc wrapped */
479 }
480 cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
481 clc_len, 4 + len, smb->Mid);
482
483 if (4 + len < clc_len) {
484 cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
485 len, smb->Mid);
486 return 1;
487 } else if (len > clc_len + 512) {
488 /*
489 * Some servers (Windows XP in particular) send more
490 * data than the lengths in the SMB packet would
491 * indicate on certain calls (byte range locks and
492 * trans2 find first calls in particular). While the
493 * client can handle such a frame by ignoring the
494 * trailing data, we choose limit the amount of extra
495 * data to 512 bytes.
496 */
497 cERROR(1, "RFC1001 size %u more than 512 bytes larger "
498 "than SMB for mid=%u", len, smb->Mid);
499 return 1;
500 }
501 }
502 return 0;
503}
504
505bool
506is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
507{
508 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
509 struct list_head *tmp, *tmp1, *tmp2;
510 struct cifs_ses *ses;
511 struct cifs_tcon *tcon;
512 struct cifsInodeInfo *pCifsInode;
513 struct cifsFileInfo *netfile;
514
515 cFYI(1, "Checking for oplock break or dnotify response");
516 if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
517 (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
518 struct smb_com_transaction_change_notify_rsp *pSMBr =
519 (struct smb_com_transaction_change_notify_rsp *)buf;
520 struct file_notify_information *pnotify;
521 __u32 data_offset = 0;
522 if (get_bcc(buf) > sizeof(struct file_notify_information)) {
523 data_offset = le32_to_cpu(pSMBr->DataOffset);
524
525 pnotify = (struct file_notify_information *)
526 ((char *)&pSMBr->hdr.Protocol + data_offset);
527 cFYI(1, "dnotify on %s Action: 0x%x",
528 pnotify->FileName, pnotify->Action);
529 /* cifs_dump_mem("Rcvd notify Data: ",buf,
530 sizeof(struct smb_hdr)+60); */
531 return true;
532 }
533 if (pSMBr->hdr.Status.CifsError) {
534 cFYI(1, "notify err 0x%d",
535 pSMBr->hdr.Status.CifsError);
536 return true;
537 }
538 return false;
539 }
540 if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
541 return false;
542 if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
543 /* no sense logging error on invalid handle on oplock
544 break - harmless race between close request and oplock
545 break response is expected from time to time writing out
546 large dirty files cached on the client */
547 if ((NT_STATUS_INVALID_HANDLE) ==
548 le32_to_cpu(pSMB->hdr.Status.CifsError)) {
549 cFYI(1, "invalid handle on oplock break");
550 return true;
551 } else if (ERRbadfid ==
552 le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
553 return true;
554 } else {
555 return false; /* on valid oplock brk we get "request" */
556 }
557 }
558 if (pSMB->hdr.WordCount != 8)
559 return false;
560
561 cFYI(1, "oplock type 0x%d level 0x%d",
562 pSMB->LockType, pSMB->OplockLevel);
563 if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
564 return false;
565
566 /* look up tcon based on tid & uid */
567 spin_lock(&cifs_tcp_ses_lock);
568 list_for_each(tmp, &srv->smb_ses_list) {
569 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
570 list_for_each(tmp1, &ses->tcon_list) {
571 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
572 if (tcon->tid != buf->Tid)
573 continue;
574
575 cifs_stats_inc(&tcon->num_oplock_brks);
576 spin_lock(&cifs_file_list_lock);
577 list_for_each(tmp2, &tcon->openFileList) {
578 netfile = list_entry(tmp2, struct cifsFileInfo,
579 tlist);
580 if (pSMB->Fid != netfile->netfid)
581 continue;
582
583 cFYI(1, "file id match, oplock break");
584 pCifsInode = CIFS_I(netfile->dentry->d_inode);
585
586 cifs_set_oplock_level(pCifsInode,
587 pSMB->OplockLevel ? OPLOCK_READ : 0);
588 queue_work(system_nrt_wq,
589 &netfile->oplock_break);
590 netfile->oplock_break_cancelled = false;
591
592 spin_unlock(&cifs_file_list_lock);
593 spin_unlock(&cifs_tcp_ses_lock);
594 return true;
595 }
596 spin_unlock(&cifs_file_list_lock);
597 spin_unlock(&cifs_tcp_ses_lock);
598 cFYI(1, "No matching file for oplock break");
599 return true;
600 }
601 }
602 spin_unlock(&cifs_tcp_ses_lock);
603 cFYI(1, "Can not process oplock break for non-existent connection");
604 return true;
605}
606
607void
608dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
609{
610 int i, j;
611 char debug_line[17];
612 unsigned char *buffer;
613
614 if (traceSMB == 0)
615 return;
616
617 buffer = (unsigned char *) smb_buf;
618 for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
619 if (i % 8 == 0) {
620 /* have reached the beginning of line */
621 printk(KERN_DEBUG "| ");
622 j = 0;
623 }
624 printk("%0#4x ", buffer[i]);
625 debug_line[2 * j] = ' ';
626 if (isprint(buffer[i]))
627 debug_line[1 + (2 * j)] = buffer[i];
628 else
629 debug_line[1 + (2 * j)] = '_';
630
631 if (i % 8 == 7) {
632 /* reached end of line, time to print ascii */
633 debug_line[16] = 0;
634 printk(" | %s\n", debug_line);
635 }
636 }
637 for (; j < 8; j++) {
638 printk(" ");
639 debug_line[2 * j] = ' ';
640 debug_line[1 + (2 * j)] = ' ';
641 }
642 printk(" | %s\n", debug_line);
643 return;
644}
645
646void
647cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
648{
649 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
650 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
651 cERROR(1, "Autodisabling the use of server inode numbers on "
652 "%s. This server doesn't seem to support them "
653 "properly. Hardlinks will not be recognized on this "
654 "mount. Consider mounting with the \"noserverino\" "
655 "option to silence this message.",
656 cifs_sb_master_tcon(cifs_sb)->treeName);
657 }
658}
659
660void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
661{
662 oplock &= 0xF;
663
664 if (oplock == OPLOCK_EXCLUSIVE) {
665 cinode->clientCanCacheAll = true;
666 cinode->clientCanCacheRead = true;
667 cFYI(1, "Exclusive Oplock granted on inode %p",
668 &cinode->vfs_inode);
669 } else if (oplock == OPLOCK_READ) {
670 cinode->clientCanCacheAll = false;
671 cinode->clientCanCacheRead = true;
672 cFYI(1, "Level II Oplock granted on inode %p",
673 &cinode->vfs_inode);
674 } else {
675 cinode->clientCanCacheAll = false;
676 cinode->clientCanCacheRead = false;
677 }
678}
1// SPDX-License-Identifier: LGPL-2.1
2/*
3 * fs/cifs/misc.c
4 *
5 * Copyright (C) International Business Machines Corp., 2002,2008
6 * Author(s): Steve French (sfrench@us.ibm.com)
7 *
8 */
9
10#include <linux/slab.h>
11#include <linux/ctype.h>
12#include <linux/mempool.h>
13#include <linux/vmalloc.h>
14#include "cifspdu.h"
15#include "cifsglob.h"
16#include "cifsproto.h"
17#include "cifs_debug.h"
18#include "smberr.h"
19#include "nterr.h"
20#include "cifs_unicode.h"
21#include "smb2pdu.h"
22#include "cifsfs.h"
23#ifdef CONFIG_CIFS_DFS_UPCALL
24#include "dns_resolve.h"
25#endif
26#include "fs_context.h"
27
28extern mempool_t *cifs_sm_req_poolp;
29extern mempool_t *cifs_req_poolp;
30
31/* The xid serves as a useful identifier for each incoming vfs request,
32 in a similar way to the mid which is useful to track each sent smb,
33 and CurrentXid can also provide a running counter (although it
34 will eventually wrap past zero) of the total vfs operations handled
35 since the cifs fs was mounted */
36
37unsigned int
38_get_xid(void)
39{
40 unsigned int xid;
41
42 spin_lock(&GlobalMid_Lock);
43 GlobalTotalActiveXid++;
44
45 /* keep high water mark for number of simultaneous ops in filesystem */
46 if (GlobalTotalActiveXid > GlobalMaxActiveXid)
47 GlobalMaxActiveXid = GlobalTotalActiveXid;
48 if (GlobalTotalActiveXid > 65000)
49 cifs_dbg(FYI, "warning: more than 65000 requests active\n");
50 xid = GlobalCurrentXid++;
51 spin_unlock(&GlobalMid_Lock);
52 return xid;
53}
54
55void
56_free_xid(unsigned int xid)
57{
58 spin_lock(&GlobalMid_Lock);
59 /* if (GlobalTotalActiveXid == 0)
60 BUG(); */
61 GlobalTotalActiveXid--;
62 spin_unlock(&GlobalMid_Lock);
63}
64
65struct cifs_ses *
66sesInfoAlloc(void)
67{
68 struct cifs_ses *ret_buf;
69
70 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
71 if (ret_buf) {
72 atomic_inc(&sesInfoAllocCount);
73 ret_buf->status = CifsNew;
74 ++ret_buf->ses_count;
75 INIT_LIST_HEAD(&ret_buf->smb_ses_list);
76 INIT_LIST_HEAD(&ret_buf->tcon_list);
77 mutex_init(&ret_buf->session_mutex);
78 spin_lock_init(&ret_buf->iface_lock);
79 }
80 return ret_buf;
81}
82
83void
84sesInfoFree(struct cifs_ses *buf_to_free)
85{
86 if (buf_to_free == NULL) {
87 cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
88 return;
89 }
90
91 atomic_dec(&sesInfoAllocCount);
92 kfree(buf_to_free->serverOS);
93 kfree(buf_to_free->serverDomain);
94 kfree(buf_to_free->serverNOS);
95 kfree_sensitive(buf_to_free->password);
96 kfree(buf_to_free->user_name);
97 kfree(buf_to_free->domainName);
98 kfree_sensitive(buf_to_free->auth_key.response);
99 kfree(buf_to_free->iface_list);
100 kfree_sensitive(buf_to_free);
101}
102
103struct cifs_tcon *
104tconInfoAlloc(void)
105{
106 struct cifs_tcon *ret_buf;
107
108 ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
109 if (!ret_buf)
110 return NULL;
111 ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
112 if (!ret_buf->crfid.fid) {
113 kfree(ret_buf);
114 return NULL;
115 }
116
117 atomic_inc(&tconInfoAllocCount);
118 ret_buf->tidStatus = CifsNew;
119 ++ret_buf->tc_count;
120 INIT_LIST_HEAD(&ret_buf->openFileList);
121 INIT_LIST_HEAD(&ret_buf->tcon_list);
122 spin_lock_init(&ret_buf->open_file_lock);
123 mutex_init(&ret_buf->crfid.fid_mutex);
124 spin_lock_init(&ret_buf->stat_lock);
125 atomic_set(&ret_buf->num_local_opens, 0);
126 atomic_set(&ret_buf->num_remote_opens, 0);
127
128 return ret_buf;
129}
130
131void
132tconInfoFree(struct cifs_tcon *buf_to_free)
133{
134 if (buf_to_free == NULL) {
135 cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
136 return;
137 }
138 atomic_dec(&tconInfoAllocCount);
139 kfree(buf_to_free->nativeFileSystem);
140 kfree_sensitive(buf_to_free->password);
141 kfree(buf_to_free->crfid.fid);
142#ifdef CONFIG_CIFS_DFS_UPCALL
143 kfree(buf_to_free->dfs_path);
144#endif
145 kfree(buf_to_free);
146}
147
148struct smb_hdr *
149cifs_buf_get(void)
150{
151 struct smb_hdr *ret_buf = NULL;
152 /*
153 * SMB2 header is bigger than CIFS one - no problems to clean some
154 * more bytes for CIFS.
155 */
156 size_t buf_size = sizeof(struct smb2_sync_hdr);
157
158 /*
159 * We could use negotiated size instead of max_msgsize -
160 * but it may be more efficient to always alloc same size
161 * albeit slightly larger than necessary and maxbuffersize
162 * defaults to this and can not be bigger.
163 */
164 ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
165
166 /* clear the first few header bytes */
167 /* for most paths, more is cleared in header_assemble */
168 memset(ret_buf, 0, buf_size + 3);
169 atomic_inc(&bufAllocCount);
170#ifdef CONFIG_CIFS_STATS2
171 atomic_inc(&totBufAllocCount);
172#endif /* CONFIG_CIFS_STATS2 */
173
174 return ret_buf;
175}
176
177void
178cifs_buf_release(void *buf_to_free)
179{
180 if (buf_to_free == NULL) {
181 /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
182 return;
183 }
184 mempool_free(buf_to_free, cifs_req_poolp);
185
186 atomic_dec(&bufAllocCount);
187 return;
188}
189
190struct smb_hdr *
191cifs_small_buf_get(void)
192{
193 struct smb_hdr *ret_buf = NULL;
194
195/* We could use negotiated size instead of max_msgsize -
196 but it may be more efficient to always alloc same size
197 albeit slightly larger than necessary and maxbuffersize
198 defaults to this and can not be bigger */
199 ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
200 /* No need to clear memory here, cleared in header assemble */
201 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
202 atomic_inc(&smBufAllocCount);
203#ifdef CONFIG_CIFS_STATS2
204 atomic_inc(&totSmBufAllocCount);
205#endif /* CONFIG_CIFS_STATS2 */
206
207 return ret_buf;
208}
209
210void
211cifs_small_buf_release(void *buf_to_free)
212{
213
214 if (buf_to_free == NULL) {
215 cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
216 return;
217 }
218 mempool_free(buf_to_free, cifs_sm_req_poolp);
219
220 atomic_dec(&smBufAllocCount);
221 return;
222}
223
224void
225free_rsp_buf(int resp_buftype, void *rsp)
226{
227 if (resp_buftype == CIFS_SMALL_BUFFER)
228 cifs_small_buf_release(rsp);
229 else if (resp_buftype == CIFS_LARGE_BUFFER)
230 cifs_buf_release(rsp);
231}
232
233/* NB: MID can not be set if treeCon not passed in, in that
234 case it is responsbility of caller to set the mid */
235void
236header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
237 const struct cifs_tcon *treeCon, int word_count
238 /* length of fixed section (word count) in two byte units */)
239{
240 char *temp = (char *) buffer;
241
242 memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
243
244 buffer->smb_buf_length = cpu_to_be32(
245 (2 * word_count) + sizeof(struct smb_hdr) -
246 4 /* RFC 1001 length field does not count */ +
247 2 /* for bcc field itself */) ;
248
249 buffer->Protocol[0] = 0xFF;
250 buffer->Protocol[1] = 'S';
251 buffer->Protocol[2] = 'M';
252 buffer->Protocol[3] = 'B';
253 buffer->Command = smb_command;
254 buffer->Flags = 0x00; /* case sensitive */
255 buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
256 buffer->Pid = cpu_to_le16((__u16)current->tgid);
257 buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
258 if (treeCon) {
259 buffer->Tid = treeCon->tid;
260 if (treeCon->ses) {
261 if (treeCon->ses->capabilities & CAP_UNICODE)
262 buffer->Flags2 |= SMBFLG2_UNICODE;
263 if (treeCon->ses->capabilities & CAP_STATUS32)
264 buffer->Flags2 |= SMBFLG2_ERR_STATUS;
265
266 /* Uid is not converted */
267 buffer->Uid = treeCon->ses->Suid;
268 buffer->Mid = get_next_mid(treeCon->ses->server);
269 }
270 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
271 buffer->Flags2 |= SMBFLG2_DFS;
272 if (treeCon->nocase)
273 buffer->Flags |= SMBFLG_CASELESS;
274 if ((treeCon->ses) && (treeCon->ses->server))
275 if (treeCon->ses->server->sign)
276 buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
277 }
278
279/* endian conversion of flags is now done just before sending */
280 buffer->WordCount = (char) word_count;
281 return;
282}
283
284static int
285check_smb_hdr(struct smb_hdr *smb)
286{
287 /* does it have the right SMB "signature" ? */
288 if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
289 cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
290 *(unsigned int *)smb->Protocol);
291 return 1;
292 }
293
294 /* if it's a response then accept */
295 if (smb->Flags & SMBFLG_RESPONSE)
296 return 0;
297
298 /* only one valid case where server sends us request */
299 if (smb->Command == SMB_COM_LOCKING_ANDX)
300 return 0;
301
302 cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
303 get_mid(smb));
304 return 1;
305}
306
307int
308checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
309{
310 struct smb_hdr *smb = (struct smb_hdr *)buf;
311 __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
312 __u32 clc_len; /* calculated length */
313 cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
314 total_read, rfclen);
315
316 /* is this frame too small to even get to a BCC? */
317 if (total_read < 2 + sizeof(struct smb_hdr)) {
318 if ((total_read >= sizeof(struct smb_hdr) - 1)
319 && (smb->Status.CifsError != 0)) {
320 /* it's an error return */
321 smb->WordCount = 0;
322 /* some error cases do not return wct and bcc */
323 return 0;
324 } else if ((total_read == sizeof(struct smb_hdr) + 1) &&
325 (smb->WordCount == 0)) {
326 char *tmp = (char *)smb;
327 /* Need to work around a bug in two servers here */
328 /* First, check if the part of bcc they sent was zero */
329 if (tmp[sizeof(struct smb_hdr)] == 0) {
330 /* some servers return only half of bcc
331 * on simple responses (wct, bcc both zero)
332 * in particular have seen this on
333 * ulogoffX and FindClose. This leaves
334 * one byte of bcc potentially unitialized
335 */
336 /* zero rest of bcc */
337 tmp[sizeof(struct smb_hdr)+1] = 0;
338 return 0;
339 }
340 cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
341 } else {
342 cifs_dbg(VFS, "Length less than smb header size\n");
343 }
344 return -EIO;
345 }
346
347 /* otherwise, there is enough to get to the BCC */
348 if (check_smb_hdr(smb))
349 return -EIO;
350 clc_len = smbCalcSize(smb, server);
351
352 if (4 + rfclen != total_read) {
353 cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
354 rfclen);
355 return -EIO;
356 }
357
358 if (4 + rfclen != clc_len) {
359 __u16 mid = get_mid(smb);
360 /* check if bcc wrapped around for large read responses */
361 if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
362 /* check if lengths match mod 64K */
363 if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
364 return 0; /* bcc wrapped */
365 }
366 cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
367 clc_len, 4 + rfclen, mid);
368
369 if (4 + rfclen < clc_len) {
370 cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
371 rfclen, mid);
372 return -EIO;
373 } else if (rfclen > clc_len + 512) {
374 /*
375 * Some servers (Windows XP in particular) send more
376 * data than the lengths in the SMB packet would
377 * indicate on certain calls (byte range locks and
378 * trans2 find first calls in particular). While the
379 * client can handle such a frame by ignoring the
380 * trailing data, we choose limit the amount of extra
381 * data to 512 bytes.
382 */
383 cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
384 rfclen, mid);
385 return -EIO;
386 }
387 }
388 return 0;
389}
390
391bool
392is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
393{
394 struct smb_hdr *buf = (struct smb_hdr *)buffer;
395 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
396 struct list_head *tmp, *tmp1, *tmp2;
397 struct cifs_ses *ses;
398 struct cifs_tcon *tcon;
399 struct cifsInodeInfo *pCifsInode;
400 struct cifsFileInfo *netfile;
401
402 cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
403 if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
404 (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
405 struct smb_com_transaction_change_notify_rsp *pSMBr =
406 (struct smb_com_transaction_change_notify_rsp *)buf;
407 struct file_notify_information *pnotify;
408 __u32 data_offset = 0;
409 size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
410
411 if (get_bcc(buf) > sizeof(struct file_notify_information)) {
412 data_offset = le32_to_cpu(pSMBr->DataOffset);
413
414 if (data_offset >
415 len - sizeof(struct file_notify_information)) {
416 cifs_dbg(FYI, "Invalid data_offset %u\n",
417 data_offset);
418 return true;
419 }
420 pnotify = (struct file_notify_information *)
421 ((char *)&pSMBr->hdr.Protocol + data_offset);
422 cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
423 pnotify->FileName, pnotify->Action);
424 /* cifs_dump_mem("Rcvd notify Data: ",buf,
425 sizeof(struct smb_hdr)+60); */
426 return true;
427 }
428 if (pSMBr->hdr.Status.CifsError) {
429 cifs_dbg(FYI, "notify err 0x%x\n",
430 pSMBr->hdr.Status.CifsError);
431 return true;
432 }
433 return false;
434 }
435 if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
436 return false;
437 if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
438 /* no sense logging error on invalid handle on oplock
439 break - harmless race between close request and oplock
440 break response is expected from time to time writing out
441 large dirty files cached on the client */
442 if ((NT_STATUS_INVALID_HANDLE) ==
443 le32_to_cpu(pSMB->hdr.Status.CifsError)) {
444 cifs_dbg(FYI, "Invalid handle on oplock break\n");
445 return true;
446 } else if (ERRbadfid ==
447 le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
448 return true;
449 } else {
450 return false; /* on valid oplock brk we get "request" */
451 }
452 }
453 if (pSMB->hdr.WordCount != 8)
454 return false;
455
456 cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
457 pSMB->LockType, pSMB->OplockLevel);
458 if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
459 return false;
460
461 /* look up tcon based on tid & uid */
462 spin_lock(&cifs_tcp_ses_lock);
463 list_for_each(tmp, &srv->smb_ses_list) {
464 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
465 list_for_each(tmp1, &ses->tcon_list) {
466 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
467 if (tcon->tid != buf->Tid)
468 continue;
469
470 cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
471 spin_lock(&tcon->open_file_lock);
472 list_for_each(tmp2, &tcon->openFileList) {
473 netfile = list_entry(tmp2, struct cifsFileInfo,
474 tlist);
475 if (pSMB->Fid != netfile->fid.netfid)
476 continue;
477
478 cifs_dbg(FYI, "file id match, oplock break\n");
479 pCifsInode = CIFS_I(d_inode(netfile->dentry));
480
481 set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
482 &pCifsInode->flags);
483
484 netfile->oplock_epoch = 0;
485 netfile->oplock_level = pSMB->OplockLevel;
486 netfile->oplock_break_cancelled = false;
487 cifs_queue_oplock_break(netfile);
488
489 spin_unlock(&tcon->open_file_lock);
490 spin_unlock(&cifs_tcp_ses_lock);
491 return true;
492 }
493 spin_unlock(&tcon->open_file_lock);
494 spin_unlock(&cifs_tcp_ses_lock);
495 cifs_dbg(FYI, "No matching file for oplock break\n");
496 return true;
497 }
498 }
499 spin_unlock(&cifs_tcp_ses_lock);
500 cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
501 return true;
502}
503
504void
505dump_smb(void *buf, int smb_buf_length)
506{
507 if (traceSMB == 0)
508 return;
509
510 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
511 smb_buf_length, true);
512}
513
514void
515cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
516{
517 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
518 struct cifs_tcon *tcon = NULL;
519
520 if (cifs_sb->master_tlink)
521 tcon = cifs_sb_master_tcon(cifs_sb);
522
523 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
524 cifs_sb->mnt_cifs_serverino_autodisabled = true;
525 cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
526 tcon ? tcon->treeName : "new server");
527 cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
528 cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
529
530 }
531}
532
533void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
534{
535 oplock &= 0xF;
536
537 if (oplock == OPLOCK_EXCLUSIVE) {
538 cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
539 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
540 &cinode->vfs_inode);
541 } else if (oplock == OPLOCK_READ) {
542 cinode->oplock = CIFS_CACHE_READ_FLG;
543 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
544 &cinode->vfs_inode);
545 } else
546 cinode->oplock = 0;
547}
548
549/*
550 * We wait for oplock breaks to be processed before we attempt to perform
551 * writes.
552 */
553int cifs_get_writer(struct cifsInodeInfo *cinode)
554{
555 int rc;
556
557start:
558 rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
559 TASK_KILLABLE);
560 if (rc)
561 return rc;
562
563 spin_lock(&cinode->writers_lock);
564 if (!cinode->writers)
565 set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
566 cinode->writers++;
567 /* Check to see if we have started servicing an oplock break */
568 if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
569 cinode->writers--;
570 if (cinode->writers == 0) {
571 clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
572 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
573 }
574 spin_unlock(&cinode->writers_lock);
575 goto start;
576 }
577 spin_unlock(&cinode->writers_lock);
578 return 0;
579}
580
581void cifs_put_writer(struct cifsInodeInfo *cinode)
582{
583 spin_lock(&cinode->writers_lock);
584 cinode->writers--;
585 if (cinode->writers == 0) {
586 clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
587 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
588 }
589 spin_unlock(&cinode->writers_lock);
590}
591
592/**
593 * cifs_queue_oplock_break - queue the oplock break handler for cfile
594 *
595 * This function is called from the demultiplex thread when it
596 * receives an oplock break for @cfile.
597 *
598 * Assumes the tcon->open_file_lock is held.
599 * Assumes cfile->file_info_lock is NOT held.
600 */
601void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
602{
603 /*
604 * Bump the handle refcount now while we hold the
605 * open_file_lock to enforce the validity of it for the oplock
606 * break handler. The matching put is done at the end of the
607 * handler.
608 */
609 cifsFileInfo_get(cfile);
610
611 queue_work(cifsoplockd_wq, &cfile->oplock_break);
612}
613
614void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
615{
616 clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
617 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
618}
619
620bool
621backup_cred(struct cifs_sb_info *cifs_sb)
622{
623 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
624 if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
625 return true;
626 }
627 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
628 if (in_group_p(cifs_sb->ctx->backupgid))
629 return true;
630 }
631
632 return false;
633}
634
635void
636cifs_del_pending_open(struct cifs_pending_open *open)
637{
638 spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
639 list_del(&open->olist);
640 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
641}
642
643void
644cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
645 struct cifs_pending_open *open)
646{
647 memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
648 open->oplock = CIFS_OPLOCK_NO_CHANGE;
649 open->tlink = tlink;
650 fid->pending_open = open;
651 list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
652}
653
654void
655cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
656 struct cifs_pending_open *open)
657{
658 spin_lock(&tlink_tcon(tlink)->open_file_lock);
659 cifs_add_pending_open_locked(fid, tlink, open);
660 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
661}
662
663/*
664 * Critical section which runs after acquiring deferred_lock.
665 * As there is no reference count on cifs_deferred_close, pdclose
666 * should not be used outside deferred_lock.
667 */
668bool
669cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
670{
671 struct cifs_deferred_close *dclose;
672
673 list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
674 if ((dclose->netfid == cfile->fid.netfid) &&
675 (dclose->persistent_fid == cfile->fid.persistent_fid) &&
676 (dclose->volatile_fid == cfile->fid.volatile_fid)) {
677 *pdclose = dclose;
678 return true;
679 }
680 }
681 return false;
682}
683
684/*
685 * Critical section which runs after acquiring deferred_lock.
686 */
687void
688cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
689{
690 bool is_deferred = false;
691 struct cifs_deferred_close *pdclose;
692
693 is_deferred = cifs_is_deferred_close(cfile, &pdclose);
694 if (is_deferred) {
695 kfree(dclose);
696 return;
697 }
698
699 dclose->tlink = cfile->tlink;
700 dclose->netfid = cfile->fid.netfid;
701 dclose->persistent_fid = cfile->fid.persistent_fid;
702 dclose->volatile_fid = cfile->fid.volatile_fid;
703 list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
704}
705
706/*
707 * Critical section which runs after acquiring deferred_lock.
708 */
709void
710cifs_del_deferred_close(struct cifsFileInfo *cfile)
711{
712 bool is_deferred = false;
713 struct cifs_deferred_close *dclose;
714
715 is_deferred = cifs_is_deferred_close(cfile, &dclose);
716 if (!is_deferred)
717 return;
718 list_del(&dclose->dlist);
719 kfree(dclose);
720}
721
722void
723cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
724{
725 struct cifsFileInfo *cfile = NULL;
726 struct file_list *tmp_list, *tmp_next_list;
727 struct list_head file_head;
728
729 if (cifs_inode == NULL)
730 return;
731
732 INIT_LIST_HEAD(&file_head);
733 spin_lock(&cifs_inode->open_file_lock);
734 list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
735 if (delayed_work_pending(&cfile->deferred)) {
736 if (cancel_delayed_work(&cfile->deferred)) {
737 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
738 if (tmp_list == NULL)
739 break;
740 tmp_list->cfile = cfile;
741 list_add_tail(&tmp_list->list, &file_head);
742 }
743 }
744 }
745 spin_unlock(&cifs_inode->open_file_lock);
746
747 list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
748 _cifsFileInfo_put(tmp_list->cfile, true, false);
749 list_del(&tmp_list->list);
750 kfree(tmp_list);
751 }
752}
753
754void
755cifs_close_all_deferred_files(struct cifs_tcon *tcon)
756{
757 struct cifsFileInfo *cfile;
758 struct list_head *tmp;
759 struct file_list *tmp_list, *tmp_next_list;
760 struct list_head file_head;
761
762 INIT_LIST_HEAD(&file_head);
763 spin_lock(&tcon->open_file_lock);
764 list_for_each(tmp, &tcon->openFileList) {
765 cfile = list_entry(tmp, struct cifsFileInfo, tlist);
766 if (delayed_work_pending(&cfile->deferred)) {
767 if (cancel_delayed_work(&cfile->deferred)) {
768 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
769 if (tmp_list == NULL)
770 break;
771 tmp_list->cfile = cfile;
772 list_add_tail(&tmp_list->list, &file_head);
773 }
774 }
775 }
776 spin_unlock(&tcon->open_file_lock);
777
778 list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
779 _cifsFileInfo_put(tmp_list->cfile, true, false);
780 list_del(&tmp_list->list);
781 kfree(tmp_list);
782 }
783}
784
785/* parses DFS refferal V3 structure
786 * caller is responsible for freeing target_nodes
787 * returns:
788 * - on success - 0
789 * - on failure - errno
790 */
791int
792parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
793 unsigned int *num_of_nodes,
794 struct dfs_info3_param **target_nodes,
795 const struct nls_table *nls_codepage, int remap,
796 const char *searchName, bool is_unicode)
797{
798 int i, rc = 0;
799 char *data_end;
800 struct dfs_referral_level_3 *ref;
801
802 *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
803
804 if (*num_of_nodes < 1) {
805 cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
806 *num_of_nodes);
807 rc = -EINVAL;
808 goto parse_DFS_referrals_exit;
809 }
810
811 ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
812 if (ref->VersionNumber != cpu_to_le16(3)) {
813 cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
814 le16_to_cpu(ref->VersionNumber));
815 rc = -EINVAL;
816 goto parse_DFS_referrals_exit;
817 }
818
819 /* get the upper boundary of the resp buffer */
820 data_end = (char *)rsp + rsp_size;
821
822 cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
823 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));
824
825 *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
826 GFP_KERNEL);
827 if (*target_nodes == NULL) {
828 rc = -ENOMEM;
829 goto parse_DFS_referrals_exit;
830 }
831
832 /* collect necessary data from referrals */
833 for (i = 0; i < *num_of_nodes; i++) {
834 char *temp;
835 int max_len;
836 struct dfs_info3_param *node = (*target_nodes)+i;
837
838 node->flags = le32_to_cpu(rsp->DFSFlags);
839 if (is_unicode) {
840 __le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
841 GFP_KERNEL);
842 if (tmp == NULL) {
843 rc = -ENOMEM;
844 goto parse_DFS_referrals_exit;
845 }
846 cifsConvertToUTF16((__le16 *) tmp, searchName,
847 PATH_MAX, nls_codepage, remap);
848 node->path_consumed = cifs_utf16_bytes(tmp,
849 le16_to_cpu(rsp->PathConsumed),
850 nls_codepage);
851 kfree(tmp);
852 } else
853 node->path_consumed = le16_to_cpu(rsp->PathConsumed);
854
855 node->server_type = le16_to_cpu(ref->ServerType);
856 node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);
857
858 /* copy DfsPath */
859 temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
860 max_len = data_end - temp;
861 node->path_name = cifs_strndup_from_utf16(temp, max_len,
862 is_unicode, nls_codepage);
863 if (!node->path_name) {
864 rc = -ENOMEM;
865 goto parse_DFS_referrals_exit;
866 }
867
868 /* copy link target UNC */
869 temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
870 max_len = data_end - temp;
871 node->node_name = cifs_strndup_from_utf16(temp, max_len,
872 is_unicode, nls_codepage);
873 if (!node->node_name) {
874 rc = -ENOMEM;
875 goto parse_DFS_referrals_exit;
876 }
877
878 node->ttl = le32_to_cpu(ref->TimeToLive);
879
880 ref++;
881 }
882
883parse_DFS_referrals_exit:
884 if (rc) {
885 free_dfs_info_array(*target_nodes, *num_of_nodes);
886 *target_nodes = NULL;
887 *num_of_nodes = 0;
888 }
889 return rc;
890}
891
892struct cifs_aio_ctx *
893cifs_aio_ctx_alloc(void)
894{
895 struct cifs_aio_ctx *ctx;
896
897 /*
898 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
899 * to false so that we know when we have to unreference pages within
900 * cifs_aio_ctx_release()
901 */
902 ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
903 if (!ctx)
904 return NULL;
905
906 INIT_LIST_HEAD(&ctx->list);
907 mutex_init(&ctx->aio_mutex);
908 init_completion(&ctx->done);
909 kref_init(&ctx->refcount);
910 return ctx;
911}
912
913void
914cifs_aio_ctx_release(struct kref *refcount)
915{
916 struct cifs_aio_ctx *ctx = container_of(refcount,
917 struct cifs_aio_ctx, refcount);
918
919 cifsFileInfo_put(ctx->cfile);
920
921 /*
922 * ctx->bv is only set if setup_aio_ctx_iter() was call successfuly
923 * which means that iov_iter_get_pages() was a success and thus that
924 * we have taken reference on pages.
925 */
926 if (ctx->bv) {
927 unsigned i;
928
929 for (i = 0; i < ctx->npages; i++) {
930 if (ctx->should_dirty)
931 set_page_dirty(ctx->bv[i].bv_page);
932 put_page(ctx->bv[i].bv_page);
933 }
934 kvfree(ctx->bv);
935 }
936
937 kfree(ctx);
938}
939
940#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
941
942int
943setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
944{
945 ssize_t rc;
946 unsigned int cur_npages;
947 unsigned int npages = 0;
948 unsigned int i;
949 size_t len;
950 size_t count = iov_iter_count(iter);
951 unsigned int saved_len;
952 size_t start;
953 unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
954 struct page **pages = NULL;
955 struct bio_vec *bv = NULL;
956
957 if (iov_iter_is_kvec(iter)) {
958 memcpy(&ctx->iter, iter, sizeof(*iter));
959 ctx->len = count;
960 iov_iter_advance(iter, count);
961 return 0;
962 }
963
964 if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
965 bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
966
967 if (!bv) {
968 bv = vmalloc(array_size(max_pages, sizeof(*bv)));
969 if (!bv)
970 return -ENOMEM;
971 }
972
973 if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
974 pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
975
976 if (!pages) {
977 pages = vmalloc(array_size(max_pages, sizeof(*pages)));
978 if (!pages) {
979 kvfree(bv);
980 return -ENOMEM;
981 }
982 }
983
984 saved_len = count;
985
986 while (count && npages < max_pages) {
987 rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
988 if (rc < 0) {
989 cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
990 break;
991 }
992
993 if (rc > count) {
994 cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
995 count);
996 break;
997 }
998
999 iov_iter_advance(iter, rc);
1000 count -= rc;
1001 rc += start;
1002 cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
1003
1004 if (npages + cur_npages > max_pages) {
1005 cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
1006 npages + cur_npages, max_pages);
1007 break;
1008 }
1009
1010 for (i = 0; i < cur_npages; i++) {
1011 len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
1012 bv[npages + i].bv_page = pages[i];
1013 bv[npages + i].bv_offset = start;
1014 bv[npages + i].bv_len = len - start;
1015 rc -= len;
1016 start = 0;
1017 }
1018
1019 npages += cur_npages;
1020 }
1021
1022 kvfree(pages);
1023 ctx->bv = bv;
1024 ctx->len = saved_len - count;
1025 ctx->npages = npages;
1026 iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
1027 return 0;
1028}
1029
1030/**
1031 * cifs_alloc_hash - allocate hash and hash context together
1032 *
1033 * The caller has to make sure @sdesc is initialized to either NULL or
1034 * a valid context. Both can be freed via cifs_free_hash().
1035 */
1036int
1037cifs_alloc_hash(const char *name,
1038 struct crypto_shash **shash, struct sdesc **sdesc)
1039{
1040 int rc = 0;
1041 size_t size;
1042
1043 if (*sdesc != NULL)
1044 return 0;
1045
1046 *shash = crypto_alloc_shash(name, 0, 0);
1047 if (IS_ERR(*shash)) {
1048 cifs_dbg(VFS, "Could not allocate crypto %s\n", name);
1049 rc = PTR_ERR(*shash);
1050 *shash = NULL;
1051 *sdesc = NULL;
1052 return rc;
1053 }
1054
1055 size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
1056 *sdesc = kmalloc(size, GFP_KERNEL);
1057 if (*sdesc == NULL) {
1058 cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
1059 crypto_free_shash(*shash);
1060 *shash = NULL;
1061 return -ENOMEM;
1062 }
1063
1064 (*sdesc)->shash.tfm = *shash;
1065 return 0;
1066}
1067
1068/**
1069 * cifs_free_hash - free hash and hash context together
1070 *
1071 * Freeing a NULL hash or context is safe.
1072 */
1073void
1074cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
1075{
1076 kfree(*sdesc);
1077 *sdesc = NULL;
1078 if (*shash)
1079 crypto_free_shash(*shash);
1080 *shash = NULL;
1081}
1082
1083/**
1084 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
1085 * Input: rqst - a smb_rqst, page - a page index for rqst
1086 * Output: *len - the length for this page, *offset - the offset for this page
1087 */
1088void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
1089 unsigned int *len, unsigned int *offset)
1090{
1091 *len = rqst->rq_pagesz;
1092 *offset = (page == 0) ? rqst->rq_offset : 0;
1093
1094 if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
1095 *len = rqst->rq_tailsz;
1096 else if (page == 0)
1097 *len = rqst->rq_pagesz - rqst->rq_offset;
1098}
1099
1100void extract_unc_hostname(const char *unc, const char **h, size_t *len)
1101{
1102 const char *end;
1103
1104 /* skip initial slashes */
1105 while (*unc && (*unc == '\\' || *unc == '/'))
1106 unc++;
1107
1108 end = unc;
1109
1110 while (*end && !(*end == '\\' || *end == '/'))
1111 end++;
1112
1113 *h = unc;
1114 *len = end - unc;
1115}
1116
1117/**
1118 * copy_path_name - copy src path to dst, possibly truncating
1119 *
1120 * returns number of bytes written (including trailing nul)
1121 */
1122int copy_path_name(char *dst, const char *src)
1123{
1124 int name_len;
1125
1126 /*
1127 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
1128 * will truncate and strlen(dst) will be PATH_MAX-1
1129 */
1130 name_len = strscpy(dst, src, PATH_MAX);
1131 if (WARN_ON_ONCE(name_len < 0))
1132 name_len = PATH_MAX-1;
1133
1134 /* we count the trailing nul */
1135 name_len++;
1136 return name_len;
1137}
1138
1139struct super_cb_data {
1140 void *data;
1141 struct super_block *sb;
1142};
1143
1144static void tcp_super_cb(struct super_block *sb, void *arg)
1145{
1146 struct super_cb_data *sd = arg;
1147 struct TCP_Server_Info *server = sd->data;
1148 struct cifs_sb_info *cifs_sb;
1149 struct cifs_tcon *tcon;
1150
1151 if (sd->sb)
1152 return;
1153
1154 cifs_sb = CIFS_SB(sb);
1155 tcon = cifs_sb_master_tcon(cifs_sb);
1156 if (tcon->ses->server == server)
1157 sd->sb = sb;
1158}
1159
1160static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
1161 void *data)
1162{
1163 struct super_cb_data sd = {
1164 .data = data,
1165 .sb = NULL,
1166 };
1167
1168 iterate_supers_type(&cifs_fs_type, f, &sd);
1169
1170 if (!sd.sb)
1171 return ERR_PTR(-EINVAL);
1172 /*
1173 * Grab an active reference in order to prevent automounts (DFS links)
1174 * of expiring and then freeing up our cifs superblock pointer while
1175 * we're doing failover.
1176 */
1177 cifs_sb_active(sd.sb);
1178 return sd.sb;
1179}
1180
1181static void __cifs_put_super(struct super_block *sb)
1182{
1183 if (!IS_ERR_OR_NULL(sb))
1184 cifs_sb_deactive(sb);
1185}
1186
1187struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
1188{
1189 return __cifs_get_super(tcp_super_cb, server);
1190}
1191
1192void cifs_put_tcp_super(struct super_block *sb)
1193{
1194 __cifs_put_super(sb);
1195}
1196
1197#ifdef CONFIG_CIFS_DFS_UPCALL
1198int match_target_ip(struct TCP_Server_Info *server,
1199 const char *share, size_t share_len,
1200 bool *result)
1201{
1202 int rc;
1203 char *target, *tip = NULL;
1204 struct sockaddr tipaddr;
1205
1206 *result = false;
1207
1208 target = kzalloc(share_len + 3, GFP_KERNEL);
1209 if (!target) {
1210 rc = -ENOMEM;
1211 goto out;
1212 }
1213
1214 scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
1215
1216 cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
1217
1218 rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
1219 if (rc < 0)
1220 goto out;
1221
1222 cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);
1223
1224 if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
1225 cifs_dbg(VFS, "%s: failed to convert target ip address\n",
1226 __func__);
1227 rc = -EINVAL;
1228 goto out;
1229 }
1230
1231 *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
1232 &tipaddr);
1233 cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
1234 rc = 0;
1235
1236out:
1237 kfree(target);
1238 kfree(tip);
1239
1240 return rc;
1241}
1242
1243static void tcon_super_cb(struct super_block *sb, void *arg)
1244{
1245 struct super_cb_data *sd = arg;
1246 struct cifs_tcon *tcon = sd->data;
1247 struct cifs_sb_info *cifs_sb;
1248
1249 if (sd->sb)
1250 return;
1251
1252 cifs_sb = CIFS_SB(sb);
1253 if (tcon->dfs_path && cifs_sb->origin_fullpath &&
1254 !strcasecmp(tcon->dfs_path, cifs_sb->origin_fullpath))
1255 sd->sb = sb;
1256}
1257
1258static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
1259{
1260 return __cifs_get_super(tcon_super_cb, tcon);
1261}
1262
1263static inline void cifs_put_tcon_super(struct super_block *sb)
1264{
1265 __cifs_put_super(sb);
1266}
1267#else
1268static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
1269{
1270 return ERR_PTR(-EOPNOTSUPP);
1271}
1272
1273static inline void cifs_put_tcon_super(struct super_block *sb)
1274{
1275}
1276#endif
1277
1278int update_super_prepath(struct cifs_tcon *tcon, char *prefix)
1279{
1280 struct super_block *sb;
1281 struct cifs_sb_info *cifs_sb;
1282 int rc = 0;
1283
1284 sb = cifs_get_tcon_super(tcon);
1285 if (IS_ERR(sb))
1286 return PTR_ERR(sb);
1287
1288 cifs_sb = CIFS_SB(sb);
1289
1290 kfree(cifs_sb->prepath);
1291
1292 if (prefix && *prefix) {
1293 cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
1294 if (!cifs_sb->prepath) {
1295 rc = -ENOMEM;
1296 goto out;
1297 }
1298
1299 convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
1300 } else
1301 cifs_sb->prepath = NULL;
1302
1303 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
1304
1305out:
1306 cifs_put_tcon_super(sb);
1307 return rc;
1308}