Loading...
1/*
2 * fs/cifs/misc.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/slab.h>
23#include <linux/ctype.h>
24#include <linux/mempool.h>
25#include "cifspdu.h"
26#include "cifsglob.h"
27#include "cifsproto.h"
28#include "cifs_debug.h"
29#include "smberr.h"
30#include "nterr.h"
31#include "cifs_unicode.h"
32
33extern mempool_t *cifs_sm_req_poolp;
34extern mempool_t *cifs_req_poolp;
35
36/* The xid serves as a useful identifier for each incoming vfs request,
37 in a similar way to the mid which is useful to track each sent smb,
38 and CurrentXid can also provide a running counter (although it
39 will eventually wrap past zero) of the total vfs operations handled
40 since the cifs fs was mounted */
41
42unsigned int
43_GetXid(void)
44{
45 unsigned int xid;
46
47 spin_lock(&GlobalMid_Lock);
48 GlobalTotalActiveXid++;
49
50 /* keep high water mark for number of simultaneous ops in filesystem */
51 if (GlobalTotalActiveXid > GlobalMaxActiveXid)
52 GlobalMaxActiveXid = GlobalTotalActiveXid;
53 if (GlobalTotalActiveXid > 65000)
54 cFYI(1, "warning: more than 65000 requests active");
55 xid = GlobalCurrentXid++;
56 spin_unlock(&GlobalMid_Lock);
57 return xid;
58}
59
60void
61_FreeXid(unsigned int xid)
62{
63 spin_lock(&GlobalMid_Lock);
64 /* if (GlobalTotalActiveXid == 0)
65 BUG(); */
66 GlobalTotalActiveXid--;
67 spin_unlock(&GlobalMid_Lock);
68}
69
70struct cifs_ses *
71sesInfoAlloc(void)
72{
73 struct cifs_ses *ret_buf;
74
75 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
76 if (ret_buf) {
77 atomic_inc(&sesInfoAllocCount);
78 ret_buf->status = CifsNew;
79 ++ret_buf->ses_count;
80 INIT_LIST_HEAD(&ret_buf->smb_ses_list);
81 INIT_LIST_HEAD(&ret_buf->tcon_list);
82 mutex_init(&ret_buf->session_mutex);
83 }
84 return ret_buf;
85}
86
87void
88sesInfoFree(struct cifs_ses *buf_to_free)
89{
90 if (buf_to_free == NULL) {
91 cFYI(1, "Null buffer passed to sesInfoFree");
92 return;
93 }
94
95 atomic_dec(&sesInfoAllocCount);
96 kfree(buf_to_free->serverOS);
97 kfree(buf_to_free->serverDomain);
98 kfree(buf_to_free->serverNOS);
99 if (buf_to_free->password) {
100 memset(buf_to_free->password, 0, strlen(buf_to_free->password));
101 kfree(buf_to_free->password);
102 }
103 kfree(buf_to_free->user_name);
104 kfree(buf_to_free->domainName);
105 kfree(buf_to_free);
106}
107
108struct cifs_tcon *
109tconInfoAlloc(void)
110{
111 struct cifs_tcon *ret_buf;
112 ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
113 if (ret_buf) {
114 atomic_inc(&tconInfoAllocCount);
115 ret_buf->tidStatus = CifsNew;
116 ++ret_buf->tc_count;
117 INIT_LIST_HEAD(&ret_buf->openFileList);
118 INIT_LIST_HEAD(&ret_buf->tcon_list);
119#ifdef CONFIG_CIFS_STATS
120 spin_lock_init(&ret_buf->stat_lock);
121#endif
122 }
123 return ret_buf;
124}
125
126void
127tconInfoFree(struct cifs_tcon *buf_to_free)
128{
129 if (buf_to_free == NULL) {
130 cFYI(1, "Null buffer passed to tconInfoFree");
131 return;
132 }
133 atomic_dec(&tconInfoAllocCount);
134 kfree(buf_to_free->nativeFileSystem);
135 if (buf_to_free->password) {
136 memset(buf_to_free->password, 0, strlen(buf_to_free->password));
137 kfree(buf_to_free->password);
138 }
139 kfree(buf_to_free);
140}
141
142struct smb_hdr *
143cifs_buf_get(void)
144{
145 struct smb_hdr *ret_buf = NULL;
146
147/* We could use negotiated size instead of max_msgsize -
148 but it may be more efficient to always alloc same size
149 albeit slightly larger than necessary and maxbuffersize
150 defaults to this and can not be bigger */
151 ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
152
153 /* clear the first few header bytes */
154 /* for most paths, more is cleared in header_assemble */
155 if (ret_buf) {
156 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
157 atomic_inc(&bufAllocCount);
158#ifdef CONFIG_CIFS_STATS2
159 atomic_inc(&totBufAllocCount);
160#endif /* CONFIG_CIFS_STATS2 */
161 }
162
163 return ret_buf;
164}
165
166void
167cifs_buf_release(void *buf_to_free)
168{
169 if (buf_to_free == NULL) {
170 /* cFYI(1, "Null buffer passed to cifs_buf_release");*/
171 return;
172 }
173 mempool_free(buf_to_free, cifs_req_poolp);
174
175 atomic_dec(&bufAllocCount);
176 return;
177}
178
179struct smb_hdr *
180cifs_small_buf_get(void)
181{
182 struct smb_hdr *ret_buf = NULL;
183
184/* We could use negotiated size instead of max_msgsize -
185 but it may be more efficient to always alloc same size
186 albeit slightly larger than necessary and maxbuffersize
187 defaults to this and can not be bigger */
188 ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
189 if (ret_buf) {
190 /* No need to clear memory here, cleared in header assemble */
191 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
192 atomic_inc(&smBufAllocCount);
193#ifdef CONFIG_CIFS_STATS2
194 atomic_inc(&totSmBufAllocCount);
195#endif /* CONFIG_CIFS_STATS2 */
196
197 }
198 return ret_buf;
199}
200
201void
202cifs_small_buf_release(void *buf_to_free)
203{
204
205 if (buf_to_free == NULL) {
206 cFYI(1, "Null buffer passed to cifs_small_buf_release");
207 return;
208 }
209 mempool_free(buf_to_free, cifs_sm_req_poolp);
210
211 atomic_dec(&smBufAllocCount);
212 return;
213}
214
215/*
216 Find a free multiplex id (SMB mid). Otherwise there could be
217 mid collisions which might cause problems, demultiplexing the
218 wrong response to this request. Multiplex ids could collide if
219 one of a series requests takes much longer than the others, or
220 if a very large number of long lived requests (byte range
221 locks or FindNotify requests) are pending. No more than
222 64K-1 requests can be outstanding at one time. If no
223 mids are available, return zero. A future optimization
224 could make the combination of mids and uid the key we use
225 to demultiplex on (rather than mid alone).
226 In addition to the above check, the cifs demultiplex
227 code already used the command code as a secondary
228 check of the frame and if signing is negotiated the
229 response would be discarded if the mid were the same
230 but the signature was wrong. Since the mid is not put in the
231 pending queue until later (when it is about to be dispatched)
232 we do have to limit the number of outstanding requests
233 to somewhat less than 64K-1 although it is hard to imagine
234 so many threads being in the vfs at one time.
235*/
/*
 * Allocate the next free multiplex id (SMB mid) for @server, holding
 * GlobalMid_Lock throughout.  A mid currently in MID_REQUEST_SUBMITTED
 * state on pending_mid_q is skipped to avoid demultiplexing a response
 * to the wrong request.  Returns 0 if every candidate collided (see
 * the long rationale comment above this function).
 */
__u16 GetNextMid(struct TCP_Server_Info *server)
{
	__u16 mid = 0;
	__u16 last_mid;
	bool collision;

	spin_lock(&GlobalMid_Lock);
	last_mid = server->CurrentMid; /* we do not want to loop forever */
	server->CurrentMid++;
	/* This nested loop looks more expensive than it is.
	   In practice the list of pending requests is short,
	   fewer than 50, and the mids are likely to be unique
	   on the first pass through the loop unless some request
	   takes longer than the 64 thousand requests before it
	   (and it would also have to have been a request that
	   did not time out) */
	while (server->CurrentMid != last_mid) {
		struct mid_q_entry *mid_entry;
		unsigned int num_mids;

		collision = false;
		/* mid 0 is reserved as the "none available" return value */
		if (server->CurrentMid == 0)
			server->CurrentMid++;

		num_mids = 0;
		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
			++num_mids;
			if (mid_entry->mid == server->CurrentMid &&
			    mid_entry->midState == MID_REQUEST_SUBMITTED) {
				/* This mid is in use, try a different one */
				collision = true;
				break;
			}
		}

		/*
		 * if we have more than 32k mids in the list, then something
		 * is very wrong. Possibly a local user is trying to DoS the
		 * box by issuing long-running calls and SIGKILL'ing them. If
		 * we get to 2^16 mids then we're in big trouble as this
		 * function could loop forever.
		 *
		 * Go ahead and assign out the mid in this situation, but force
		 * an eventual reconnect to clean out the pending_mid_q.
		 */
		if (num_mids > 32768)
			server->tcpStatus = CifsNeedReconnect;

		if (!collision) {
			mid = server->CurrentMid;
			break;
		}
		server->CurrentMid++;
	}
	spin_unlock(&GlobalMid_Lock);
	return mid;
}
293
294/* NB: MID can not be set if treeCon not passed in, in that
295 case it is responsbility of caller to set the mid */
/*
 * Fill in the fixed portion of an outgoing SMB header.
 *
 * @buffer:      header to fill; the first 256 bytes are zeroed first
 * @smb_command: SMB command code for the request
 * @treeCon:     tree connection the request goes out on, or NULL
 * @word_count:  length of the fixed section (word count) in two byte
 *               units
 *
 * NB: the MID can not be set if treeCon is not passed in; in that case
 * it is the responsibility of the caller to set the mid.
 */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	struct list_head *temp_item;
	struct cifs_ses *ses;
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00; /* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = GetNextMid(treeCon->ses->server);
			if (multiuser_mount != 0) {
			/* For the multiuser case we match the local linux
			   user (uid) to a valid remote smb user (smb_uid):
			   search the sessions to the same server for one
			   whose mount uid matches the current process's
			   fsuid and substitute that session's smb uid.
			   If none is found, the default smb session (the
			   one for the mounted volume) is used, exactly as
			   if the multiuser mount flag were disabled.

			   BB: Add support for establishing a new tCon and
			   SMB Session with userid/password pairs found on
			   the smb session for other target tcp/ip
			   addresses BB */
				if (current_fsuid() != treeCon->ses->linux_uid) {
					cFYI(1, "Multiuser mode and UID "
						 "did not match tcon uid");
					spin_lock(&cifs_tcp_ses_lock);
					list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
						ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
						if (ses->linux_uid == current_fsuid()) {
							if (ses->server == treeCon->ses->server) {
								cFYI(1, "found matching uid substitute right smb_uid");
								buffer->Uid = ses->Suid;
								break;
							} else {
								/* BB eventually call cifs_setup_session here */
								cFYI(1, "local UID found but no smb sess with this server exists");
							}
						}
					}
					spin_unlock(&cifs_tcp_ses_lock);
				}
			}
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sec_mode &
			  (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}
392
393static int
394check_smb_hdr(struct smb_hdr *smb, __u16 mid)
395{
396 /* does it have the right SMB "signature" ? */
397 if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
398 cERROR(1, "Bad protocol string signature header 0x%x",
399 *(unsigned int *)smb->Protocol);
400 return 1;
401 }
402
403 /* Make sure that message ids match */
404 if (mid != smb->Mid) {
405 cERROR(1, "Mids do not match. received=%u expected=%u",
406 smb->Mid, mid);
407 return 1;
408 }
409
410 /* if it's a response then accept */
411 if (smb->Flags & SMBFLG_RESPONSE)
412 return 0;
413
414 /* only one valid case where server sends us request */
415 if (smb->Command == SMB_COM_LOCKING_ANDX)
416 return 0;
417
418 cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
419 return 1;
420}
421
/*
 * Validate a received SMB frame: header signature/mid (via
 * check_smb_hdr), the RFC1001 length against what was actually read,
 * and both against the length calculated from wct/bcc.  Returns 0 if
 * the frame is usable, 1 if it must be discarded.
 */
int
checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
{
	__u32 len = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len);

	/* frame too small to even reach the byte count (bcc)? */
	if (length < 2 + sizeof(struct smb_hdr)) {
		if ((length >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((length == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially unitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cERROR(1, "rcvd invalid byte count (bcc)");
		} else {
			cERROR(1, "Length less than smb header size");
		}
		return 1;
	}
	/* claimed length must fit the largest buffer we will accept */
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "smb length greater than MaxBufSize, mid=%d",
			   smb->Mid);
		return 1;
	}

	if (check_smb_hdr(smb, mid))
		return 1;
	clc_len = smbCalcSize(smb);

	if (4 + len != length) {
		cERROR(1, "Length read does not match RFC1001 length %d",
			   len);
		return 1;
	}

	if (4 + len != clc_len) {
		/* check if bcc wrapped around for large read responses */
		if ((len > 64 * 1024) && (len > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
				clc_len, 4 + len, smb->Mid);

		if (4 + len < clc_len) {
			cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
					len, smb->Mid);
			return 1;
		} else if (len > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose limit the amount of extra
			 * data to 512 bytes.
			 */
			cERROR(1, "RFC1001 size %u more than 512 bytes larger "
				  "than SMB for mid=%u", len, smb->Mid);
			return 1;
		}
	}
	return 0;
}
504
/*
 * Decide whether a server-initiated frame is an oplock break (or a
 * dnotify response) and, if so, handle it: find the open file by
 * tid/fid, record the new oplock level, and queue the break worker.
 *
 * Returns true when the frame was consumed here (including the cases
 * where no matching tcon/file is found), false when it should be
 * processed by the normal response path.
 */
bool
is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
{
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cFYI(1, "Checking for oplock break or dnotify response");
	/* a completed NT transact response here is a dnotify reply */
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cFYI(1, "dnotify on %s Action: 0x%x",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cFYI(1, "notify err 0x%d",
				pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	/* oplock breaks arrive as LOCKING_ANDX requests from the server */
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cFYI(1, "invalid handle on oplock break");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	/* an oplock break request always has wct 8 */
	if (pSMB->hdr.WordCount != 8)
		return false;

	cFYI(1, "oplock type 0x%d level 0x%d",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->num_oplock_brks);
			spin_lock(&cifs_file_list_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->netfid)
					continue;

				cFYI(1, "file id match, oplock break");
				pCifsInode = CIFS_I(netfile->dentry->d_inode);

				/* OplockLevel nonzero means demote to
				   level II (read) caching, zero means
				   no caching at all */
				cifs_set_oplock_level(pCifsInode,
					pSMB->OplockLevel ? OPLOCK_READ : 0);
				queue_work(system_nrt_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&cifs_file_list_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&cifs_file_list_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cFYI(1, "No matching file for oplock break");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cFYI(1, "Can not process oplock break for non-existent connection");
	return true;
}
606
607void
608dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
609{
610 int i, j;
611 char debug_line[17];
612 unsigned char *buffer;
613
614 if (traceSMB == 0)
615 return;
616
617 buffer = (unsigned char *) smb_buf;
618 for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
619 if (i % 8 == 0) {
620 /* have reached the beginning of line */
621 printk(KERN_DEBUG "| ");
622 j = 0;
623 }
624 printk("%0#4x ", buffer[i]);
625 debug_line[2 * j] = ' ';
626 if (isprint(buffer[i]))
627 debug_line[1 + (2 * j)] = buffer[i];
628 else
629 debug_line[1 + (2 * j)] = '_';
630
631 if (i % 8 == 7) {
632 /* reached end of line, time to print ascii */
633 debug_line[16] = 0;
634 printk(" | %s\n", debug_line);
635 }
636 }
637 for (; j < 8; j++) {
638 printk(" ");
639 debug_line[2 * j] = ' ';
640 debug_line[1 + (2 * j)] = ' ';
641 }
642 printk(" | %s\n", debug_line);
643 return;
644}
645
646void
647cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
648{
649 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
650 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
651 cERROR(1, "Autodisabling the use of server inode numbers on "
652 "%s. This server doesn't seem to support them "
653 "properly. Hardlinks will not be recognized on this "
654 "mount. Consider mounting with the \"noserverino\" "
655 "option to silence this message.",
656 cifs_sb_master_tcon(cifs_sb)->treeName);
657 }
658}
659
660void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
661{
662 oplock &= 0xF;
663
664 if (oplock == OPLOCK_EXCLUSIVE) {
665 cinode->clientCanCacheAll = true;
666 cinode->clientCanCacheRead = true;
667 cFYI(1, "Exclusive Oplock granted on inode %p",
668 &cinode->vfs_inode);
669 } else if (oplock == OPLOCK_READ) {
670 cinode->clientCanCacheAll = false;
671 cinode->clientCanCacheRead = true;
672 cFYI(1, "Level II Oplock granted on inode %p",
673 &cinode->vfs_inode);
674 } else {
675 cinode->clientCanCacheAll = false;
676 cinode->clientCanCacheRead = false;
677 }
678}
1/*
2 * fs/cifs/misc.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/slab.h>
23#include <linux/ctype.h>
24#include <linux/mempool.h>
25#include <linux/vmalloc.h>
26#include "cifspdu.h"
27#include "cifsglob.h"
28#include "cifsproto.h"
29#include "cifs_debug.h"
30#include "smberr.h"
31#include "nterr.h"
32#include "cifs_unicode.h"
33#include "smb2pdu.h"
34#include "cifsfs.h"
35#ifdef CONFIG_CIFS_DFS_UPCALL
36#include "dns_resolve.h"
37#endif
38
39extern mempool_t *cifs_sm_req_poolp;
40extern mempool_t *cifs_req_poolp;
41
42/* The xid serves as a useful identifier for each incoming vfs request,
43 in a similar way to the mid which is useful to track each sent smb,
44 and CurrentXid can also provide a running counter (although it
45 will eventually wrap past zero) of the total vfs operations handled
46 since the cifs fs was mounted */
47
48unsigned int
49_get_xid(void)
50{
51 unsigned int xid;
52
53 spin_lock(&GlobalMid_Lock);
54 GlobalTotalActiveXid++;
55
56 /* keep high water mark for number of simultaneous ops in filesystem */
57 if (GlobalTotalActiveXid > GlobalMaxActiveXid)
58 GlobalMaxActiveXid = GlobalTotalActiveXid;
59 if (GlobalTotalActiveXid > 65000)
60 cifs_dbg(FYI, "warning: more than 65000 requests active\n");
61 xid = GlobalCurrentXid++;
62 spin_unlock(&GlobalMid_Lock);
63 return xid;
64}
65
66void
67_free_xid(unsigned int xid)
68{
69 spin_lock(&GlobalMid_Lock);
70 /* if (GlobalTotalActiveXid == 0)
71 BUG(); */
72 GlobalTotalActiveXid--;
73 spin_unlock(&GlobalMid_Lock);
74}
75
76struct cifs_ses *
77sesInfoAlloc(void)
78{
79 struct cifs_ses *ret_buf;
80
81 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
82 if (ret_buf) {
83 atomic_inc(&sesInfoAllocCount);
84 ret_buf->status = CifsNew;
85 ++ret_buf->ses_count;
86 INIT_LIST_HEAD(&ret_buf->smb_ses_list);
87 INIT_LIST_HEAD(&ret_buf->tcon_list);
88 mutex_init(&ret_buf->session_mutex);
89 spin_lock_init(&ret_buf->iface_lock);
90 }
91 return ret_buf;
92}
93
94void
95sesInfoFree(struct cifs_ses *buf_to_free)
96{
97 if (buf_to_free == NULL) {
98 cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
99 return;
100 }
101
102 atomic_dec(&sesInfoAllocCount);
103 kfree(buf_to_free->serverOS);
104 kfree(buf_to_free->serverDomain);
105 kfree(buf_to_free->serverNOS);
106 kfree_sensitive(buf_to_free->password);
107 kfree(buf_to_free->user_name);
108 kfree(buf_to_free->domainName);
109 kfree_sensitive(buf_to_free->auth_key.response);
110 kfree(buf_to_free->iface_list);
111 kfree_sensitive(buf_to_free);
112}
113
114struct cifs_tcon *
115tconInfoAlloc(void)
116{
117 struct cifs_tcon *ret_buf;
118
119 ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
120 if (!ret_buf)
121 return NULL;
122 ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
123 if (!ret_buf->crfid.fid) {
124 kfree(ret_buf);
125 return NULL;
126 }
127
128 atomic_inc(&tconInfoAllocCount);
129 ret_buf->tidStatus = CifsNew;
130 ++ret_buf->tc_count;
131 INIT_LIST_HEAD(&ret_buf->openFileList);
132 INIT_LIST_HEAD(&ret_buf->tcon_list);
133 spin_lock_init(&ret_buf->open_file_lock);
134 mutex_init(&ret_buf->crfid.fid_mutex);
135 spin_lock_init(&ret_buf->stat_lock);
136 atomic_set(&ret_buf->num_local_opens, 0);
137 atomic_set(&ret_buf->num_remote_opens, 0);
138
139 return ret_buf;
140}
141
142void
143tconInfoFree(struct cifs_tcon *buf_to_free)
144{
145 if (buf_to_free == NULL) {
146 cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
147 return;
148 }
149 atomic_dec(&tconInfoAllocCount);
150 kfree(buf_to_free->nativeFileSystem);
151 kfree_sensitive(buf_to_free->password);
152 kfree(buf_to_free->crfid.fid);
153#ifdef CONFIG_CIFS_DFS_UPCALL
154 kfree(buf_to_free->dfs_path);
155#endif
156 kfree(buf_to_free);
157}
158
159struct smb_hdr *
160cifs_buf_get(void)
161{
162 struct smb_hdr *ret_buf = NULL;
163 /*
164 * SMB2 header is bigger than CIFS one - no problems to clean some
165 * more bytes for CIFS.
166 */
167 size_t buf_size = sizeof(struct smb2_sync_hdr);
168
169 /*
170 * We could use negotiated size instead of max_msgsize -
171 * but it may be more efficient to always alloc same size
172 * albeit slightly larger than necessary and maxbuffersize
173 * defaults to this and can not be bigger.
174 */
175 ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
176
177 /* clear the first few header bytes */
178 /* for most paths, more is cleared in header_assemble */
179 memset(ret_buf, 0, buf_size + 3);
180 atomic_inc(&bufAllocCount);
181#ifdef CONFIG_CIFS_STATS2
182 atomic_inc(&totBufAllocCount);
183#endif /* CONFIG_CIFS_STATS2 */
184
185 return ret_buf;
186}
187
188void
189cifs_buf_release(void *buf_to_free)
190{
191 if (buf_to_free == NULL) {
192 /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
193 return;
194 }
195 mempool_free(buf_to_free, cifs_req_poolp);
196
197 atomic_dec(&bufAllocCount);
198 return;
199}
200
201struct smb_hdr *
202cifs_small_buf_get(void)
203{
204 struct smb_hdr *ret_buf = NULL;
205
206/* We could use negotiated size instead of max_msgsize -
207 but it may be more efficient to always alloc same size
208 albeit slightly larger than necessary and maxbuffersize
209 defaults to this and can not be bigger */
210 ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
211 /* No need to clear memory here, cleared in header assemble */
212 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
213 atomic_inc(&smBufAllocCount);
214#ifdef CONFIG_CIFS_STATS2
215 atomic_inc(&totSmBufAllocCount);
216#endif /* CONFIG_CIFS_STATS2 */
217
218 return ret_buf;
219}
220
221void
222cifs_small_buf_release(void *buf_to_free)
223{
224
225 if (buf_to_free == NULL) {
226 cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
227 return;
228 }
229 mempool_free(buf_to_free, cifs_sm_req_poolp);
230
231 atomic_dec(&smBufAllocCount);
232 return;
233}
234
235void
236free_rsp_buf(int resp_buftype, void *rsp)
237{
238 if (resp_buftype == CIFS_SMALL_BUFFER)
239 cifs_small_buf_release(rsp);
240 else if (resp_buftype == CIFS_LARGE_BUFFER)
241 cifs_buf_release(rsp);
242}
243
244/* NB: MID can not be set if treeCon not passed in, in that
245 case it is responsbility of caller to set the mid */
/*
 * Fill in the fixed portion of an outgoing SMB header.
 *
 * @buffer:      header to fill; the first 256 bytes are zeroed first
 * @smb_command: SMB command code for the request
 * @treeCon:     tree connection the request goes out on, or NULL
 * @word_count:  length of the fixed section (word count) in two byte
 *               units
 *
 * NB: the MID can not be set if treeCon is not passed in; in that case
 * it is the responsibility of the caller to set the mid.
 */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00; /* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}
294
295static int
296check_smb_hdr(struct smb_hdr *smb)
297{
298 /* does it have the right SMB "signature" ? */
299 if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
300 cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
301 *(unsigned int *)smb->Protocol);
302 return 1;
303 }
304
305 /* if it's a response then accept */
306 if (smb->Flags & SMBFLG_RESPONSE)
307 return 0;
308
309 /* only one valid case where server sends us request */
310 if (smb->Command == SMB_COM_LOCKING_ANDX)
311 return 0;
312
313 cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
314 get_mid(smb));
315 return 1;
316}
317
/*
 * Validate a received SMB frame: header signature/direction (via
 * check_smb_hdr), the RFC1001 length against the bytes actually read,
 * and both against the length calculated from wct/bcc.  Returns 0 if
 * the frame is usable, -EIO if it must be discarded.
 */
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially unitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose limit the amount of extra
			 * data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}
401
/*
 * Examine a frame the demultiplex thread could not match to an outstanding
 * request.  Handles the two server-initiated cases: a change notify
 * (dnotify) response and an oplock break.  Returns true when the frame was
 * consumed here, false when it should be processed as a normal response.
 */
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	/* response to an NT transact: treat as a change notify (dnotify) */
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		/* total_read excludes the 4-byte RFC1001 length prefix */
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			/* bounds-check server-supplied offset before use */
			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "Invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "Invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	/* an oplock break request carries exactly 8 setup words */
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			/* match the file by its netfid within this tcon */
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				/* queued handler drops locks' protected ref */
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}
514
515void
516dump_smb(void *buf, int smb_buf_length)
517{
518 if (traceSMB == 0)
519 return;
520
521 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
522 smb_buf_length, true);
523}
524
525void
526cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
527{
528 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
529 struct cifs_tcon *tcon = NULL;
530
531 if (cifs_sb->master_tlink)
532 tcon = cifs_sb_master_tcon(cifs_sb);
533
534 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
535 cifs_sb->mnt_cifs_serverino_autodisabled = true;
536 cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
537 tcon ? tcon->treeName : "new server");
538 cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
539 cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
540
541 }
542}
543
544void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
545{
546 oplock &= 0xF;
547
548 if (oplock == OPLOCK_EXCLUSIVE) {
549 cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
550 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
551 &cinode->vfs_inode);
552 } else if (oplock == OPLOCK_READ) {
553 cinode->oplock = CIFS_CACHE_READ_FLG;
554 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
555 &cinode->vfs_inode);
556 } else
557 cinode->oplock = 0;
558}
559
560/*
561 * We wait for oplock breaks to be processed before we attempt to perform
562 * writes.
563 */
564int cifs_get_writer(struct cifsInodeInfo *cinode)
565{
566 int rc;
567
568start:
569 rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
570 TASK_KILLABLE);
571 if (rc)
572 return rc;
573
574 spin_lock(&cinode->writers_lock);
575 if (!cinode->writers)
576 set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
577 cinode->writers++;
578 /* Check to see if we have started servicing an oplock break */
579 if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
580 cinode->writers--;
581 if (cinode->writers == 0) {
582 clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
583 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
584 }
585 spin_unlock(&cinode->writers_lock);
586 goto start;
587 }
588 spin_unlock(&cinode->writers_lock);
589 return 0;
590}
591
592void cifs_put_writer(struct cifsInodeInfo *cinode)
593{
594 spin_lock(&cinode->writers_lock);
595 cinode->writers--;
596 if (cinode->writers == 0) {
597 clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
598 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
599 }
600 spin_unlock(&cinode->writers_lock);
601}
602
603/**
604 * cifs_queue_oplock_break - queue the oplock break handler for cfile
605 *
606 * This function is called from the demultiplex thread when it
607 * receives an oplock break for @cfile.
608 *
609 * Assumes the tcon->open_file_lock is held.
610 * Assumes cfile->file_info_lock is NOT held.
611 */
612void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
613{
614 /*
615 * Bump the handle refcount now while we hold the
616 * open_file_lock to enforce the validity of it for the oplock
617 * break handler. The matching put is done at the end of the
618 * handler.
619 */
620 cifsFileInfo_get(cfile);
621
622 queue_work(cifsoplockd_wq, &cfile->oplock_break);
623}
624
/*
 * Mark the oplock break for @cinode as finished and wake anyone blocked
 * in cifs_get_writer() waiting on CIFS_INODE_PENDING_OPLOCK_BREAK.
 * Note: the bit must be cleared before the wakeup.
 */
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}
630
631bool
632backup_cred(struct cifs_sb_info *cifs_sb)
633{
634 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
635 if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
636 return true;
637 }
638 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
639 if (in_group_p(cifs_sb->mnt_backupgid))
640 return true;
641 }
642
643 return false;
644}
645
646void
647cifs_del_pending_open(struct cifs_pending_open *open)
648{
649 spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
650 list_del(&open->olist);
651 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
652}
653
654void
655cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
656 struct cifs_pending_open *open)
657{
658 memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
659 open->oplock = CIFS_OPLOCK_NO_CHANGE;
660 open->tlink = tlink;
661 fid->pending_open = open;
662 list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
663}
664
665void
666cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
667 struct cifs_pending_open *open)
668{
669 spin_lock(&tlink_tcon(tlink)->open_file_lock);
670 cifs_add_pending_open_locked(fid, tlink, open);
671 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
672}
673
/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* only V3 referrals are understood; reject anything else */
	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			/*
			 * PathConsumed is in bytes of the UTF-16 form of the
			 * search name; convert to count bytes in that form.
			 */
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
						GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	/* on any failure, free partially-built nodes and zero the outputs */
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}
780
781struct cifs_aio_ctx *
782cifs_aio_ctx_alloc(void)
783{
784 struct cifs_aio_ctx *ctx;
785
786 /*
787 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
788 * to false so that we know when we have to unreference pages within
789 * cifs_aio_ctx_release()
790 */
791 ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
792 if (!ctx)
793 return NULL;
794
795 INIT_LIST_HEAD(&ctx->list);
796 mutex_init(&ctx->aio_mutex);
797 init_completion(&ctx->done);
798 kref_init(&ctx->refcount);
799 return ctx;
800}
801
/*
 * kref release callback for a cifs_aio_ctx: drop the file handle
 * reference, release any pinned pages, and free the context.
 */
void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	/* NOTE(review): assumes ctx->cfile was set (or that
	 * cifsFileInfo_put tolerates NULL) - confirm against callers */
	cifsFileInfo_put(ctx->cfile);

	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was call successfuly
	 * which means that iov_iter_get_pages() was a success and thus that
	 * we have taken reference on pages.
	 */
	if (ctx->bv) {
		unsigned i;

		for (i = 0; i < ctx->npages; i++) {
			/* redirty pages the read path wrote into */
			if (ctx->should_dirty)
				set_page_dirty(ctx->bv[i].bv_page);
			put_page(ctx->bv[i].bv_page);
		}
		/* bv may be kmalloc'd or vmalloc'd; kvfree handles both */
		kvfree(ctx->bv);
	}

	kfree(ctx);
}
828
/* arrays at or below this size are kmalloc'd; larger ones fall back to vmalloc */
#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

/*
 * Capture the caller's iov_iter into @ctx so the I/O can complete
 * asynchronously after the syscall returns: pin the user pages and build
 * a bvec-based iterator over them.  kvec iterators (kernel memory) are
 * simply copied.  @rw is the direction passed on to iov_iter_bvec().
 */
int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	/* kernel-memory iterators need no page pinning; copy as-is */
	if (iov_iter_is_kvec(iter)) {
		memcpy(&ctx->iter, iter, sizeof(*iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(*bv)));
		if (!bv)
			return -ENOMEM;
	}

	if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(*pages)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	/* pin user pages in batches and convert each batch into bvecs */
	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		/* rc now counts bytes from the start of the first page */
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			/* only the first page can start mid-page */
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	/* ctx->len is what was actually pinned, which may be < saved_len */
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
	return 0;
}
918
919/**
920 * cifs_alloc_hash - allocate hash and hash context together
921 *
922 * The caller has to make sure @sdesc is initialized to either NULL or
923 * a valid context. Both can be freed via cifs_free_hash().
924 */
925int
926cifs_alloc_hash(const char *name,
927 struct crypto_shash **shash, struct sdesc **sdesc)
928{
929 int rc = 0;
930 size_t size;
931
932 if (*sdesc != NULL)
933 return 0;
934
935 *shash = crypto_alloc_shash(name, 0, 0);
936 if (IS_ERR(*shash)) {
937 cifs_dbg(VFS, "Could not allocate crypto %s\n", name);
938 rc = PTR_ERR(*shash);
939 *shash = NULL;
940 *sdesc = NULL;
941 return rc;
942 }
943
944 size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
945 *sdesc = kmalloc(size, GFP_KERNEL);
946 if (*sdesc == NULL) {
947 cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
948 crypto_free_shash(*shash);
949 *shash = NULL;
950 return -ENOMEM;
951 }
952
953 (*sdesc)->shash.tfm = *shash;
954 return 0;
955}
956
957/**
958 * cifs_free_hash - free hash and hash context together
959 *
960 * Freeing a NULL hash or context is safe.
961 */
962void
963cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
964{
965 kfree(*sdesc);
966 *sdesc = NULL;
967 if (*shash)
968 crypto_free_shash(*shash);
969 *shash = NULL;
970}
971
972/**
973 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
974 * Input: rqst - a smb_rqst, page - a page index for rqst
975 * Output: *len - the length for this page, *offset - the offset for this page
976 */
977void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
978 unsigned int *len, unsigned int *offset)
979{
980 *len = rqst->rq_pagesz;
981 *offset = (page == 0) ? rqst->rq_offset : 0;
982
983 if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
984 *len = rqst->rq_tailsz;
985 else if (page == 0)
986 *len = rqst->rq_pagesz - rqst->rq_offset;
987}
988
/*
 * Point *h at the hostname component of a UNC path (leading slashes or
 * backslashes skipped) and store its length in *len.  Does not copy or
 * NUL-terminate; the result aliases @unc.
 */
void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *start, *p;

	/* advance past any leading path separators */
	for (start = unc; *start == '\\' || *start == '/'; start++)
		;

	/* hostname runs until the next separator or end of string */
	for (p = start; *p != '\0' && *p != '\\' && *p != '/'; p++)
		;

	*h = start;
	*len = (size_t)(p - start);
}
1005
1006/**
1007 * copy_path_name - copy src path to dst, possibly truncating
1008 *
1009 * returns number of bytes written (including trailing nul)
1010 */
1011int copy_path_name(char *dst, const char *src)
1012{
1013 int name_len;
1014
1015 /*
1016 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
1017 * will truncate and strlen(dst) will be PATH_MAX-1
1018 */
1019 name_len = strscpy(dst, src, PATH_MAX);
1020 if (WARN_ON_ONCE(name_len < 0))
1021 name_len = PATH_MAX-1;
1022
1023 /* we count the trailing nul */
1024 name_len++;
1025 return name_len;
1026}
1027
/*
 * Cursor passed to iterate_supers_type() callbacks: @data is the search
 * key (a server or tcon pointer), @sb receives the first match.
 */
struct super_cb_data {
	void *data;
	struct super_block *sb;
};
1032
1033static void tcp_super_cb(struct super_block *sb, void *arg)
1034{
1035 struct super_cb_data *sd = arg;
1036 struct TCP_Server_Info *server = sd->data;
1037 struct cifs_sb_info *cifs_sb;
1038 struct cifs_tcon *tcon;
1039
1040 if (sd->sb)
1041 return;
1042
1043 cifs_sb = CIFS_SB(sb);
1044 tcon = cifs_sb_master_tcon(cifs_sb);
1045 if (tcon->ses->server == server)
1046 sd->sb = sb;
1047}
1048
1049static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
1050 void *data)
1051{
1052 struct super_cb_data sd = {
1053 .data = data,
1054 .sb = NULL,
1055 };
1056
1057 iterate_supers_type(&cifs_fs_type, f, &sd);
1058
1059 if (!sd.sb)
1060 return ERR_PTR(-EINVAL);
1061 /*
1062 * Grab an active reference in order to prevent automounts (DFS links)
1063 * of expiring and then freeing up our cifs superblock pointer while
1064 * we're doing failover.
1065 */
1066 cifs_sb_active(sd.sb);
1067 return sd.sb;
1068}
1069
/* Drop the active reference taken by __cifs_get_super(). */
static void __cifs_put_super(struct super_block *sb)
{
	/* tolerate the ERR_PTR/NULL values __cifs_get_super can return */
	if (IS_ERR_OR_NULL(sb))
		return;
	cifs_sb_deactive(sb);
}
1075
/* Find (and actively pin) the superblock mounted over @server. */
struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
{
	return __cifs_get_super(tcp_super_cb, server);
}
1080
/* Release a superblock obtained from cifs_get_tcp_super(). */
void cifs_put_tcp_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}
1085
1086#ifdef CONFIG_CIFS_DFS_UPCALL
/*
 * Resolve the hostname part of a DFS target share ("\\host\share") via
 * the DNS upcall and set *result true if the resolved address matches
 * @server's destination address.  Returns 0 on success (even when the
 * addresses differ) or a negative errno on resolution/parse failure.
 */
int match_target_ip(struct TCP_Server_Info *server,
		    const char *share, size_t share_len,
		    bool *result)
{
	int rc;
	char *target, *tip = NULL;
	struct sockaddr tipaddr;

	*result = false;

	/* "\\" prefix + share + nul => share_len + 3 bytes */
	target = kzalloc(share_len + 3, GFP_KERNEL);
	if (!target) {
		rc = -ENOMEM;
		goto out;
	}

	scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);

	/* dns_resolve_server_name_to_ip allocates tip on success */
	rc = dns_resolve_server_name_to_ip(target, &tip);
	if (rc < 0)
		goto out;

	cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);

	if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
		cifs_dbg(VFS, "%s: failed to convert target ip address\n",
			 __func__);
		rc = -EINVAL;
		goto out;
	}

	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
				    &tipaddr);
	cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
	rc = 0;

out:
	/* kfree(NULL) is safe for the not-yet-allocated cases */
	kfree(target);
	kfree(tip);

	return rc;
}
1131
1132static void tcon_super_cb(struct super_block *sb, void *arg)
1133{
1134 struct super_cb_data *sd = arg;
1135 struct cifs_tcon *tcon = sd->data;
1136 struct cifs_sb_info *cifs_sb;
1137
1138 if (sd->sb)
1139 return;
1140
1141 cifs_sb = CIFS_SB(sb);
1142 if (tcon->dfs_path && cifs_sb->origin_fullpath &&
1143 !strcasecmp(tcon->dfs_path, cifs_sb->origin_fullpath))
1144 sd->sb = sb;
1145}
1146
/* Find (and actively pin) the superblock mounted via @tcon's DFS path. */
static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
{
	return __cifs_get_super(tcon_super_cb, tcon);
}

/* Release a superblock obtained from cifs_get_tcon_super(). */
static inline void cifs_put_tcon_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}
#else
/* DFS upcall disabled: tcon-based superblock lookup is unsupported */
static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void cifs_put_tcon_super(struct super_block *sb)
{
}
#endif
1166
/*
 * Replace the prefix path of the superblock mounted via @tcon's DFS path
 * with @prefix (delimiters converted to the mount's separator), used
 * during DFS failover.  Returns 0 on success or a negative errno.
 */
int update_super_prepath(struct cifs_tcon *tcon, char *prefix)
{
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	int rc = 0;

	sb = cifs_get_tcon_super(tcon);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	cifs_sb = CIFS_SB(sb);

	/* old prepath is replaced unconditionally below */
	kfree(cifs_sb->prepath);

	if (prefix && *prefix) {
		/* NOTE(review): GFP_ATOMIC here - presumably because a
		 * caller holds a spinlock; confirm against call sites */
		cifs_sb->prepath = kstrndup(prefix, strlen(prefix), GFP_ATOMIC);
		if (!cifs_sb->prepath) {
			rc = -ENOMEM;
			goto out;
		}

		convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	} else
		cifs_sb->prepath = NULL;

	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;

out:
	cifs_put_tcon_super(sb);
	return rc;
}