/*
 * fs/cifs/misc.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/*
 * The xid serves as a useful identifier for each incoming vfs request,
 * in a similar way to the mid which is useful to track each sent smb,
 * and CurrentXid can also provide a running counter (although it
 * will eventually wrap past zero) of the total vfs operations handled
 * since the cifs fs was mounted.
 */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}
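
/*
 * Usage sketch (illustrative only, not part of this file): every vfs
 * entry point brackets its work with a get/free pair so the active-xid
 * accounting above stays balanced. The example_vfs_op() name is
 * hypothetical; real callers typically go through the get_xid()/
 * free_xid() wrappers declared in cifsproto.h.
 */
#if 0
static int example_vfs_op(void)
{
	unsigned int xid = _get_xid();
	int rc = 0;

	/* ... send one or more SMBs on behalf of this vfs request ... */

	_free_xid(xid);
	return rc;
}
#endif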

struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kfree(buf_to_free->iface_list);
	kzfree(buf_to_free);
}

struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL);
	if (!ret_buf->crfid.fid) {
		kfree(ret_buf);
		return NULL;
	}

	atomic_inc(&tconInfoAllocCount);
	ret_buf->tidStatus = CifsNew;
	++ret_buf->tc_count;
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	mutex_init(&ret_buf->crfid.fid_mutex);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);

	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->crfid.fid);
#ifdef CONFIG_CIFS_DFS_UPCALL
	kfree(buf_to_free->dfs_path);
#endif
	kfree(buf_to_free);
}
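
/*
 * Illustrative alloc/free pairing (example_tcon_setup() is hypothetical):
 * a successful tconInfoAlloc() owns crfid.fid, which tconInfoFree()
 * releases along with the zeroed password.
 */
#if 0
static struct cifs_tcon *example_tcon_setup(void)
{
	struct cifs_tcon *tcon = tconInfoAlloc();

	if (!tcon)
		return NULL;
	/* ... fill in treeName, password, ses, etc.; on error call ... */
	/* tconInfoFree(tcon); */
	return tcon;
}
#endif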

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_sync_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}
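
/*
 * Illustrative pairing (example_transact() is hypothetical): callers
 * that receive a response buffer plus a CIFS_{SMALL,LARGE}_BUFFER type
 * tag hand both to free_rsp_buf() so the buffer returns to the right
 * mempool.
 */
#if 0
static void example_transact(void)
{
	int resp_buftype = CIFS_SMALL_BUFFER;
	void *rsp = cifs_small_buf_get();

	/* ... fill in the request, send it, parse the response ... */
	free_rsp_buf(resp_buftype, rsp);
}
#endif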

/*
 * NB: MID cannot be set if treeCon is not passed in; in that case it
 * is the responsibility of the caller to set the mid.
 */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}
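
/*
 * Minimal sketch of building a request frame (illustrative; real
 * callers go through helpers such as small_smb_init() rather than
 * calling header_assemble() directly, and example_build_request() is
 * hypothetical): allocate a buffer, then let header_assemble() fill in
 * the protocol magic, lengths, tid/uid/mid and flags.
 */
#if 0
static struct smb_hdr *example_build_request(struct cifs_tcon *tcon)
{
	struct smb_hdr *buf = cifs_small_buf_get();

	header_assemble(buf, SMB_COM_ECHO, tcon, 1 /* word count */);
	return buf;
}
#endif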

static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}
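
/*
 * Worked example of the framing checks above (for reference): total_read
 * counts the 4-byte RFC1001 header plus the rfclen payload bytes, so a
 * well-formed response satisfies
 *
 *	total_read == 4 + rfclen == smbCalcSize(smb, server)
 *
 * with the >64K "bcc wrapped" case and up to 512 bytes of trailing data
 * as the only tolerated mismatches.
 */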

bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	    (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/* cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		    le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		    le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock break we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				/*
				 * Set flag if the server downgrades the oplock
				 * to L2 else clear.
				 */
				if (pSMB->OplockLevel)
					set_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);
				else
					clear_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);

				cifs_queue_oplock_break(netfile);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s.\n",
			 tcon ? tcon->treeName : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS).\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
	}
}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}
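
/*
 * Usage sketch (example_oplock_downgrade() is hypothetical): the oplock
 * break path downgrades cached state by calling this with OPLOCK_READ
 * (keep read caching only) or 0 (drop all client-side caching).
 */
#if 0
static void example_oplock_downgrade(struct cifsInodeInfo *cinode)
{
	cifs_set_oplock_level(cinode, OPLOCK_READ);	/* keep read cache */
	cifs_set_oplock_level(cinode, 0);		/* drop all caching */
}
#endif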

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}
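
/*
 * Illustrative write-path pairing (example_cached_write() is
 * hypothetical; the real write paths in file.c use the same pattern):
 * hold a writer reference for the duration of the write so an oplock
 * break handler waits for it via the PENDING_WRITERS bit.
 */
#if 0
static int example_cached_write(struct cifsInodeInfo *cinode)
{
	int rc = cifs_get_writer(cinode);

	if (rc)
		return rc;	/* killed while waiting on a break */
	/* ... perform the cached write ... */
	cifs_put_writer(cinode);
	return 0;
}
#endif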

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}
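
/*
 * Lifecycle sketch (example_open_flow() is hypothetical): an open that
 * is still in flight is published so a lease break can find it, then
 * removed once the server response has been processed.
 */
#if 0
static void example_open_flow(struct cifs_fid *fid, struct tcon_link *tlink)
{
	struct cifs_pending_open open;

	cifs_add_pending_open(fid, tlink, &open);
	/* ... send the open/create request and wait for the response ... */
	cifs_del_pending_open(&open);
}
#endif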

/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals must be at least 1, but got %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}
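
/*
 * Caller-side sketch (example_get_referrals() is hypothetical): on
 * success the caller owns the returned array and must release it with
 * free_dfs_info_array(), mirroring the cleanup in the error path above.
 */
#if 0
static void example_get_referrals(struct get_dfs_referral_rsp *rsp,
				  u32 rsp_size, const struct nls_table *cp,
				  int remap, const char *name)
{
	unsigned int num = 0;
	struct dfs_info3_param *refs = NULL;

	if (!parse_dfs_referrals(rsp, rsp_size, &num, &refs, cp, remap,
				 name, true)) {
		/* ... use refs[0 .. num-1] ... */
		free_dfs_info_array(refs, num);
	}
}
#endif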

struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	/*
	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
	 * to false so that we know when we have to unreference pages within
	 * cifs_aio_ctx_release()
	 */
	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}

void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);

	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
	 * which means that iov_iter_get_pages() was a success and thus that
	 * we have taken a reference on the pages.
	 */
	if (ctx->bv) {
		unsigned i;

		for (i = 0; i < ctx->npages; i++) {
			if (ctx->should_dirty)
				set_page_dirty(ctx->bv[i].bv_page);
			put_page(ctx->bv[i].bv_page);
		}
		kvfree(ctx->bv);
	}

	kfree(ctx);
}

#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iov_iter_is_kvec(iter)) {
		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
		if (!bv)
			return -ENOMEM;
	}

	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
	return 0;
}
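
/*
 * Sketch of the intended call sequence (example_direct_write() is
 * hypothetical; the real async read/write paths carry more state):
 * allocate a context, record the file handle, pin the user pages behind
 * the iterator, and drop the last reference when done so that
 * cifs_aio_ctx_release() unpins the pages and puts the handle.
 */
#if 0
static int example_direct_write(struct cifsFileInfo *cfile,
				struct iov_iter *from)
{
	struct cifs_aio_ctx *ctx = cifs_aio_ctx_alloc();
	int rc;

	if (!ctx)
		return -ENOMEM;
	ctx->cfile = cifsFileInfo_get(cfile);
	rc = setup_aio_ctx_iter(ctx, from, WRITE);
	if (rc) {
		kref_put(&ctx->refcount, cifs_aio_ctx_release);
		return rc;
	}
	/* ... issue the I/O against ctx->iter, then drop the last ref ... */
	kref_put(&ctx->refcount, cifs_aio_ctx_release);
	return 0;
}
#endif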

/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	return 0;
}
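
/*
 * Usage sketch (example_digest() is hypothetical and "md5" is just an
 * example algorithm name): allocate once, feed data through the shash
 * descriptor, then free both halves with cifs_free_hash() below.
 */
#if 0
static int example_digest(const u8 *data, size_t len, u8 *out)
{
	struct crypto_shash *shash = NULL;
	struct sdesc *sdesc = NULL;
	int rc = cifs_alloc_hash("md5", &shash, &sdesc);

	if (rc)
		return rc;
	rc = crypto_shash_init(&sdesc->shash);
	if (!rc)
		rc = crypto_shash_update(&sdesc->shash, data, len);
	if (!rc)
		rc = crypto_shash_final(&sdesc->shash, out);
	cifs_free_hash(&shash, &sdesc);
	return rc;
}
#endif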

/**
 * cifs_free_hash - free hash and hash context together
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
	kfree(*sdesc);
	*sdesc = NULL;
	if (*shash)
		crypto_free_shash(*shash);
	*shash = NULL;
}

/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * Input: rqst - a smb_rqst, page - a page index for rqst
 * Output: *len - the length for this page, *offset - the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
			  unsigned int *len, unsigned int *offset)
{
	*len = rqst->rq_pagesz;
	*offset = (page == 0) ? rqst->rq_offset : 0;

	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
		*len = rqst->rq_tailsz;
	else if (page == 0)
		*len = rqst->rq_pagesz - rqst->rq_offset;
}
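
/*
 * Worked example (example_walk_pages() is hypothetical): for a 3-page
 * rqst with rq_pagesz = 4096, rq_offset = 100 and rq_tailsz = 50, the
 * loop below sees (len, offset) pairs of (3996, 100), (4096, 0) and
 * (50, 0).
 */
#if 0
static void example_walk_pages(struct smb_rqst *rqst)
{
	unsigned int i, len, offset;

	for (i = 0; i < rqst->rq_npages; i++) {
		rqst_page_get_length(rqst, i, &len, &offset);
		/* ... map rqst->rq_pages[i] at offset for len bytes ... */
	}
}
#endif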

void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;
}
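
/*
 * Example (example_hostname() is hypothetical): leading slashes of
 * either kind are skipped and no allocation is performed; *h points
 * into the caller's string.
 */
#if 0
static void example_hostname(void)
{
	const char *host;
	size_t len;

	extract_unc_hostname("\\\\srv1\\share\\dir", &host, &len);
	/* host now points at "srv1\\share\\dir", len == 4 */
}
#endif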

/**
 * copy_path_name - copy src path to dst, possibly truncating
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}
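
/*
 * Usage sketch (example_append_path() is hypothetical): dst must have
 * room for PATH_MAX bytes, and the returned count includes the trailing
 * nul, which callers add to the running length of the frame being built.
 */
#if 0
static int example_append_path(char *frame, const char *path)
{
	/* copies, truncating to PATH_MAX-1 chars plus nul if too long */
	int name_len = copy_path_name(frame, path);

	/* name_len counts the trailing nul, e.g. 5 for path "/foo" */
	return name_len;
}
#endif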