/*
 * fs/cifs/misc.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_GetXid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cFYI(1, "warning: more than 65000 requests active");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

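/* Release an xid: the value itself is not recycled, we simply drop
   the count of active vfs operations */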
void
_FreeXid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

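/* Allocate a zeroed cifs_ses and take the initial reference on it */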
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
	}
	return ret_buf;
}

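/* Free a cifs_ses, zeroing the password before it is released */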
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to sesInfoFree");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree(buf_to_free);
}

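/* Allocate a zeroed cifs_tcon and take the initial reference on it */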
struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;
	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
#ifdef CONFIG_CIFS_STATS
		spin_lock_init(&ret_buf->stat_lock);
#endif
	}
	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to tconInfoFree");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	if (ret_buf) {
		memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
		atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
	}

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cFYI(1, "Null buffer passed to cifs_buf_release");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	if (ret_buf) {
		/* No need to clear memory here, cleared in header assemble */
		/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
		atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	}
	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to cifs_small_buf_release");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

/*
   Find a free multiplex id (SMB mid). Otherwise there could be
   mid collisions which might cause problems, demultiplexing the
   wrong response to this request. Multiplex ids could collide if
   one of a series of requests takes much longer than the others, or
   if a very large number of long lived requests (byte range
   locks or FindNotify requests) are pending. No more than
   64K-1 requests can be outstanding at one time. If no
   mids are available, return zero. A future optimization
   could make the combination of mids and uid the key we use
   to demultiplex on (rather than mid alone).
   In addition to the above check, the cifs demultiplex
   code already uses the command code as a secondary
   check of the frame and if signing is negotiated the
   response would be discarded if the mid were the same
   but the signature was wrong. Since the mid is not put in the
   pending queue until later (when it is about to be dispatched)
   we do have to limit the number of outstanding requests
   to somewhat less than 64K-1 although it is hard to imagine
   so many threads being in the vfs at one time.
*/
__u16 GetNextMid(struct TCP_Server_Info *server)
{
	__u16 mid = 0;
	__u16 last_mid;
	bool collision;

	spin_lock(&GlobalMid_Lock);
	last_mid = server->CurrentMid; /* we do not want to loop forever */
	server->CurrentMid++;
	/* This nested loop looks more expensive than it is.
	   In practice the list of pending requests is short,
	   fewer than 50, and the mids are likely to be unique
	   on the first pass through the loop unless some request
	   takes longer than the 64 thousand requests before it
	   (and it would also have to have been a request that
	   did not time out) */
	while (server->CurrentMid != last_mid) {
		struct mid_q_entry *mid_entry;
		unsigned int num_mids;

		collision = false;
		if (server->CurrentMid == 0)
			server->CurrentMid++;

		num_mids = 0;
		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
			++num_mids;
			if (mid_entry->mid == server->CurrentMid &&
			    mid_entry->midState == MID_REQUEST_SUBMITTED) {
				/* This mid is in use, try a different one */
				collision = true;
				break;
			}
		}

		/*
		 * if we have more than 32k mids in the list, then something
		 * is very wrong. Possibly a local user is trying to DoS the
		 * box by issuing long-running calls and SIGKILL'ing them. If
		 * we get to 2^16 mids then we're in big trouble as this
		 * function could loop forever.
		 *
		 * Go ahead and assign out the mid in this situation, but force
		 * an eventual reconnect to clean out the pending_mid_q.
		 */
		if (num_mids > 32768)
			server->tcpStatus = CifsNeedReconnect;

		if (!collision) {
			mid = server->CurrentMid;
			break;
		}
		server->CurrentMid++;
	}
	spin_unlock(&GlobalMid_Lock);
	return mid;
}

/* NB: MID can not be set if treeCon not passed in, in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	struct list_head *temp_item;
	struct cifs_ses *ses;
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = GetNextMid(treeCon->ses->server);
			if (multiuser_mount != 0) {
		/* For the multiuser case, there are a few obvious technically */
		/* possible mechanisms to match the local linux user (uid) */
		/* to a valid remote smb user (smb_uid): */
		/*	1) Query Winbind (or other local pam/nss daemon */
		/*	   for userid/password/logon_domain or credential */
		/*	2) Query Winbind for uid to sid to username mapping */
		/*	   and see if we have a matching password for existing */
		/*	   session for that user perhaps getting password by */
		/*	   adding a new pam_cifs module that stores passwords */
		/*	   so that the cifs vfs can get at that for all logged */
		/*	   on users */
		/*	3) (Which is the mechanism we have chosen) */
		/*	   Search through sessions to the same server for a */
		/*	   match on the uid that was passed in on mount */
		/*	   with the current process's uid (or euid?) and use */
		/*	   that smb uid.  If no existing smb session for */
		/*	   that uid found, use the default smb session ie */
		/*	   the smb session for the volume mounted which is */
		/*	   the same as would be used if the multiuser mount */
		/*	   flag were disabled. */

		/*  BB Add support for establishing new tCon and SMB Session */
		/*  with userid/password pairs found on the smb session */
		/*  for other target tcp/ip addresses	BB */
				if (current_fsuid() != treeCon->ses->linux_uid) {
					cFYI(1, "Multiuser mode and UID "
						 "did not match tcon uid");
					spin_lock(&cifs_tcp_ses_lock);
					list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
						ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
						if (ses->linux_uid == current_fsuid()) {
							if (ses->server == treeCon->ses->server) {
								cFYI(1, "found matching uid substitute right smb_uid");
								buffer->Uid = ses->Suid;
								break;
							} else {
								/* BB eventually call cifs_setup_session here */
								cFYI(1, "local UID found but no smb sess with this server exists");
							}
						}
					}
					spin_unlock(&cifs_tcp_ses_lock);
				}
			}
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sec_mode &
				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

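/* Basic sanity checks on a received SMB: protocol signature, matching
   mid, and that the frame is a response (the only request a server may
   legitimately initiate is an oplock break via SMB_COM_LOCKING_ANDX) */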
static int
check_smb_hdr(struct smb_hdr *smb, __u16 mid)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cERROR(1, "Bad protocol string signature header 0x%x",
			*(unsigned int *)smb->Protocol);
		return 1;
	}

	/* Make sure that message ids match */
	if (mid != smb->Mid) {
		cERROR(1, "Mids do not match. received=%u expected=%u",
			smb->Mid, mid);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
	return 1;
}

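/* Validate a received SMB: check that its length fields are consistent
   with the number of bytes actually read off the wire */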
int
checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
{
	__u32 len = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len);

	if (length < 2 + sizeof(struct smb_hdr)) {
		if ((length >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((length == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cERROR(1, "rcvd invalid byte count (bcc)");
		} else {
			cERROR(1, "Length less than smb header size");
		}
		return 1;
	}
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "smb length greater than MaxBufSize, mid=%d",
			   smb->Mid);
		return 1;
	}

	if (check_smb_hdr(smb, mid))
		return 1;
	clc_len = smbCalcSize(smb);

	if (4 + len != length) {
		cERROR(1, "Length read does not match RFC1001 length %d",
			   len);
		return 1;
	}

	if (4 + len != clc_len) {
		/* check if bcc wrapped around for large read responses */
		if ((len > 64 * 1024) && (len > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
				clc_len, 4 + len, smb->Mid);

		if (4 + len < clc_len) {
			cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
					len, smb->Mid);
			return 1;
		} else if (len > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cERROR(1, "RFC1001 size %u more than 512 bytes larger "
				  "than SMB for mid=%u", len, smb->Mid);
			return 1;
		}
	}
	return 0;
}

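/* Examine an incoming frame and decide whether it is an oplock break
   or dnotify response that should be handled here rather than being
   matched to a pending mid */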
bool
is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
{
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cFYI(1, "Checking for oplock break or dnotify response");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cFYI(1, "dnotify on %s Action: 0x%x",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cFYI(1, "notify err 0x%x",
				pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cFYI(1, "invalid handle on oplock break");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cFYI(1, "oplock type 0x%x level 0x%x",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->num_oplock_brks);
			spin_lock(&cifs_file_list_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->netfid)
					continue;

				cFYI(1, "file id match, oplock break");
				pCifsInode = CIFS_I(netfile->dentry->d_inode);

				cifs_set_oplock_level(pCifsInode,
					pSMB->OplockLevel ? OPLOCK_READ : 0);
				queue_work(system_nrt_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&cifs_file_list_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&cifs_file_list_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cFYI(1, "No matching file for oplock break");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cFYI(1, "Can not process oplock break for non-existent connection");
	return true;
}

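/* Hex/ascii dump of an SMB buffer to the kernel log, enabled by the
   traceSMB flag */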
void
dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
{
	int i, j;
	char debug_line[17];
	unsigned char *buffer;

	if (traceSMB == 0)
		return;

	buffer = (unsigned char *) smb_buf;
	for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
		if (i % 8 == 0) {
			/* have reached the beginning of line */
			printk(KERN_DEBUG "| ");
			j = 0;
		}
		printk("%0#4x ", buffer[i]);
		debug_line[2 * j] = ' ';
		if (isprint(buffer[i]))
			debug_line[1 + (2 * j)] = buffer[i];
		else
			debug_line[1 + (2 * j)] = '_';

		if (i % 8 == 7) {
			/* reached end of line, time to print ascii */
			debug_line[16] = 0;
			printk(" | %s\n", debug_line);
		}
	}
	for (; j < 8; j++) {
		printk("     ");
		debug_line[2 * j] = ' ';
		debug_line[1 + (2 * j)] = ' ';
	}
	printk(" | %s\n", debug_line);
	return;
}

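/* Turn off use of server-provided inode numbers for this mount and
   warn the user once */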
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cERROR(1, "Autodisabling the use of server inode numbers on "
			   "%s. This server doesn't seem to support them "
			   "properly. Hardlinks will not be recognized on this "
			   "mount. Consider mounting with the \"noserverino\" "
			   "option to silence this message.",
			   cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}

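/* Translate the oplock level granted by the server into the client's
   inode caching flags (read caching, or read plus write caching) */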
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->clientCanCacheAll = true;
		cinode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->clientCanCacheAll = false;
		cinode->clientCanCacheRead = true;
		cFYI(1, "Level II Oplock granted on inode %p",
			 &cinode->vfs_inode);
	} else {
		cinode->clientCanCacheAll = false;
		cinode->clientCanCacheRead = false;
	}
}
/*
 * fs/cifs/misc.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

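/* Release an xid by decrementing the count of active vfs operations */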
void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
	}
	return ret_buf;
}

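/* Free a cifs_ses; kzfree ensures the password, the auth key, and the
   session structure itself are zeroed before the memory is released */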
void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kzfree(buf_to_free);
}

struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;
	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		spin_lock_init(&ret_buf->open_file_lock);
#ifdef CONFIG_CIFS_STATS
		spin_lock_init(&ret_buf->stat_lock);
#endif
	}
	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

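/* Return a response buffer to whichever mempool it was allocated from,
   based on the buffer type recorded when the response was received */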
void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

/* NB: MID can not be set if treeCon not passed in, in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/*  endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

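/* Basic sanity checks on a received SMB: protocol signature and that
   the frame is a response (the only request a server may legitimately
   initiate is an oplock break via SMB_COM_LOCKING_ANDX) */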
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}

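/* Validate a received SMB: check that its length fields are consistent
   with the number of bytes actually read from the socket */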
int
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
}

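/* Examine an incoming frame and decide whether it is an oplock break
   or dnotify response that should be handled here rather than being
   matched to a pending mid */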
bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				/*
				 * Set flag if the server downgrades the oplock
				 * to L2 else clear.
				 */
				if (pSMB->OplockLevel)
					set_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);
				else
					clear_bit(
					   CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					   &pCifsInode->flags);

				queue_work(cifsoplockd_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
}

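/* Hex/ascii dump of an SMB buffer to the kernel log, enabled by the
   traceSMB flag */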
void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

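/* Turn off use of server-provided inode numbers for this mount and log
   a warning explaining why */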
void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
			 cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}

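/* Translate the oplock level granted by the server into the client's
   inode caching flags */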
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->vfs_inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

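/* Drop a writer reference; the last writer wakes anything waiting for
   pending writes to drain */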
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

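/* Mark oplock break processing as finished and wake up any waiting
   writers */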
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

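/* Check whether the current task may use the backup intent credentials
   configured for this mount (backupuid/backupgid mount options) */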
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}

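/* Remove a pending open from its tcon's list, taking the tcon's
   open_file_lock */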
void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

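/* Add a pending open to the tcon's list; the caller must already hold
   the tcon's open_file_lock */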
void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 *	- on success - 0
 *	- on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

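/* Allocate a reference-counted context used to track an async (aio)
   read or write request across its lifetime */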
struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
{
	struct cifs_aio_ctx *ctx;

	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;
}

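/* kref release callback: drop the file reference and free the bio_vec
   array and the context itself */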
void
cifs_aio_ctx_release(struct kref *refcount)
{
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);
	kvfree(ctx->bv);
	kfree(ctx);
}

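/* Allocations up to this size use kmalloc; larger ones (or kmalloc
   failures) fall back to vmalloc */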
#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)

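/* Pin the user pages backing @iter and build a bio_vec array in @ctx so
   the data can be referenced for the duration of the async request;
   kvec iterators are simply copied since kernel memory does not go away */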
int
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
{
	ssize_t rc;
	unsigned int cur_npages;
	unsigned int npages = 0;
	unsigned int i;
	size_t len;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	size_t start;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iter->type & ITER_KVEC) {
		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
		ctx->len = count;
		iov_iter_advance(iter, count);
		return 0;
	}

	if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);

	if (!bv) {
		bv = vmalloc(max_pages * sizeof(struct bio_vec));
		if (!bv)
			return -ENOMEM;
	}

	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(struct page *),
				      GFP_KERNEL);

	if (!pages) {
		pages = vmalloc(max_pages * sizeof(struct page *));
		if (!pages) {
			kvfree(bv);
			return -ENOMEM;
		}
	}

	saved_len = count;

	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
		if (rc < 0) {
			cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
			break;
		}

		if (rc > count) {
			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
				 count);
			break;
		}

		iov_iter_advance(iter, rc);
		count -= rc;
		rc += start;
		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);
			break;
		}

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;
			rc -= len;
			start = 0;
		}

		npages += cur_npages;
	}

	kvfree(pages);
	ctx->bv = bv;
	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
	return 0;
}

/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
{
	int rc = 0;
	size_t size;

	if (*sdesc != NULL)
		return 0;

	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);
		*shash = NULL;
		*sdesc = NULL;
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);
		*shash = NULL;
		return -ENOMEM;
	}

	(*sdesc)->shash.tfm = *shash;
	(*sdesc)->shash.flags = 0x0;
	return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
 *
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
{
	kfree(*sdesc);
	*sdesc = NULL;
	if (*shash)
		crypto_free_shash(*shash);
	*shash = NULL;
}