v3.1
 
  1/*
  2 *   fs/cifs/misc.c
  3 *
  4 *   Copyright (C) International Business Machines  Corp., 2002,2008
  5 *   Author(s): Steve French (sfrench@us.ibm.com)
  6 *
  7 *   This library is free software; you can redistribute it and/or modify
  8 *   it under the terms of the GNU Lesser General Public License as published
  9 *   by the Free Software Foundation; either version 2.1 of the License, or
 10 *   (at your option) any later version.
 11 *
 12 *   This library is distributed in the hope that it will be useful,
 13 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 14 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 15 *   the GNU Lesser General Public License for more details.
 16 *
 17 *   You should have received a copy of the GNU Lesser General Public License
 18 *   along with this library; if not, write to the Free Software
 19 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 20 */
 21
 22#include <linux/slab.h>
 23#include <linux/ctype.h>
 24#include <linux/mempool.h>
 25#include "cifspdu.h"
 26#include "cifsglob.h"
 27#include "cifsproto.h"
 28#include "cifs_debug.h"
 29#include "smberr.h"
 30#include "nterr.h"
 31#include "cifs_unicode.h"
 32
 33extern mempool_t *cifs_sm_req_poolp;
 34extern mempool_t *cifs_req_poolp;
 35
 36/* The xid serves as a useful identifier for each incoming vfs request,
 37   in a similar way to the mid which is useful to track each sent smb,
 38   and CurrentXid can also provide a running counter (although it
 39   will eventually wrap past zero) of the total vfs operations handled
 40   since the cifs fs was mounted */
 41
 42unsigned int
 43_GetXid(void)
 44{
 45	unsigned int xid;
 46
 47	spin_lock(&GlobalMid_Lock);
 48	GlobalTotalActiveXid++;
 49
 50	/* keep high water mark for number of simultaneous ops in filesystem */
 51	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
 52		GlobalMaxActiveXid = GlobalTotalActiveXid;
 53	if (GlobalTotalActiveXid > 65000)
 54		cFYI(1, "warning: more than 65000 requests active");
 55	xid = GlobalCurrentXid++;
 56	spin_unlock(&GlobalMid_Lock);
 57	return xid;
 58}
 59
 60void
 61_FreeXid(unsigned int xid)
 62{
 63	spin_lock(&GlobalMid_Lock);
 64	/* if (GlobalTotalActiveXid == 0)
 65		BUG(); */
 66	GlobalTotalActiveXid--;
 67	spin_unlock(&GlobalMid_Lock);
 68}
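
A minimal caller sketch (hypothetical, not part of this file): each VFS entry point brackets its work with an xid so the active-operation counters above stay balanced.

int cifs_example_op(void)
{
	unsigned int xid = _GetXid();	/* bumps GlobalTotalActiveXid */
	int rc = 0;

	/* ... send SMBs tagged with this xid ... */

	_FreeXid(xid);			/* drops the active-op count */
	return rc;
}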
 69
 70struct cifs_ses *
 71sesInfoAlloc(void)
 72{
 73	struct cifs_ses *ret_buf;
 74
 75	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
 76	if (ret_buf) {
 77		atomic_inc(&sesInfoAllocCount);
 78		ret_buf->status = CifsNew;
 79		++ret_buf->ses_count;
 80		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
 81		INIT_LIST_HEAD(&ret_buf->tcon_list);
 82		mutex_init(&ret_buf->session_mutex);
 83	}
 84	return ret_buf;
 85}
 86
 87void
 88sesInfoFree(struct cifs_ses *buf_to_free)
 89{
 90	if (buf_to_free == NULL) {
 91		cFYI(1, "Null buffer passed to sesInfoFree");
 92		return;
 93	}
 94
 95	atomic_dec(&sesInfoAllocCount);
 96	kfree(buf_to_free->serverOS);
 97	kfree(buf_to_free->serverDomain);
 98	kfree(buf_to_free->serverNOS);
 99	if (buf_to_free->password) {
100		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
101		kfree(buf_to_free->password);
102	}
103	kfree(buf_to_free->user_name);
104	kfree(buf_to_free->domainName);
105	kfree(buf_to_free);
106}
107
108struct cifs_tcon *
109tconInfoAlloc(void)
110{
111	struct cifs_tcon *ret_buf;
112	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
113	if (ret_buf) {
114		atomic_inc(&tconInfoAllocCount);
115		ret_buf->tidStatus = CifsNew;
116		++ret_buf->tc_count;
117		INIT_LIST_HEAD(&ret_buf->openFileList);
118		INIT_LIST_HEAD(&ret_buf->tcon_list);
119#ifdef CONFIG_CIFS_STATS
120		spin_lock_init(&ret_buf->stat_lock);
121#endif
122	}
123	return ret_buf;
124}
125
126void
127tconInfoFree(struct cifs_tcon *buf_to_free)
128{
129	if (buf_to_free == NULL) {
130		cFYI(1, "Null buffer passed to tconInfoFree");
131		return;
132	}
133	atomic_dec(&tconInfoAllocCount);
134	kfree(buf_to_free->nativeFileSystem);
135	if (buf_to_free->password) {
136		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
137		kfree(buf_to_free->password);
138	}
139	kfree(buf_to_free);
140}
141
142struct smb_hdr *
143cifs_buf_get(void)
144{
145	struct smb_hdr *ret_buf = NULL;
146
147/* We could use negotiated size instead of max_msgsize -
148   but it may be more efficient to always alloc same size
149   albeit slightly larger than necessary and maxbuffersize
150   defaults to this and can not be bigger */
151	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
152
153	/* clear the first few header bytes */
154	/* for most paths, more is cleared in header_assemble */
155	if (ret_buf) {
156		memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
157		atomic_inc(&bufAllocCount);
158#ifdef CONFIG_CIFS_STATS2
159		atomic_inc(&totBufAllocCount);
160#endif /* CONFIG_CIFS_STATS2 */
161	}
162
163	return ret_buf;
164}
165
166void
167cifs_buf_release(void *buf_to_free)
168{
169	if (buf_to_free == NULL) {
170		/* cFYI(1, "Null buffer passed to cifs_buf_release");*/
171		return;
172	}
173	mempool_free(buf_to_free, cifs_req_poolp);
174
175	atomic_dec(&bufAllocCount);
176	return;
177}
178
179struct smb_hdr *
180cifs_small_buf_get(void)
181{
182	struct smb_hdr *ret_buf = NULL;
183
184/* We could use negotiated size instead of max_msgsize -
185   but it may be more efficient to always alloc same size
186   albeit slightly larger than necessary and maxbuffersize
187   defaults to this and can not be bigger */
188	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
189	if (ret_buf) {
190	/* No need to clear memory here, cleared in header assemble */
191	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
192		atomic_inc(&smBufAllocCount);
193#ifdef CONFIG_CIFS_STATS2
194		atomic_inc(&totSmBufAllocCount);
195#endif /* CONFIG_CIFS_STATS2 */
196
197	}
198	return ret_buf;
199}
200
201void
202cifs_small_buf_release(void *buf_to_free)
203{
204
205	if (buf_to_free == NULL) {
206		cFYI(1, "Null buffer passed to cifs_small_buf_release");
207		return;
208	}
209	mempool_free(buf_to_free, cifs_sm_req_poolp);
210
211	atomic_dec(&smBufAllocCount);
212	return;
213}
214
215/*
216	Find a free multiplex id (SMB mid). Otherwise there could be
217	mid collisions which might cause problems, demultiplexing the
218	wrong response to this request. Multiplex ids could collide if
219	one of a series of requests takes much longer than the others, or
220	if a very large number of long lived requests (byte range
221	locks or FindNotify requests) are pending.  No more than
222	64K-1 requests can be outstanding at one time.  If no
223	mids are available, return zero.  A future optimization
224	could make the combination of mids and uid the key we use
225	to demultiplex on (rather than mid alone).
226	In addition to the above check, the cifs demultiplex
227	code already used the command code as a secondary
228	check of the frame and if signing is negotiated the
229	response would be discarded if the mid were the same
230	but the signature was wrong.  Since the mid is not put in the
231	pending queue until later (when it is about to be dispatched)
232	we do have to limit the number of outstanding requests
233	to somewhat less than 64K-1 although it is hard to imagine
234	so many threads being in the vfs at one time.
235*/
236__u16 GetNextMid(struct TCP_Server_Info *server)
237{
238	__u16 mid = 0;
239	__u16 last_mid;
240	bool collision;
241
242	spin_lock(&GlobalMid_Lock);
243	last_mid = server->CurrentMid; /* we do not want to loop forever */
244	server->CurrentMid++;
245	/* This nested loop looks more expensive than it is.
246	In practice the list of pending requests is short,
247	fewer than 50, and the mids are likely to be unique
248	on the first pass through the loop unless some request
249	takes longer than the 64 thousand requests before it
250	(and it would also have to have been a request that
251	 did not time out) */
252	while (server->CurrentMid != last_mid) {
253		struct mid_q_entry *mid_entry;
254		unsigned int num_mids;
255
256		collision = false;
257		if (server->CurrentMid == 0)
258			server->CurrentMid++;
259
260		num_mids = 0;
261		list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
262			++num_mids;
263			if (mid_entry->mid == server->CurrentMid &&
264			    mid_entry->midState == MID_REQUEST_SUBMITTED) {
265				/* This mid is in use, try a different one */
266				collision = true;
267				break;
268			}
269		}
270
271		/*
272		 * if we have more than 32k mids in the list, then something
273		 * is very wrong. Possibly a local user is trying to DoS the
274		 * box by issuing long-running calls and SIGKILL'ing them. If
275		 * we get to 2^16 mids then we're in big trouble as this
276		 * function could loop forever.
277		 *
278		 * Go ahead and assign out the mid in this situation, but force
279		 * an eventual reconnect to clean out the pending_mid_q.
280		 */
281		if (num_mids > 32768)
282			server->tcpStatus = CifsNeedReconnect;
283
284		if (!collision) {
285			mid = server->CurrentMid;
286			break;
287		}
288		server->CurrentMid++;
289	}
290	spin_unlock(&GlobalMid_Lock);
291	return mid;
292}
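
A standalone sketch of the wrap-around behaviour in the loop above (plain user-space C, assuming 16-bit mids with mid 0 reserved to mean "no mid available"):

#include <stdio.h>

static unsigned short next_mid(unsigned short cur)
{
	cur++;			/* candidate mid */
	if (cur == 0)		/* wrapped past 0xFFFF */
		cur++;		/* 0 is reserved for allocation failure */
	return cur;
}

int main(void)
{
	printf("%#x -> %#x\n", 0xFFFE, next_mid(0xFFFE));	/* 0xfffe -> 0xffff */
	printf("%#x -> %#x\n", 0xFFFF, next_mid(0xFFFF));	/* 0xffff -> 0x1 */
	return 0;
}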
293
 294/* NB: MID cannot be set if treeCon is not passed in; in that
 295   case it is the responsibility of the caller to set the mid */
296void
297header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
298		const struct cifs_tcon *treeCon, int word_count
299		/* length of fixed section (word count) in two byte units  */)
300{
301	struct list_head *temp_item;
302	struct cifs_ses *ses;
303	char *temp = (char *) buffer;
304
305	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
306
307	buffer->smb_buf_length = cpu_to_be32(
308	    (2 * word_count) + sizeof(struct smb_hdr) -
309	    4 /*  RFC 1001 length field does not count */  +
310	    2 /* for bcc field itself */) ;
311
312	buffer->Protocol[0] = 0xFF;
313	buffer->Protocol[1] = 'S';
314	buffer->Protocol[2] = 'M';
315	buffer->Protocol[3] = 'B';
316	buffer->Command = smb_command;
317	buffer->Flags = 0x00;	/* case sensitive */
318	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
319	buffer->Pid = cpu_to_le16((__u16)current->tgid);
320	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
321	if (treeCon) {
322		buffer->Tid = treeCon->tid;
323		if (treeCon->ses) {
324			if (treeCon->ses->capabilities & CAP_UNICODE)
325				buffer->Flags2 |= SMBFLG2_UNICODE;
326			if (treeCon->ses->capabilities & CAP_STATUS32)
327				buffer->Flags2 |= SMBFLG2_ERR_STATUS;
328
329			/* Uid is not converted */
330			buffer->Uid = treeCon->ses->Suid;
331			buffer->Mid = GetNextMid(treeCon->ses->server);
332			if (multiuser_mount != 0) {
333		/* For the multiuser case, there are a few obvious technically */
334		/* possible mechanisms to match the local linux user (uid)    */
335		/* to a valid remote smb user (smb_uid):		      */
336		/* 	1) Query Winbind (or other local pam/nss daemon       */
337		/* 	  for userid/password/logon_domain or credential      */
338		/*      2) Query Winbind for uid to sid to username mapping   */
339		/* 	   and see if we have a matching password for existing*/
340		/*         session for that user perhaps getting password by  */
341		/*         adding a new pam_cifs module that stores passwords */
342		/*         so that the cifs vfs can get at that for all logged*/
343		/*	   on users					      */
344		/*	3) (Which is the mechanism we have chosen)	      */
345		/*	   Search through sessions to the same server for a   */
346		/*	   match on the uid that was passed in on mount       */
347		/*         with the current process's uid (or euid?) and use  */
348		/* 	   that smb uid.   If no existing smb session for     */
349		/* 	   that uid found, use the default smb session ie     */
350		/*         the smb session for the volume mounted which is    */
351		/* 	   the same as would be used if the multiuser mount   */
352		/* 	   flag were disabled.  */
353
354		/*  BB Add support for establishing new tCon and SMB Session  */
355		/*      with userid/password pairs found on the smb session   */
356		/*	for other target tcp/ip addresses 		BB    */
357				if (current_fsuid() != treeCon->ses->linux_uid) {
358					cFYI(1, "Multiuser mode and UID "
359						 "did not match tcon uid");
360					spin_lock(&cifs_tcp_ses_lock);
361					list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
362						ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
363						if (ses->linux_uid == current_fsuid()) {
364							if (ses->server == treeCon->ses->server) {
365								cFYI(1, "found matching uid substitute right smb_uid");
366								buffer->Uid = ses->Suid;
367								break;
368							} else {
369				/* BB eventually call cifs_setup_session here */
370								cFYI(1, "local UID found but no smb sess with this server exists");
371							}
372						}
373					}
374					spin_unlock(&cifs_tcp_ses_lock);
375				}
376			}
377		}
378		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
379			buffer->Flags2 |= SMBFLG2_DFS;
380		if (treeCon->nocase)
381			buffer->Flags  |= SMBFLG_CASELESS;
382		if ((treeCon->ses) && (treeCon->ses->server))
383			if (treeCon->ses->server->sec_mode &
384			  (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
385				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
386	}
387
388/*  endian conversion of flags is now done just before sending */
389	buffer->WordCount = (char) word_count;
390	return;
391}
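
A worked breakdown of the smb_buf_length computation above (illustrative; the exact sizeof(struct smb_hdr) comes from cifspdu.h):

/*
 * smb_buf_length = 2 * word_count              parameter words, 2 bytes each
 *                + sizeof(struct smb_hdr) - 4  fixed header, minus the 4-byte
 *                                              RFC 1001 length field, which
 *                                              does not count itself
 *                + 2                           the bcc field itself
 *
 * Any data bytes following bcc are added to this length later by callers.
 */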
392
393static int
394check_smb_hdr(struct smb_hdr *smb, __u16 mid)
395{
396	/* does it have the right SMB "signature" ? */
397	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
398		cERROR(1, "Bad protocol string signature header 0x%x",
399			*(unsigned int *)smb->Protocol);
400		return 1;
401	}
402
403	/* Make sure that message ids match */
404	if (mid != smb->Mid) {
405		cERROR(1, "Mids do not match. received=%u expected=%u",
406			smb->Mid, mid);
407		return 1;
408	}
409
410	/* if it's a response then accept */
411	if (smb->Flags & SMBFLG_RESPONSE)
412		return 0;
413
414	/* only one valid case where server sends us request */
415	if (smb->Command == SMB_COM_LOCKING_ANDX)
416		return 0;
417
418	cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
419	return 1;
420}
421
422int
423checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
424{
425	__u32 len = be32_to_cpu(smb->smb_buf_length);
426	__u32 clc_len;  /* calculated length */
427	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len);
428
429	if (length < 2 + sizeof(struct smb_hdr)) {
430		if ((length >= sizeof(struct smb_hdr) - 1)
431			    && (smb->Status.CifsError != 0)) {
432			smb->WordCount = 0;
433			/* some error cases do not return wct and bcc */
434			return 0;
435		} else if ((length == sizeof(struct smb_hdr) + 1) &&
436				(smb->WordCount == 0)) {
437			char *tmp = (char *)smb;
438			/* Need to work around a bug in two servers here */
439			/* First, check if the part of bcc they sent was zero */
440			if (tmp[sizeof(struct smb_hdr)] == 0) {
441				/* some servers return only half of bcc
442				 * on simple responses (wct, bcc both zero)
443				 * in particular have seen this on
444				 * ulogoffX and FindClose. This leaves
445				 * one byte of bcc potentially uninitialized
446				 */
447				/* zero rest of bcc */
448				tmp[sizeof(struct smb_hdr)+1] = 0;
449				return 0;
450			}
451			cERROR(1, "rcvd invalid byte count (bcc)");
452		} else {
453			cERROR(1, "Length less than smb header size");
454		}
455		return 1;
456	}
457	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
458		cERROR(1, "smb length greater than MaxBufSize, mid=%d",
459				   smb->Mid);
460		return 1;
461	}
462
463	if (check_smb_hdr(smb, mid))
464		return 1;
465	clc_len = smbCalcSize(smb);
466
467	if (4 + len != length) {
468		cERROR(1, "Length read does not match RFC1001 length %d",
469			   len);
470		return 1;
471	}
472
473	if (4 + len != clc_len) {
474		/* check if bcc wrapped around for large read responses */
475		if ((len > 64 * 1024) && (len > clc_len)) {
476			/* check if lengths match mod 64K */
477			if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
478				return 0; /* bcc wrapped */
479		}
480		cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
481				clc_len, 4 + len, smb->Mid);
482
483		if (4 + len < clc_len) {
484			cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
485					len, smb->Mid);
486			return 1;
487		} else if (len > clc_len + 512) {
488			/*
489			 * Some servers (Windows XP in particular) send more
490			 * data than the lengths in the SMB packet would
491			 * indicate on certain calls (byte range locks and
492			 * trans2 find first calls in particular). While the
493			 * client can handle such a frame by ignoring the
494			 * trailing data, we choose to limit the amount of extra
495			 * data to 512 bytes.
496			 */
497			cERROR(1, "RFC1001 size %u more than 512 bytes larger "
498				  "than SMB for mid=%u", len, smb->Mid);
499			return 1;
500		}
501	}
502	return 0;
503}
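
The mod-64K wrap test above can be demonstrated in isolation (standalone user-space C, assuming a 70000-byte read response whose 16-bit bcc field wrapped once):

#include <stdio.h>

int main(void)
{
	unsigned int rfclen = 70000;			/* length on the wire */
	unsigned int clc_len = (4 + rfclen) - 65536;	/* bcc wrapped once */

	if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
		printf("lengths match mod 64K: bcc wrapped, frame accepted\n");
	return 0;
}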
504
505bool
506is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
507{
508	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
509	struct list_head *tmp, *tmp1, *tmp2;
510	struct cifs_ses *ses;
511	struct cifs_tcon *tcon;
512	struct cifsInodeInfo *pCifsInode;
513	struct cifsFileInfo *netfile;
514
515	cFYI(1, "Checking for oplock break or dnotify response");
516	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
517	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
518		struct smb_com_transaction_change_notify_rsp *pSMBr =
519			(struct smb_com_transaction_change_notify_rsp *)buf;
520		struct file_notify_information *pnotify;
521		__u32 data_offset = 0;
522		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
523			data_offset = le32_to_cpu(pSMBr->DataOffset);
524
525			pnotify = (struct file_notify_information *)
526				((char *)&pSMBr->hdr.Protocol + data_offset);
527			cFYI(1, "dnotify on %s Action: 0x%x",
528				 pnotify->FileName, pnotify->Action);
529			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
530				sizeof(struct smb_hdr)+60); */
531			return true;
532		}
533		if (pSMBr->hdr.Status.CifsError) {
534			cFYI(1, "notify err 0x%d",
535				pSMBr->hdr.Status.CifsError);
536			return true;
537		}
538		return false;
539	}
540	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
541		return false;
542	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
543		/* no sense logging error on invalid handle on oplock
544		   break - harmless race between close request and oplock
545		   break response is expected from time to time when writing out
546		   large dirty files cached on the client */
547		if ((NT_STATUS_INVALID_HANDLE) ==
548		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
549			cFYI(1, "invalid handle on oplock break");
550			return true;
551		} else if (ERRbadfid ==
552		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
553			return true;
554		} else {
555			return false; /* on valid oplock brk we get "request" */
556		}
557	}
558	if (pSMB->hdr.WordCount != 8)
559		return false;
560
561	cFYI(1, "oplock type 0x%d level 0x%d",
562		 pSMB->LockType, pSMB->OplockLevel);
563	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
564		return false;
565
566	/* look up tcon based on tid & uid */
567	spin_lock(&cifs_tcp_ses_lock);
568	list_for_each(tmp, &srv->smb_ses_list) {
569		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
570		list_for_each(tmp1, &ses->tcon_list) {
571			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
572			if (tcon->tid != buf->Tid)
573				continue;
574
575			cifs_stats_inc(&tcon->num_oplock_brks);
576			spin_lock(&cifs_file_list_lock);
577			list_for_each(tmp2, &tcon->openFileList) {
578				netfile = list_entry(tmp2, struct cifsFileInfo,
579						     tlist);
580				if (pSMB->Fid != netfile->netfid)
581					continue;
582
583				cFYI(1, "file id match, oplock break");
584				pCifsInode = CIFS_I(netfile->dentry->d_inode);
585
586				cifs_set_oplock_level(pCifsInode,
587					pSMB->OplockLevel ? OPLOCK_READ : 0);
588				queue_work(system_nrt_wq,
589					   &netfile->oplock_break);
590				netfile->oplock_break_cancelled = false;
591
592				spin_unlock(&cifs_file_list_lock);
593				spin_unlock(&cifs_tcp_ses_lock);
594				return true;
595			}
596			spin_unlock(&cifs_file_list_lock);
597			spin_unlock(&cifs_tcp_ses_lock);
598			cFYI(1, "No matching file for oplock break");
599			return true;
600		}
601	}
602	spin_unlock(&cifs_tcp_ses_lock);
603	cFYI(1, "Can not process oplock break for non-existent connection");
604	return true;
605}
606
607void
608dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
609{
610	int i, j;
611	char debug_line[17];
612	unsigned char *buffer;
613
614	if (traceSMB == 0)
615		return;
616
617	buffer = (unsigned char *) smb_buf;
618	for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
619		if (i % 8 == 0) {
620			/* have reached the beginning of line */
621			printk(KERN_DEBUG "| ");
622			j = 0;
623		}
624		printk("%0#4x ", buffer[i]);
625		debug_line[2 * j] = ' ';
626		if (isprint(buffer[i]))
627			debug_line[1 + (2 * j)] = buffer[i];
628		else
629			debug_line[1 + (2 * j)] = '_';
630
631		if (i % 8 == 7) {
632			/* reached end of line, time to print ascii */
633			debug_line[16] = 0;
634			printk(" | %s\n", debug_line);
635		}
636	}
637	for (; j < 8; j++) {
638		printk("     ");
639		debug_line[2 * j] = ' ';
640		debug_line[1 + (2 * j)] = ' ';
641	}
642	printk(" | %s\n", debug_line);
643	return;
644}
645
646void
647cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
648{
649	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
650		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
651		cERROR(1, "Autodisabling the use of server inode numbers on "
652			   "%s. This server doesn't seem to support them "
653			   "properly. Hardlinks will not be recognized on this "
654			   "mount. Consider mounting with the \"noserverino\" "
655			   "option to silence this message.",
656			   cifs_sb_master_tcon(cifs_sb)->treeName);
657	}
658}
659
660void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
661{
662	oplock &= 0xF;
663
664	if (oplock == OPLOCK_EXCLUSIVE) {
665		cinode->clientCanCacheAll = true;
666		cinode->clientCanCacheRead = true;
667		cFYI(1, "Exclusive Oplock granted on inode %p",
668		     &cinode->vfs_inode);
669	} else if (oplock == OPLOCK_READ) {
670		cinode->clientCanCacheAll = false;
671		cinode->clientCanCacheRead = true;
672		cFYI(1, "Level II Oplock granted on inode %p",
673		    &cinode->vfs_inode);
674	} else {
675		cinode->clientCanCacheAll = false;
676		cinode->clientCanCacheRead = false;
677	}
678}
v6.2
   1// SPDX-License-Identifier: LGPL-2.1
   2/*
   3 *
   4 *   Copyright (C) International Business Machines  Corp., 2002,2008
   5 *   Author(s): Steve French (sfrench@us.ibm.com)
   6 *
   7 */
   8
   9#include <linux/slab.h>
  10#include <linux/ctype.h>
  11#include <linux/mempool.h>
  12#include <linux/vmalloc.h>
  13#include "cifspdu.h"
  14#include "cifsglob.h"
  15#include "cifsproto.h"
  16#include "cifs_debug.h"
  17#include "smberr.h"
  18#include "nterr.h"
  19#include "cifs_unicode.h"
  20#include "smb2pdu.h"
  21#include "cifsfs.h"
  22#ifdef CONFIG_CIFS_DFS_UPCALL
  23#include "dns_resolve.h"
  24#endif
  25#include "fs_context.h"
  26#include "cached_dir.h"
  27
  28extern mempool_t *cifs_sm_req_poolp;
  29extern mempool_t *cifs_req_poolp;
  30
  31/* The xid serves as a useful identifier for each incoming vfs request,
  32   in a similar way to the mid which is useful to track each sent smb,
  33   and CurrentXid can also provide a running counter (although it
  34   will eventually wrap past zero) of the total vfs operations handled
  35   since the cifs fs was mounted */
  36
  37unsigned int
  38_get_xid(void)
  39{
  40	unsigned int xid;
  41
  42	spin_lock(&GlobalMid_Lock);
  43	GlobalTotalActiveXid++;
  44
  45	/* keep high water mark for number of simultaneous ops in filesystem */
  46	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
  47		GlobalMaxActiveXid = GlobalTotalActiveXid;
  48	if (GlobalTotalActiveXid > 65000)
  49		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
  50	xid = GlobalCurrentXid++;
  51	spin_unlock(&GlobalMid_Lock);
  52	return xid;
  53}
  54
  55void
  56_free_xid(unsigned int xid)
  57{
  58	spin_lock(&GlobalMid_Lock);
  59	/* if (GlobalTotalActiveXid == 0)
  60		BUG(); */
  61	GlobalTotalActiveXid--;
  62	spin_unlock(&GlobalMid_Lock);
  63}
  64
  65struct cifs_ses *
  66sesInfoAlloc(void)
  67{
  68	struct cifs_ses *ret_buf;
  69
  70	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
  71	if (ret_buf) {
  72		atomic_inc(&sesInfoAllocCount);
  73		spin_lock_init(&ret_buf->ses_lock);
  74		ret_buf->ses_status = SES_NEW;
  75		++ret_buf->ses_count;
  76		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
  77		INIT_LIST_HEAD(&ret_buf->tcon_list);
  78		mutex_init(&ret_buf->session_mutex);
  79		spin_lock_init(&ret_buf->iface_lock);
  80		INIT_LIST_HEAD(&ret_buf->iface_list);
  81		spin_lock_init(&ret_buf->chan_lock);
  82	}
  83	return ret_buf;
  84}
  85
  86void
  87sesInfoFree(struct cifs_ses *buf_to_free)
  88{
  89	struct cifs_server_iface *iface = NULL, *niface = NULL;
  90
  91	if (buf_to_free == NULL) {
  92		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
  93		return;
  94	}
  95
  96	atomic_dec(&sesInfoAllocCount);
  97	kfree(buf_to_free->serverOS);
  98	kfree(buf_to_free->serverDomain);
  99	kfree(buf_to_free->serverNOS);
 100	kfree_sensitive(buf_to_free->password);
 101	kfree(buf_to_free->user_name);
 102	kfree(buf_to_free->domainName);
 103	kfree_sensitive(buf_to_free->auth_key.response);
 104	spin_lock(&buf_to_free->iface_lock);
 105	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
 106				 iface_head)
 107		kref_put(&iface->refcount, release_iface);
 108	spin_unlock(&buf_to_free->iface_lock);
 109	kfree_sensitive(buf_to_free);
 110}
 111
 112struct cifs_tcon *
 113tconInfoAlloc(void)
 114{
 115	struct cifs_tcon *ret_buf;
 116
 117	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
 118	if (!ret_buf)
 119		return NULL;
 120	ret_buf->cfids = init_cached_dirs();
 121	if (!ret_buf->cfids) {
 122		kfree(ret_buf);
 123		return NULL;
 124	}
 125
 126	atomic_inc(&tconInfoAllocCount);
 127	ret_buf->status = TID_NEW;
 128	++ret_buf->tc_count;
 129	spin_lock_init(&ret_buf->tc_lock);
 130	INIT_LIST_HEAD(&ret_buf->openFileList);
 131	INIT_LIST_HEAD(&ret_buf->tcon_list);
 132	spin_lock_init(&ret_buf->open_file_lock);
 133	spin_lock_init(&ret_buf->stat_lock);
 134	atomic_set(&ret_buf->num_local_opens, 0);
 135	atomic_set(&ret_buf->num_remote_opens, 0);
 136
 137	return ret_buf;
 138}
 139
 140void
 141tconInfoFree(struct cifs_tcon *tcon)
 142{
 143	if (tcon == NULL) {
 144		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
 145		return;
 146	}
 147	free_cached_dirs(tcon->cfids);
 148	atomic_dec(&tconInfoAllocCount);
 149	kfree(tcon->nativeFileSystem);
 150	kfree_sensitive(tcon->password);
 151	kfree(tcon);
 152}
 153
 154struct smb_hdr *
 155cifs_buf_get(void)
 156{
 157	struct smb_hdr *ret_buf = NULL;
 158	/*
 159	 * The SMB2 header is bigger than the CIFS one - no problem clearing
 160	 * a few more bytes for CIFS.
 161	 */
 162	size_t buf_size = sizeof(struct smb2_hdr);
 163
 164	/*
 165	 * We could use negotiated size instead of max_msgsize -
 166	 * but it may be more efficient to always alloc same size
 167	 * albeit slightly larger than necessary and maxbuffersize
 168	 * defaults to this and can not be bigger.
 169	 */
 170	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
 171
 172	/* clear the first few header bytes */
 173	/* for most paths, more is cleared in header_assemble */
 174	memset(ret_buf, 0, buf_size + 3);
 175	atomic_inc(&buf_alloc_count);
 176#ifdef CONFIG_CIFS_STATS2
 177	atomic_inc(&total_buf_alloc_count);
 178#endif /* CONFIG_CIFS_STATS2 */
 179
 180	return ret_buf;
 181}
 182
 183void
 184cifs_buf_release(void *buf_to_free)
 185{
 186	if (buf_to_free == NULL) {
 187		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
 188		return;
 189	}
 190	mempool_free(buf_to_free, cifs_req_poolp);
 191
 192	atomic_dec(&buf_alloc_count);
 193	return;
 194}
 195
 196struct smb_hdr *
 197cifs_small_buf_get(void)
 198{
 199	struct smb_hdr *ret_buf = NULL;
 200
 201/* We could use negotiated size instead of max_msgsize -
 202   but it may be more efficient to always alloc same size
 203   albeit slightly larger than necessary and maxbuffersize
 204   defaults to this and can not be bigger */
 205	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
 206	/* No need to clear memory here, cleared in header assemble */
 207	/*	memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
 208	atomic_inc(&small_buf_alloc_count);
 209#ifdef CONFIG_CIFS_STATS2
 210	atomic_inc(&total_small_buf_alloc_count);
 211#endif /* CONFIG_CIFS_STATS2 */
 212
 213	return ret_buf;
 214}
 215
 216void
 217cifs_small_buf_release(void *buf_to_free)
 218{
 219
 220	if (buf_to_free == NULL) {
 221		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
 222		return;
 223	}
 224	mempool_free(buf_to_free, cifs_sm_req_poolp);
 225
 226	atomic_dec(&small_buf_alloc_count);
 227	return;
 228}
 229
 230void
 231free_rsp_buf(int resp_buftype, void *rsp)
 232{
 233	if (resp_buftype == CIFS_SMALL_BUFFER)
 234		cifs_small_buf_release(rsp);
 235	else if (resp_buftype == CIFS_LARGE_BUFFER)
 236		cifs_buf_release(rsp);
 237}
 238
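
A hypothetical caller pattern (SMB2_example_call is a stand-in name, not a real function): responses travel together with their buffer type so free_rsp_buf() can return them to the correct mempool.

/* hypothetical caller, for illustration only */
static int example_query(unsigned int xid, struct cifs_tcon *tcon)
{
	struct kvec rsp_iov;
	int resp_buftype = CIFS_NO_BUFFER;
	int rc;

	rc = SMB2_example_call(xid, tcon, &resp_buftype, &rsp_iov);
	/* ... parse rsp_iov.iov_base while the buffer is held ... */
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}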
 239/* NB: MID cannot be set if treeCon is not passed in; in that
 240   case it is the responsibility of the caller to set the mid */
 241void
 242header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
 243		const struct cifs_tcon *treeCon, int word_count
 244		/* length of fixed section (word count) in two byte units  */)
 245{
 246	char *temp = (char *) buffer;
 247
 248	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
 249
 250	buffer->smb_buf_length = cpu_to_be32(
 251	    (2 * word_count) + sizeof(struct smb_hdr) -
 252	    4 /*  RFC 1001 length field does not count */  +
 253	    2 /* for bcc field itself */) ;
 254
 255	buffer->Protocol[0] = 0xFF;
 256	buffer->Protocol[1] = 'S';
 257	buffer->Protocol[2] = 'M';
 258	buffer->Protocol[3] = 'B';
 259	buffer->Command = smb_command;
 260	buffer->Flags = 0x00;	/* case sensitive */
 261	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
 262	buffer->Pid = cpu_to_le16((__u16)current->tgid);
 263	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
 264	if (treeCon) {
 265		buffer->Tid = treeCon->tid;
 266		if (treeCon->ses) {
 267			if (treeCon->ses->capabilities & CAP_UNICODE)
 268				buffer->Flags2 |= SMBFLG2_UNICODE;
 269			if (treeCon->ses->capabilities & CAP_STATUS32)
 270				buffer->Flags2 |= SMBFLG2_ERR_STATUS;
 271
 272			/* Uid is not converted */
 273			buffer->Uid = treeCon->ses->Suid;
 274			if (treeCon->ses->server)
 275				buffer->Mid = get_next_mid(treeCon->ses->server);
 276		}
 277		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
 278			buffer->Flags2 |= SMBFLG2_DFS;
 279		if (treeCon->nocase)
 280			buffer->Flags  |= SMBFLG_CASELESS;
 281		if ((treeCon->ses) && (treeCon->ses->server))
 282			if (treeCon->ses->server->sign)
 283				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 284	}
 285
 286/*  endian conversion of flags is now done just before sending */
 287	buffer->WordCount = (char) word_count;
 288	return;
 289}
 290
 291static int
 292check_smb_hdr(struct smb_hdr *smb)
 293{
 294	/* does it have the right SMB "signature" ? */
 295	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
 296		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
 297			 *(unsigned int *)smb->Protocol);
 298		return 1;
 299	}
 300
 301	/* if it's a response then accept */
 302	if (smb->Flags & SMBFLG_RESPONSE)
 303		return 0;
 304
 305	/* only one valid case where server sends us request */
 306	if (smb->Command == SMB_COM_LOCKING_ANDX)
 307		return 0;
 308
 309	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
 310		 get_mid(smb));
 311	return 1;
 312}
 313
 314int
 315checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
 316{
 317	struct smb_hdr *smb = (struct smb_hdr *)buf;
 318	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
 319	__u32 clc_len;  /* calculated length */
 320	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
 321		 total_read, rfclen);
 322
 323	/* is this frame too small to even get to a BCC? */
 324	if (total_read < 2 + sizeof(struct smb_hdr)) {
 325		if ((total_read >= sizeof(struct smb_hdr) - 1)
 326			    && (smb->Status.CifsError != 0)) {
 327			/* it's an error return */
 328			smb->WordCount = 0;
 329			/* some error cases do not return wct and bcc */
 330			return 0;
 331		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
 332				(smb->WordCount == 0)) {
 333			char *tmp = (char *)smb;
 334			/* Need to work around a bug in two servers here */
 335			/* First, check if the part of bcc they sent was zero */
 336			if (tmp[sizeof(struct smb_hdr)] == 0) {
 337				/* some servers return only half of bcc
 338				 * on simple responses (wct, bcc both zero)
 339				 * in particular have seen this on
 340				 * ulogoffX and FindClose. This leaves
 341				 * one byte of bcc potentially uninitialized
 342				 */
 343				/* zero rest of bcc */
 344				tmp[sizeof(struct smb_hdr)+1] = 0;
 345				return 0;
 346			}
 347			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
 348		} else {
 349			cifs_dbg(VFS, "Length less than smb header size\n");
 350		}
 351		return -EIO;
 352	}
 353
 354	/* otherwise, there is enough to get to the BCC */
 355	if (check_smb_hdr(smb))
 356		return -EIO;
 357	clc_len = smbCalcSize(smb);
 358
 359	if (4 + rfclen != total_read) {
 360		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
 361			 rfclen);
 362		return -EIO;
 363	}
 364
 365	if (4 + rfclen != clc_len) {
 366		__u16 mid = get_mid(smb);
 367		/* check if bcc wrapped around for large read responses */
 368		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
 369			/* check if lengths match mod 64K */
 370			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
 371				return 0; /* bcc wrapped */
 372		}
 373		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
 374			 clc_len, 4 + rfclen, mid);
 375
 376		if (4 + rfclen < clc_len) {
 377			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
 378				 rfclen, mid);
 379			return -EIO;
 380		} else if (rfclen > clc_len + 512) {
 381			/*
 382			 * Some servers (Windows XP in particular) send more
 383			 * data than the lengths in the SMB packet would
 384			 * indicate on certain calls (byte range locks and
 385			 * trans2 find first calls in particular). While the
 386			 * client can handle such a frame by ignoring the
 387			 * trailing data, we choose to limit the amount of extra
 388			 * data to 512 bytes.
 389			 */
 390			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
 391				 rfclen, mid);
 392			return -EIO;
 393		}
 394	}
 395	return 0;
 396}
 397
 398bool
 399is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
 400{
 401	struct smb_hdr *buf = (struct smb_hdr *)buffer;
 402	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
 403	struct TCP_Server_Info *pserver;
 404	struct cifs_ses *ses;
 405	struct cifs_tcon *tcon;
 406	struct cifsInodeInfo *pCifsInode;
 407	struct cifsFileInfo *netfile;
 408
 409	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
 410	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
 411	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
 412		struct smb_com_transaction_change_notify_rsp *pSMBr =
 413			(struct smb_com_transaction_change_notify_rsp *)buf;
 414		struct file_notify_information *pnotify;
 415		__u32 data_offset = 0;
 416		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
 417
 418		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
 419			data_offset = le32_to_cpu(pSMBr->DataOffset);
 420
 421			if (data_offset >
 422			    len - sizeof(struct file_notify_information)) {
 423				cifs_dbg(FYI, "Invalid data_offset %u\n",
 424					 data_offset);
 425				return true;
 426			}
 427			pnotify = (struct file_notify_information *)
 428				((char *)&pSMBr->hdr.Protocol + data_offset);
 429			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
 430				 pnotify->FileName, pnotify->Action);
 431			/*   cifs_dump_mem("Rcvd notify Data: ",buf,
 432				sizeof(struct smb_hdr)+60); */
 433			return true;
 434		}
 435		if (pSMBr->hdr.Status.CifsError) {
 436			cifs_dbg(FYI, "notify err 0x%x\n",
 437				 pSMBr->hdr.Status.CifsError);
 438			return true;
 439		}
 440		return false;
 441	}
 442	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
 443		return false;
 444	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
 445		/* no sense logging error on invalid handle on oplock
 446		   break - harmless race between close request and oplock
 447		   break response is expected from time to time when writing out
 448		   large dirty files cached on the client */
 449		if ((NT_STATUS_INVALID_HANDLE) ==
 450		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
 451			cifs_dbg(FYI, "Invalid handle on oplock break\n");
 452			return true;
 453		} else if (ERRbadfid ==
 454		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
 455			return true;
 456		} else {
 457			return false; /* on valid oplock brk we get "request" */
 458		}
 459	}
 460	if (pSMB->hdr.WordCount != 8)
 461		return false;
 462
 463	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
 464		 pSMB->LockType, pSMB->OplockLevel);
 465	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
 466		return false;
 467
 468	/* If server is a channel, select the primary channel */
 469	pserver = CIFS_SERVER_IS_CHAN(srv) ? srv->primary_server : srv;
 470
 471	/* look up tcon based on tid & uid */
 472	spin_lock(&cifs_tcp_ses_lock);
 473	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
 474		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
 475			if (tcon->tid != buf->Tid)
 476				continue;
 477
 478			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
 479			spin_lock(&tcon->open_file_lock);
 480			list_for_each_entry(netfile, &tcon->openFileList, tlist) {
 481				if (pSMB->Fid != netfile->fid.netfid)
 482					continue;
 483
 484				cifs_dbg(FYI, "file id match, oplock break\n");
 485				pCifsInode = CIFS_I(d_inode(netfile->dentry));
 486
 487				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
 488					&pCifsInode->flags);
 489
 490				netfile->oplock_epoch = 0;
 491				netfile->oplock_level = pSMB->OplockLevel;
 492				netfile->oplock_break_cancelled = false;
 493				cifs_queue_oplock_break(netfile);
 494
 495				spin_unlock(&tcon->open_file_lock);
 496				spin_unlock(&cifs_tcp_ses_lock);
 497				return true;
 498			}
 499			spin_unlock(&tcon->open_file_lock);
 500			spin_unlock(&cifs_tcp_ses_lock);
 501			cifs_dbg(FYI, "No matching file for oplock break\n");
 502			return true;
 503		}
 504	}
 505	spin_unlock(&cifs_tcp_ses_lock);
 506	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
 507	return true;
 508}
 509
 510void
 511dump_smb(void *buf, int smb_buf_length)
 512{
 513	if (traceSMB == 0)
 514		return;
 515
 516	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
 517		       smb_buf_length, true);
 518}
 519
 520void
 521cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
 522{
 523	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 524		struct cifs_tcon *tcon = NULL;
 525
 526		if (cifs_sb->master_tlink)
 527			tcon = cifs_sb_master_tcon(cifs_sb);
 528
 529		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
 530		cifs_sb->mnt_cifs_serverino_autodisabled = true;
 531		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
 532			 tcon ? tcon->tree_name : "new server");
 533		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
 534		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
 535
 536	}
 537}
 538
 539void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
 540{
 541	oplock &= 0xF;
 542
 543	if (oplock == OPLOCK_EXCLUSIVE) {
 544		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
 545		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
 546			 &cinode->netfs.inode);
 547	} else if (oplock == OPLOCK_READ) {
 548		cinode->oplock = CIFS_CACHE_READ_FLG;
 549		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
 550			 &cinode->netfs.inode);
 551	} else
 552		cinode->oplock = 0;
 553}
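
In table form, the mapping implemented above:

/*
 *   oplock value       resulting cinode->oplock
 *   ----------------   ------------------------------------------
 *   OPLOCK_EXCLUSIVE   CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG
 *   OPLOCK_READ        CIFS_CACHE_READ_FLG
 *   anything else      0 (no client-side caching)
 */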
 554
 555/*
 556 * We wait for oplock breaks to be processed before we attempt to perform
 557 * writes.
 558 */
 559int cifs_get_writer(struct cifsInodeInfo *cinode)
 560{
 561	int rc;
 562
 563start:
 564	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
 565			 TASK_KILLABLE);
 566	if (rc)
 567		return rc;
 568
 569	spin_lock(&cinode->writers_lock);
 570	if (!cinode->writers)
 571		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
 572	cinode->writers++;
 573	/* Check to see if we have started servicing an oplock break */
 574	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
 575		cinode->writers--;
 576		if (cinode->writers == 0) {
 577			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
 578			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
 579		}
 580		spin_unlock(&cinode->writers_lock);
 581		goto start;
 582	}
 583	spin_unlock(&cinode->writers_lock);
 584	return 0;
 585}
 586
 587void cifs_put_writer(struct cifsInodeInfo *cinode)
 588{
 589	spin_lock(&cinode->writers_lock);
 590	cinode->writers--;
 591	if (cinode->writers == 0) {
 592		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
 593		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
 594	}
 595	spin_unlock(&cinode->writers_lock);
 596}
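
A minimal sketch of the intended pairing (hypothetical caller; the real write paths live elsewhere in the client):

static int example_write(struct cifsInodeInfo *cinode)
{
	int rc = cifs_get_writer(cinode);	/* killable wait, may sleep */

	if (rc)
		return rc;			/* fatal signal received */
	/* ... safe to dirty pages: no oplock break is being serviced ... */
	cifs_put_writer(cinode);		/* may wake a pending break */
	return 0;
}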
 597
 598/**
 599 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 600 * @cfile: The file to break the oplock on
 601 *
 602 * This function is called from the demultiplex thread when it
 603 * receives an oplock break for @cfile.
 604 *
 605 * Assumes the tcon->open_file_lock is held.
 606 * Assumes cfile->file_info_lock is NOT held.
 607 */
 608void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
 609{
 610	/*
 611	 * Bump the handle refcount now while we hold the
 612	 * open_file_lock to enforce the validity of it for the oplock
 613	 * break handler. The matching put is done at the end of the
 614	 * handler.
 615	 */
 616	cifsFileInfo_get(cfile);
 617
 618	queue_work(cifsoplockd_wq, &cfile->oplock_break);
 619}
 620
 621void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
 622{
 623	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
 624	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
 625}
 626
 627bool
 628backup_cred(struct cifs_sb_info *cifs_sb)
 629{
 630	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
 631		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
 632			return true;
 633	}
 634	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
 635		if (in_group_p(cifs_sb->ctx->backupgid))
 636			return true;
 637	}
 638
 639	return false;
 640}
 641
 642void
 643cifs_del_pending_open(struct cifs_pending_open *open)
 644{
 645	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
 646	list_del(&open->olist);
 647	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
 648}
 649
 650void
 651cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
 652			     struct cifs_pending_open *open)
 653{
 654	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
 655	open->oplock = CIFS_OPLOCK_NO_CHANGE;
 656	open->tlink = tlink;
 657	fid->pending_open = open;
 658	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
 659}
 660
 661void
 662cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
 663		      struct cifs_pending_open *open)
 664{
 665	spin_lock(&tlink_tcon(tlink)->open_file_lock);
 666	cifs_add_pending_open_locked(fid, tlink, open);
 667	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
 668}
 669
 670/*
 671 * Critical section which runs after acquiring deferred_lock.
 672 * As there is no reference count on cifs_deferred_close, pdclose
 673 * should not be used outside deferred_lock.
 674 */
 675bool
 676cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
 677{
 678	struct cifs_deferred_close *dclose;
 679
 680	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
 681		if ((dclose->netfid == cfile->fid.netfid) &&
 682			(dclose->persistent_fid == cfile->fid.persistent_fid) &&
 683			(dclose->volatile_fid == cfile->fid.volatile_fid)) {
 684			*pdclose = dclose;
 685			return true;
 686		}
 687	}
 688	return false;
 689}
 690
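
Per the comment above, the lookup is only valid under deferred_lock; a hypothetical caller (assuming the deferred_lock spinlock in cifsInodeInfo):

static bool example_has_deferred(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_deferred_close *dclose;
	bool found;

	spin_lock(&cinode->deferred_lock);
	found = cifs_is_deferred_close(cfile, &dclose);
	/* dclose may only be dereferenced while the lock is held */
	spin_unlock(&cinode->deferred_lock);
	return found;
}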
 691/*
 692 * Critical section which runs after acquiring deferred_lock.
 693 */
 694void
 695cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
 696{
 697	bool is_deferred = false;
 698	struct cifs_deferred_close *pdclose;
 699
 700	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
 701	if (is_deferred) {
 702		kfree(dclose);
 703		return;
 704	}
 705
 706	dclose->tlink = cfile->tlink;
 707	dclose->netfid = cfile->fid.netfid;
 708	dclose->persistent_fid = cfile->fid.persistent_fid;
 709	dclose->volatile_fid = cfile->fid.volatile_fid;
 710	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
 711}
 712
 713/*
 714 * Critical section which runs after acquiring deferred_lock.
 715 */
 716void
 717cifs_del_deferred_close(struct cifsFileInfo *cfile)
 718{
 719	bool is_deferred = false;
 720	struct cifs_deferred_close *dclose;
 721
 722	is_deferred = cifs_is_deferred_close(cfile, &dclose);
 723	if (!is_deferred)
 724		return;
 725	list_del(&dclose->dlist);
 726	kfree(dclose);
 727}
 728
 729void
 730cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
 731{
 732	struct cifsFileInfo *cfile = NULL;
 733	struct file_list *tmp_list, *tmp_next_list;
 734	struct list_head file_head;
 735
 736	if (cifs_inode == NULL)
 737		return;
 738
 739	INIT_LIST_HEAD(&file_head);
 740	spin_lock(&cifs_inode->open_file_lock);
 741	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
 742		if (delayed_work_pending(&cfile->deferred)) {
 743			if (cancel_delayed_work(&cfile->deferred)) {
 744				cifs_del_deferred_close(cfile);
 745
 746				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
 747				if (tmp_list == NULL)
 748					break;
 749				tmp_list->cfile = cfile;
 750				list_add_tail(&tmp_list->list, &file_head);
 751			}
 752		}
 753	}
 754	spin_unlock(&cifs_inode->open_file_lock);
 755
 756	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
 757		_cifsFileInfo_put(tmp_list->cfile, true, false);
 758		list_del(&tmp_list->list);
 759		kfree(tmp_list);
 760	}
 761}
 762
 763void
 764cifs_close_all_deferred_files(struct cifs_tcon *tcon)
 765{
 766	struct cifsFileInfo *cfile;
 767	struct file_list *tmp_list, *tmp_next_list;
 768	struct list_head file_head;
 769
 770	INIT_LIST_HEAD(&file_head);
 771	spin_lock(&tcon->open_file_lock);
 772	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
 773		if (delayed_work_pending(&cfile->deferred)) {
 774			if (cancel_delayed_work(&cfile->deferred)) {
 775				cifs_del_deferred_close(cfile);
 776
 777				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
 778				if (tmp_list == NULL)
 779					break;
 780				tmp_list->cfile = cfile;
 781				list_add_tail(&tmp_list->list, &file_head);
 782			}
 783		}
 784	}
 785	spin_unlock(&tcon->open_file_lock);
 786
 787	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
 788		_cifsFileInfo_put(tmp_list->cfile, true, false);
 789		list_del(&tmp_list->list);
 790		kfree(tmp_list);
 791	}
 792}
 793void
 794cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
 795{
 796	struct cifsFileInfo *cfile;
 797	struct file_list *tmp_list, *tmp_next_list;
 798	struct list_head file_head;
 799	void *page;
 800	const char *full_path;
 801
 802	INIT_LIST_HEAD(&file_head);
 803	page = alloc_dentry_path();
 804	spin_lock(&tcon->open_file_lock);
 805	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
 806		full_path = build_path_from_dentry(cfile->dentry, page);
 807		if (strstr(full_path, path)) {
 808			if (delayed_work_pending(&cfile->deferred)) {
 809				if (cancel_delayed_work(&cfile->deferred)) {
 810					cifs_del_deferred_close(cfile);
 811
 812					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
 813					if (tmp_list == NULL)
 814						break;
 815					tmp_list->cfile = cfile;
 816					list_add_tail(&tmp_list->list, &file_head);
 817				}
 818			}
 819		}
 820	}
 821	spin_unlock(&tcon->open_file_lock);
 822
 823	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
 824		_cifsFileInfo_put(tmp_list->cfile, true, false);
 825		list_del(&tmp_list->list);
 826		kfree(tmp_list);
 827	}
 828	free_dentry_path(page);
 829}
 830
 831/* parses DFS referral V3 structure
 832 * caller is responsible for freeing target_nodes
 833 * returns:
 834 * - on success - 0
 835 * - on failure - errno
 836 */
 837int
 838parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
 839		    unsigned int *num_of_nodes,
 840		    struct dfs_info3_param **target_nodes,
 841		    const struct nls_table *nls_codepage, int remap,
 842		    const char *searchName, bool is_unicode)
 843{
 844	int i, rc = 0;
 845	char *data_end;
 846	struct dfs_referral_level_3 *ref;
 847
 848	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
 849
 850	if (*num_of_nodes < 1) {
 851		cifs_dbg(VFS, "num_referrals: must be at least 1, but got num_referrals = %d\n",
 852			 *num_of_nodes);
 853		rc = -EINVAL;
 854		goto parse_DFS_referrals_exit;
 855	}
 856
 857	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
 858	if (ref->VersionNumber != cpu_to_le16(3)) {
 859		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
 860			 le16_to_cpu(ref->VersionNumber));
 861		rc = -EINVAL;
 862		goto parse_DFS_referrals_exit;
 863	}
 864
 865	/* get the upper boundary of the resp buffer */
 866	data_end = (char *)rsp + rsp_size;
 867
 868	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
 869		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));
 870
 871	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
 872				GFP_KERNEL);
 873	if (*target_nodes == NULL) {
 874		rc = -ENOMEM;
 875		goto parse_DFS_referrals_exit;
 876	}
 877
 878	/* collect necessary data from referrals */
 879	for (i = 0; i < *num_of_nodes; i++) {
 880		char *temp;
 881		int max_len;
 882		struct dfs_info3_param *node = (*target_nodes)+i;
 883
 884		node->flags = le32_to_cpu(rsp->DFSFlags);
 885		if (is_unicode) {
 886			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
 887						GFP_KERNEL);
 888			if (tmp == NULL) {
 889				rc = -ENOMEM;
 890				goto parse_DFS_referrals_exit;
 891			}
 892			cifsConvertToUTF16((__le16 *) tmp, searchName,
 893					   PATH_MAX, nls_codepage, remap);
 894			node->path_consumed = cifs_utf16_bytes(tmp,
 895					le16_to_cpu(rsp->PathConsumed),
 896					nls_codepage);
 897			kfree(tmp);
 898		} else
 899			node->path_consumed = le16_to_cpu(rsp->PathConsumed);
 900
 901		node->server_type = le16_to_cpu(ref->ServerType);
 902		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);
 903
 904		/* copy DfsPath */
 905		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
 906		max_len = data_end - temp;
 907		node->path_name = cifs_strndup_from_utf16(temp, max_len,
 908						is_unicode, nls_codepage);
 909		if (!node->path_name) {
 910			rc = -ENOMEM;
 911			goto parse_DFS_referrals_exit;
 912		}
 913
 914		/* copy link target UNC */
 915		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
 916		max_len = data_end - temp;
 917		node->node_name = cifs_strndup_from_utf16(temp, max_len,
 918						is_unicode, nls_codepage);
 919		if (!node->node_name) {
 920			rc = -ENOMEM;
 921			goto parse_DFS_referrals_exit;
 922		}
 923
 924		node->ttl = le32_to_cpu(ref->TimeToLive);
 925
 926		ref++;
 927	}
 928
 929parse_DFS_referrals_exit:
 930	if (rc) {
 931		free_dfs_info_array(*target_nodes, *num_of_nodes);
 932		*target_nodes = NULL;
 933		*num_of_nodes = 0;
 934	}
 935	return rc;
 936}
 937
 938struct cifs_aio_ctx *
 939cifs_aio_ctx_alloc(void)
 940{
 941	struct cifs_aio_ctx *ctx;
 942
 943	/*
 944	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
 945	 * to false so that we know when we have to unreference pages within
 946	 * cifs_aio_ctx_release()
 947	 */
 948	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
 949	if (!ctx)
 950		return NULL;
 951
 952	INIT_LIST_HEAD(&ctx->list);
 953	mutex_init(&ctx->aio_mutex);
 954	init_completion(&ctx->done);
 955	kref_init(&ctx->refcount);
 956	return ctx;
 957}
 958
 959void
 960cifs_aio_ctx_release(struct kref *refcount)
 961{
 962	struct cifs_aio_ctx *ctx = container_of(refcount,
 963					struct cifs_aio_ctx, refcount);
 964
 965	cifsFileInfo_put(ctx->cfile);
 966
 967	/*
 968	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
 969	 * which means that iov_iter_get_pages() was a success and thus that
 970	 * we have taken a reference on the pages.
 971	 */
 972	if (ctx->bv) {
 973		unsigned i;
 974
 975		for (i = 0; i < ctx->npages; i++) {
 976			if (ctx->should_dirty)
 977				set_page_dirty(ctx->bv[i].bv_page);
 978			put_page(ctx->bv[i].bv_page);
 979		}
 980		kvfree(ctx->bv);
 981	}
 982
 983	kfree(ctx);
 984}
 985
 986#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
 987
 988int
 989setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
 990{
 991	ssize_t rc;
 992	unsigned int cur_npages;
 993	unsigned int npages = 0;
 994	unsigned int i;
 995	size_t len;
 996	size_t count = iov_iter_count(iter);
 997	unsigned int saved_len;
 998	size_t start;
 999	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
1000	struct page **pages = NULL;
1001	struct bio_vec *bv = NULL;
1002
1003	if (iov_iter_is_kvec(iter)) {
1004		memcpy(&ctx->iter, iter, sizeof(*iter));
1005		ctx->len = count;
1006		iov_iter_advance(iter, count);
1007		return 0;
1008	}
1009
1010	if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
1011		bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
1012
1013	if (!bv) {
1014		bv = vmalloc(array_size(max_pages, sizeof(*bv)));
1015		if (!bv)
1016			return -ENOMEM;
1017	}
1018
1019	if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
1020		pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
1021
1022	if (!pages) {
1023		pages = vmalloc(array_size(max_pages, sizeof(*pages)));
1024		if (!pages) {
1025			kvfree(bv);
1026			return -ENOMEM;
1027		}
1028	}
1029
1030	saved_len = count;
1031
1032	while (count && npages < max_pages) {
1033		rc = iov_iter_get_pages2(iter, pages, count, max_pages, &start);
1034		if (rc < 0) {
1035			cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
1036			break;
1037		}
1038
1039		if (rc > count) {
1040			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
1041				 count);
1042			break;
1043		}
1044
1045		count -= rc;
1046		rc += start;
1047		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
1048
1049		if (npages + cur_npages > max_pages) {
1050			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
1051				 npages + cur_npages, max_pages);
1052			break;
1053		}
1054
1055		for (i = 0; i < cur_npages; i++) {
1056			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
1057			bv[npages + i].bv_page = pages[i];
1058			bv[npages + i].bv_offset = start;
1059			bv[npages + i].bv_len = len - start;
1060			rc -= len;
1061			start = 0;
1062		}
1063
1064		npages += cur_npages;
1065	}
1066
1067	kvfree(pages);
1068	ctx->bv = bv;
1069	ctx->len = saved_len - count;
1070	ctx->npages = npages;
1071	iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
1072	return 0;
1073}
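
A worked pass through the inner loop above (illustrative numbers, PAGE_SIZE = 4096): suppose iov_iter_get_pages2() returns rc = 6000 bytes starting at start = 1000 within the first page.

/*
 * rc += start  ->  7000;  cur_npages = DIV_ROUND_UP(7000, 4096) = 2
 * bv[0]: bv_offset 1000, bv_len 4096 - 1000 = 3096   (rc -= 4096 -> 2904)
 * bv[1]: bv_offset 0,    bv_len 2904
 * total mapped: 3096 + 2904 = 6000 bytes, matching what was returned
 */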
1074
1075/**
1076 * cifs_alloc_hash - allocate hash and hash context together
1077 * @name: The name of the crypto hash algo
1078 * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
1079 *
1080 * The caller has to make sure @sdesc is initialized to either NULL or
1081 * a valid context. It can be freed via cifs_free_hash().
1082 */
1083int
1084cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
1085{
1086	int rc = 0;
1087	struct crypto_shash *alg = NULL;
1088
1089	if (*sdesc)
1090		return 0;
1091
1092	alg = crypto_alloc_shash(name, 0, 0);
1093	if (IS_ERR(alg)) {
1094		cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
1095		rc = PTR_ERR(alg);
1096		*sdesc = NULL;
1097		return rc;
1098	}
1099
1100	*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
1101	if (*sdesc == NULL) {
1102		cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
1103		crypto_free_shash(alg);
1104		return -ENOMEM;
1105	}
1106
1107	(*sdesc)->tfm = alg;
1108	return 0;
1109}
1110
1111/**
1112 * cifs_free_hash - free hash and hash context together
1113 * @sdesc: Where to find the pointer to the hash TFM
1114 *
1115 * Freeing a NULL descriptor is safe.
1116 */
1117void
1118cifs_free_hash(struct shash_desc **sdesc)
1119{
1120	if (unlikely(!sdesc) || !*sdesc)
1121		return;
1122
1123	if ((*sdesc)->tfm) {
1124		crypto_free_shash((*sdesc)->tfm);
1125		(*sdesc)->tfm = NULL;
1126	}
1127
1128	kfree_sensitive(*sdesc);
1129	*sdesc = NULL;
1130}
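
A usage sketch for the pair above (hypothetical one-shot digest; crypto_shash_digest() is the standard kernel shash helper):

static int example_md5(const u8 *data, unsigned int len, u8 *out /* 16 bytes */)
{
	struct shash_desc *sdesc = NULL;
	int rc = cifs_alloc_hash("md5", &sdesc);

	if (rc)
		return rc;
	rc = crypto_shash_digest(sdesc, data, len, out);
	cifs_free_hash(&sdesc);		/* frees and NULLs the descriptor */
	return rc;
}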
1131
1132/**
1133 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
1134 * @rqst: The request descriptor
1135 * @page: The index of the page to query
1136 * @len: Where to store the length for this page:
1137 * @offset: Where to store the offset for this page
1138 */
1139void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
1140			  unsigned int *len, unsigned int *offset)
1141{
1142	*len = rqst->rq_pagesz;
1143	*offset = (page == 0) ? rqst->rq_offset : 0;
1144
1145	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
1146		*len = rqst->rq_tailsz;
1147	else if (page == 0)
1148		*len = rqst->rq_pagesz - rqst->rq_offset;
1149}
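
A worked example of the helper above (illustrative numbers): rq_npages = 3, rq_pagesz = 4096, rq_offset = 100, rq_tailsz = 500.

/*
 * page 0: *offset = 100, *len = 4096 - 100 = 3996   (front trimmed)
 * page 1: *offset = 0,   *len = 4096                (full middle page)
 * page 2: *offset = 0,   *len = 500                 (last page: rq_tailsz)
 */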
1150
1151void extract_unc_hostname(const char *unc, const char **h, size_t *len)
1152{
1153	const char *end;
1154
1155	/* skip initial slashes */
1156	while (*unc && (*unc == '\\' || *unc == '/'))
1157		unc++;
1158
1159	end = unc;
1160
1161	while (*end && !(*end == '\\' || *end == '/'))
1162		end++;
1163
1164	*h = unc;
1165	*len = end - unc;
1166}
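
For example (doubled backslashes are C string escapes for single ones):

	const char *h;
	size_t len;

	extract_unc_hostname("\\\\srv1\\share", &h, &len);
	/* h now points at "srv1\\share" and len == 4, i.e. "srv1" */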
1167
1168/**
1169 * copy_path_name - copy src path to dst, possibly truncating
1170 * @dst: The destination buffer
1171 * @src: The source name
1172 *
1173 * returns number of bytes written (including trailing nul)
1174 */
1175int copy_path_name(char *dst, const char *src)
1176{
1177	int name_len;
1178
1179	/*
1180	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
1181	 * will truncate and strlen(dst) will be PATH_MAX-1
1182	 */
1183	name_len = strscpy(dst, src, PATH_MAX);
1184	if (WARN_ON_ONCE(name_len < 0))
1185		name_len = PATH_MAX-1;
1186
1187	/* we count the trailing nul */
1188	name_len++;
1189	return name_len;
1190}
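
A worked example of the truncating case, assuming PATH_MAX == 4096:

/*
 * strlen(src) == 5000: strscpy() copies 4095 characters plus a nul and
 * returns -E2BIG; the WARN branch resets name_len to 4095, and the
 * function returns 4096 (4095 characters + the trailing nul).
 */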
1191
1192struct super_cb_data {
1193	void *data;
1194	struct super_block *sb;
1195};
1196
1197static void tcp_super_cb(struct super_block *sb, void *arg)
1198{
1199	struct super_cb_data *sd = arg;
1200	struct TCP_Server_Info *server = sd->data;
1201	struct cifs_sb_info *cifs_sb;
1202	struct cifs_tcon *tcon;
1203
1204	if (sd->sb)
1205		return;
1206
1207	cifs_sb = CIFS_SB(sb);
1208	tcon = cifs_sb_master_tcon(cifs_sb);
1209	if (tcon->ses->server == server)
1210		sd->sb = sb;
1211}
1212
1213static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
1214					    void *data)
1215{
1216	struct super_cb_data sd = {
1217		.data = data,
1218		.sb = NULL,
1219	};
1220	struct file_system_type **fs_type = (struct file_system_type *[]) {
1221		&cifs_fs_type, &smb3_fs_type, NULL,
1222	};
1223
1224	for (; *fs_type; fs_type++) {
1225		iterate_supers_type(*fs_type, f, &sd);
1226		if (sd.sb) {
1227			/*
1228			 * Grab an active reference in order to prevent automounts (DFS links)
 1229			 * from expiring and then freeing up our cifs superblock pointer while
1230			 * we're doing failover.
1231			 */
1232			cifs_sb_active(sd.sb);
1233			return sd.sb;
1234		}
1235	}
1236	return ERR_PTR(-EINVAL);
1237}
1238
1239static void __cifs_put_super(struct super_block *sb)
1240{
1241	if (!IS_ERR_OR_NULL(sb))
1242		cifs_sb_deactive(sb);
1243}
1244
1245struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
1246{
1247	return __cifs_get_super(tcp_super_cb, server);
1248}
1249
1250void cifs_put_tcp_super(struct super_block *sb)
1251{
1252	__cifs_put_super(sb);
1253}
1254
1255#ifdef CONFIG_CIFS_DFS_UPCALL
1256int match_target_ip(struct TCP_Server_Info *server,
1257		    const char *share, size_t share_len,
1258		    bool *result)
1259{
1260	int rc;
1261	char *target;
1262	struct sockaddr_storage ss;
1263
1264	*result = false;
1265
1266	target = kzalloc(share_len + 3, GFP_KERNEL);
1267	if (!target)
1268		return -ENOMEM;
1269
1270	scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
1271
1272	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
1273
1274	rc = dns_resolve_server_name_to_ip(target, (struct sockaddr *)&ss, NULL);
1275	kfree(target);
1276
1277	if (rc < 0)
1278		return rc;
1279
1280	spin_lock(&server->srv_lock);
1281	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
1282	spin_unlock(&server->srv_lock);
1283	cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
1284	return 0;
1285}
1286
1287int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
1288{
1289	kfree(cifs_sb->prepath);
1290
1291	if (prefix && *prefix) {
1292		cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
1293		if (!cifs_sb->prepath)
1294			return -ENOMEM;
1295
1296		convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
1297	} else
1298		cifs_sb->prepath = NULL;
1299
1300	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
1301	return 0;
1302}
1303#endif