v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2007
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Christian Borntraeger (cborntra@de.ibm.com),
 */

#define KMSG_COMPONENT "cpcmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <asm/io.h>

static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];

static int diag8_noresponse(int cmdlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = cmdlen;

	asm volatile(
		"	diag	%1,%0,0x8\n"
		: "+d" (reg3) : "d" (reg2) : "cc");
	return reg3;
}

static int diag8_response(int cmdlen, char *response, int *rlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = (addr_t) response;
	register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
	register unsigned long reg5 asm ("5") = *rlen;

	asm volatile(
		"	diag	%2,%0,0x8\n"
		"	brc	8,1f\n"
		"	agr	%1,%4\n"
		"1:\n"
		: "+d" (reg4), "+d" (reg5)
		: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
	*rlen = reg5;
	return reg4;
}

/*
 * __cpcmd has some restrictions over cpcmd
 *  - __cpcmd is unlocked and therefore not SMP-safe
 */
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	int cmdlen;
	int rc;
	int response_len;

	cmdlen = strlen(cmd);
	BUG_ON(cmdlen > 240);
	memcpy(cpcmd_buf, cmd, cmdlen);
	ASCEBC(cpcmd_buf, cmdlen);

	diag_stat_inc(DIAG_STAT_X008);
	if (response) {
		memset(response, 0, rlen);
		response_len = rlen;
		rc = diag8_response(cmdlen, response, &rlen);
		EBCASC(response, response_len);
	} else {
		rc = diag8_noresponse(cmdlen);
	}
	if (response_code)
		*response_code = rc;
	return rlen;
}
EXPORT_SYMBOL(__cpcmd);

int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	unsigned long flags;
	char *lowbuf;
	int len;

	if (is_vmalloc_or_module_addr(response)) {
		lowbuf = kmalloc(rlen, GFP_KERNEL);
		if (!lowbuf) {
			pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
			return -ENOMEM;
		}
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, lowbuf, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
		memcpy(response, lowbuf, rlen);
		kfree(lowbuf);
	} else {
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, response, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
	}
	return len;
}
EXPORT_SYMBOL(cpcmd);
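
A quick orientation before the newer version: cpcmd() passes a CP command string to the z/VM control program via DIAGNOSE X'08', converting the command to EBCDIC on the way in (ASCEBC) and the response back to ASCII on the way out (EBCASC). Below is a minimal sketch of a hypothetical caller; the command string, buffer size, and function name are illustrative and not taken from this file.

/* Hypothetical caller, for illustration only (not part of cpcmd.c). */
#include <linux/printk.h>
#include <asm/cpcmd.h>

static void cpcmd_usage_sketch(void)
{
	char response[128];
	int cp_rc;
	int len;

	/* "QUERY USERID" stands in for any CP command of at most 240 characters. */
	len = cpcmd("QUERY USERID", response, sizeof(response), &cp_rc);
	if (len < 0) {
		/* cpcmd() returns -ENOMEM if it cannot allocate a bounce buffer. */
		pr_warn("cpcmd failed: %d\n", len);
		return;
	}
	pr_info("CP response code %d, %d byte(s) of response\n", cp_rc, len);
}
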
v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2007
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Christian Borntraeger (cborntra@de.ibm.com),
 */

#define KMSG_COMPONENT "cpcmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <asm/io.h>

static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];

static int diag8_noresponse(int cmdlen)
{
	asm volatile(
		"	diag	%[rx],%[ry],0x8\n"
		: [ry] "+&d" (cmdlen)
		: [rx] "d" (__pa(cpcmd_buf))
		: "cc");
	return cmdlen;
}

static int diag8_response(int cmdlen, char *response, int *rlen)
{
	union register_pair rx, ry;
	int cc;

	rx.even = __pa(cpcmd_buf);
	rx.odd	= __pa(response);
	ry.even = cmdlen | 0x40000000L;
	ry.odd	= *rlen;
	asm volatile(
		"	diag	%[rx],%[ry],0x8\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
		: [rx] "d" (rx.pair)
		: "cc");
	if (cc)
		*rlen += ry.odd;
	else
		*rlen = ry.odd;
	return ry.even;
}

/*
 * __cpcmd has some restrictions over cpcmd
 *  - __cpcmd is unlocked and therefore not SMP-safe
 */
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	int cmdlen;
	int rc;
	int response_len;

	cmdlen = strlen(cmd);
	BUG_ON(cmdlen > 240);
	memcpy(cpcmd_buf, cmd, cmdlen);
	ASCEBC(cpcmd_buf, cmdlen);

	diag_stat_inc(DIAG_STAT_X008);
	if (response) {
		memset(response, 0, rlen);
		response_len = rlen;
		rc = diag8_response(cmdlen, response, &rlen);
		EBCASC(response, response_len);
	} else {
		rc = diag8_noresponse(cmdlen);
	}
	if (response_code)
		*response_code = rc;
	return rlen;
}
EXPORT_SYMBOL(__cpcmd);

int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	unsigned long flags;
	char *lowbuf;
	int len;

	if (is_vmalloc_or_module_addr(response)) {
		lowbuf = kmalloc(rlen, GFP_KERNEL);
		if (!lowbuf) {
			pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
			return -ENOMEM;
		}
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, lowbuf, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
		memcpy(response, lowbuf, rlen);
		kfree(lowbuf);
	} else {
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, response, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
	}
	return len;
}
EXPORT_SYMBOL(cpcmd);
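
Compared with the v5.4 version, the v6.2 diag8_response() drops the fixed-register asm variables in favour of union register_pair and reads the condition code explicitly with ipm/srl. As I read that condition-code path, a non-zero cc means the response did not fit in the caller's buffer, and *rlen is increased by the leftover byte count so the returned length reports the size actually needed. A hedged sketch of how a hypothetical caller could use that follows; the helper name and initial buffer size are illustrative, not part of the kernel API.

/*
 * Hypothetical retry helper, illustration only: if cpcmd() reports a length
 * larger than the buffer that was passed in, the response was cut off, so
 * retry once with a buffer of the reported size.
 */
#include <linux/slab.h>
#include <asm/cpcmd.h>

static int cpcmd_full_response_sketch(const char *cmd, char **out, int *cp_rc)
{
	int size = 256;		/* arbitrary first guess */
	char *buf;
	int len;

	buf = kzalloc(size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	len = cpcmd(cmd, buf, size, cp_rc);
	if (len > size) {	/* truncated: len reports the full size needed */
		kfree(buf);
		size = len;
		buf = kzalloc(size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		cpcmd(cmd, buf, size, cp_rc);
	}
	*out = buf;		/* caller frees the buffer */
	return 0;
}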