/*
 * S390 version
 * Copyright IBM Corp. 1999, 2007
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Christian Borntraeger (cborntra@de.ibm.com),
 */
7
8#define KMSG_COMPONENT "cpcmd"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/spinlock.h>
15#include <linux/stddef.h>
16#include <linux/string.h>
17#include <asm/diag.h>
18#include <asm/ebcdic.h>
19#include <asm/cpcmd.h>
20#include <asm/io.h>
21
/* Serializes cpcmd() callers; __cpcmd() itself is deliberately unlocked. */
static DEFINE_SPINLOCK(cpcmd_lock);
/* Shared command staging buffer; commands are limited to 240 bytes (see BUG_ON). */
static char cpcmd_buf[241];
24
/*
 * Issue CP diagnose 0x8 (execute CP command) without a response buffer.
 * The EBCDIC command must already have been staged in cpcmd_buf by the
 * caller (__cpcmd).
 *
 * The diagnose is bracketed by sam31/sam64, i.e. executed in 31-bit
 * addressing mode, so cpcmd_buf must be addressable below 2GB.
 *
 * cmdlen: length of the command in cpcmd_buf
 * Returns the CP response code delivered back in register 3.
 */
static int diag8_noresponse(int cmdlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = cmdlen;

	asm volatile(
		" sam31\n"
		" diag %1,%0,0x8\n"
		" sam64\n"
		: "+d" (reg3) : "d" (reg2) : "cc");
	return reg3;
}
37
/*
 * Issue CP diagnose 0x8 (execute CP command) and store the response.
 * The EBCDIC command must already be staged in cpcmd_buf. Runs in
 * 31-bit addressing mode (sam31/sam64), so both cpcmd_buf and the
 * response buffer must reside below 2GB.
 *
 * cmdlen:   length of the command in cpcmd_buf
 * response: buffer receiving the EBCDIC response text
 * rlen:     in: size of the response buffer;
 *           out: total response length, which may exceed the buffer
 *           size when the response was truncated
 *
 * Returns the CP response code delivered back in register 4.
 */
static int diag8_response(int cmdlen, char *response, int *rlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = (addr_t) response;
	/* 0x40000000 flags the "store response" variant of diag 0x8 */
	register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
	register unsigned long reg5 asm ("5") = *rlen;

	asm volatile(
		" sam31\n"
		" diag %2,%0,0x8\n"
		" sam64\n"
		" brc 8,1f\n"	/* cc == 0: response fit into the buffer */
		" agr %1,%4\n"	/* cc != 0: NOTE(review): reg5 apparently holds the
				 * residual count; adding the original buffer size
				 * yields the total response length — confirm against
				 * the CP programming services manual */
		"1:\n"
		: "+d" (reg4), "+d" (reg5)
		: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
	*rlen = reg5;
	return reg4;
}
57
/*
 * __cpcmd has some restrictions over cpcmd
 * - the response buffer must reside below 2GB (if any)
 * - __cpcmd is unlocked and therefore not SMP-safe
 */
63int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
64{
65 int cmdlen;
66 int rc;
67 int response_len;
68
69 cmdlen = strlen(cmd);
70 BUG_ON(cmdlen > 240);
71 memcpy(cpcmd_buf, cmd, cmdlen);
72 ASCEBC(cpcmd_buf, cmdlen);
73
74 diag_stat_inc(DIAG_STAT_X008);
75 if (response) {
76 memset(response, 0, rlen);
77 response_len = rlen;
78 rc = diag8_response(cmdlen, response, &rlen);
79 EBCASC(response, response_len);
80 } else {
81 rc = diag8_noresponse(cmdlen);
82 }
83 if (response_code)
84 *response_code = rc;
85 return rlen;
86}
87EXPORT_SYMBOL(__cpcmd);
88
89int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
90{
91 char *lowbuf;
92 int len;
93 unsigned long flags;
94
95 if ((virt_to_phys(response) != (unsigned long) response) ||
96 (((unsigned long)response + rlen) >> 31)) {
97 lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
98 if (!lowbuf) {
99 pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
100 return -ENOMEM;
101 }
102 spin_lock_irqsave(&cpcmd_lock, flags);
103 len = __cpcmd(cmd, lowbuf, rlen, response_code);
104 spin_unlock_irqrestore(&cpcmd_lock, flags);
105 memcpy(response, lowbuf, rlen);
106 kfree(lowbuf);
107 } else {
108 spin_lock_irqsave(&cpcmd_lock, flags);
109 len = __cpcmd(cmd, response, rlen, response_code);
110 spin_unlock_irqrestore(&cpcmd_lock, flags);
111 }
112 return len;
113}
114EXPORT_SYMBOL(cpcmd);
// SPDX-License-Identifier: GPL-2.0
/*
 * S390 version
 * Copyright IBM Corp. 1999, 2007
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Christian Borntraeger (cborntra@de.ibm.com),
 */
8
9#define KMSG_COMPONENT "cpcmd"
10#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/export.h>
14#include <linux/slab.h>
15#include <linux/spinlock.h>
16#include <linux/stddef.h>
17#include <linux/string.h>
18#include <linux/mm.h>
19#include <asm/diag.h>
20#include <asm/ebcdic.h>
21#include <asm/cpcmd.h>
22#include <asm/io.h>
23
/* Serializes cpcmd() callers; __cpcmd() itself is deliberately unlocked. */
static DEFINE_SPINLOCK(cpcmd_lock);
/* Shared command staging buffer; commands are limited to 240 bytes (see BUG_ON). */
static char cpcmd_buf[241];
26
/*
 * Issue CP diagnose 0x8 (execute CP command) without a response buffer.
 * The EBCDIC command must already have been staged in cpcmd_buf by the
 * caller (__cpcmd).
 *
 * cmdlen: length of the command in cpcmd_buf
 * Returns the CP response code delivered back in register 3.
 */
static int diag8_noresponse(int cmdlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = cmdlen;

	asm volatile(
		" diag %1,%0,0x8\n"
		: "+d" (reg3) : "d" (reg2) : "cc");
	return reg3;
}
37
/*
 * Issue CP diagnose 0x8 (execute CP command) and store the response.
 * The EBCDIC command must already be staged in cpcmd_buf.
 *
 * cmdlen:   length of the command in cpcmd_buf
 * response: buffer receiving the EBCDIC response text
 * rlen:     in: size of the response buffer;
 *           out: total response length, which may exceed the buffer
 *           size when the response was truncated
 *
 * Returns the CP response code delivered back in register 4.
 */
static int diag8_response(int cmdlen, char *response, int *rlen)
{
	register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
	register unsigned long reg3 asm ("3") = (addr_t) response;
	/* 0x40000000 flags the "store response" variant of diag 0x8 */
	register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
	register unsigned long reg5 asm ("5") = *rlen;

	asm volatile(
		" diag %2,%0,0x8\n"
		" brc 8,1f\n"	/* cc == 0: response fit into the buffer */
		" agr %1,%4\n"	/* cc != 0: NOTE(review): reg5 apparently holds the
				 * residual count; adding the original buffer size
				 * yields the total response length — confirm against
				 * the CP programming services manual */
		"1:\n"
		: "+d" (reg4), "+d" (reg5)
		: "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
	*rlen = reg5;
	return reg4;
}
55
/*
 * __cpcmd has some restrictions over cpcmd
 * - __cpcmd is unlocked and therefore not SMP-safe
 */
60int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
61{
62 int cmdlen;
63 int rc;
64 int response_len;
65
66 cmdlen = strlen(cmd);
67 BUG_ON(cmdlen > 240);
68 memcpy(cpcmd_buf, cmd, cmdlen);
69 ASCEBC(cpcmd_buf, cmdlen);
70
71 diag_stat_inc(DIAG_STAT_X008);
72 if (response) {
73 memset(response, 0, rlen);
74 response_len = rlen;
75 rc = diag8_response(cmdlen, response, &rlen);
76 EBCASC(response, response_len);
77 } else {
78 rc = diag8_noresponse(cmdlen);
79 }
80 if (response_code)
81 *response_code = rc;
82 return rlen;
83}
84EXPORT_SYMBOL(__cpcmd);
85
86int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
87{
88 unsigned long flags;
89 char *lowbuf;
90 int len;
91
92 if (is_vmalloc_or_module_addr(response)) {
93 lowbuf = kmalloc(rlen, GFP_KERNEL);
94 if (!lowbuf) {
95 pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
96 return -ENOMEM;
97 }
98 spin_lock_irqsave(&cpcmd_lock, flags);
99 len = __cpcmd(cmd, lowbuf, rlen, response_code);
100 spin_unlock_irqrestore(&cpcmd_lock, flags);
101 memcpy(response, lowbuf, rlen);
102 kfree(lowbuf);
103 } else {
104 spin_lock_irqsave(&cpcmd_lock, flags);
105 len = __cpcmd(cmd, response, rlen, response_code);
106 spin_unlock_irqrestore(&cpcmd_lock, flags);
107 }
108 return len;
109}
110EXPORT_SYMBOL(cpcmd);