// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2007
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Christian Borntraeger (cborntra@de.ibm.com),
 */

#define KMSG_COMPONENT "cpcmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>

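/* cpcmd_lock serializes all users of the single static command buffer. */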
static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];

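/*
 * Issue DIAGNOSE X'008' without a response buffer: Rx holds the real
 * address of the EBCDIC command in cpcmd_buf, Ry its length. CP returns
 * the command response code in Ry, which is passed back to the caller.
 */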
static int diag8_noresponse(int cmdlen)
{
	asm volatile(
		"	diag	%[rx],%[ry],0x8\n"
		: [ry] "+&d" (cmdlen)
		: [rx] "d" (__pa(cpcmd_buf))
		: "cc");
	return cmdlen;
}

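/*
 * Issue DIAGNOSE X'008' with a response buffer: rx pairs the real
 * addresses of the command and response buffers, ry pairs the command
 * length (with bit 0x40000000 set to announce the response buffer) and
 * the response buffer length. On condition code 0 the response fit and
 * ry.odd holds its length; otherwise ry.odd holds the number of bytes
 * that did not fit, which is added to *rlen so the caller learns the
 * required size. ry.even carries CP's response code.
 */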
static int diag8_response(int cmdlen, char *response, int *rlen)
{
	union register_pair rx, ry;
	int cc;

	rx.even = __pa(cpcmd_buf);
	rx.odd = __pa(response);
	ry.even = cmdlen | 0x40000000L;
	ry.odd = *rlen;
	asm volatile(
		"	diag	%[rx],%[ry],0x8\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
		: [rx] "d" (rx.pair)
		: "cc");
	if (cc)
		*rlen += ry.odd;
	else
		*rlen = ry.odd;
	return ry.even;
}

/*
 * __cpcmd has some restrictions over cpcmd
 *  - __cpcmd is unlocked and therefore not SMP-safe
 */
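/*
 * The command is converted from ASCII to EBCDIC before it is handed to
 * CP, and the response is converted back to ASCII, so callers deal with
 * ASCII strings on both sides.
 */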
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	int cmdlen;
	int rc;
	int response_len;

	cmdlen = strlen(cmd);
	BUG_ON(cmdlen > 240);
	memcpy(cpcmd_buf, cmd, cmdlen);
	ASCEBC(cpcmd_buf, cmdlen);

	diag_stat_inc(DIAG_STAT_X008);
	if (response) {
		memset(response, 0, rlen);
		response_len = rlen;
		rc = diag8_response(cmdlen, response, &rlen);
		EBCASC(response, response_len);
	} else {
		rc = diag8_noresponse(cmdlen);
	}
	if (response_code)
		*response_code = rc;
	return rlen;
}
EXPORT_SYMBOL(__cpcmd);

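/*
 * cpcmd is the locked, SMP-safe wrapper around __cpcmd. DIAGNOSE X'008'
 * works on real addresses, so a response buffer in vmalloc or module
 * space is bounced through a kmalloc'ed buffer, which lies in the
 * kernel's linear mapping.
 */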
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	unsigned long flags;
	char *lowbuf;
	int len;

	if (is_vmalloc_or_module_addr(response)) {
		lowbuf = kmalloc(rlen, GFP_KERNEL);
		if (!lowbuf) {
			pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
			return -ENOMEM;
		}
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, lowbuf, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
		memcpy(response, lowbuf, rlen);
		kfree(lowbuf);
	} else {
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, response, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
	}
	return len;
}
EXPORT_SYMBOL(cpcmd);
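
/*
 * Usage sketch (illustrative, not part of the original file): how a
 * caller might issue a CP command and inspect the response. The function
 * name, buffer size, and choice of "QUERY USERID" are assumptions for
 * this example only.
 */
#if 0
static void cpcmd_usage_example(void)
{
	char buf[128];
	int resp_code;
	int len;

	/*
	 * The response arrives already converted to ASCII; len is the
	 * response length, or the size needed if buf was too small.
	 */
	len = cpcmd("QUERY USERID", buf, sizeof(buf), &resp_code);
	pr_info("CP response (%d bytes, rc=%d): %s\n", len, resp_code, buf);
}
#endif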