/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Macros for SMC statistics
 *
 * Copyright IBM Corp. 2021
 *
 * Author(s):  Guvenc Gulce
 */

#ifndef NET_SMC_SMC_STATS_H_
#define NET_SMC_SMC_STATS_H_
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/ctype.h>
#include <linux/smc.h>

#include "smc_clc.h"

#define SMC_MAX_FBACK_RSN_CNT 36

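/*
 * Histogram buckets for buffer and payload sizes: bucket n covers sizes up
 * to 8K << n, i.e. SMC_BUF_8K counts sizes of at most 8KB, SMC_BUF_16K up
 * to 16KB, and so on; SMC_BUF_G_1024K collects everything above 1MB.
 */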
enum {
	SMC_BUF_8K,
	SMC_BUF_16K,
	SMC_BUF_32K,
	SMC_BUF_64K,
	SMC_BUF_128K,
	SMC_BUF_256K,
	SMC_BUF_512K,
	SMC_BUF_1024K,
	SMC_BUF_G_1024K,
	SMC_BUF_MAX,
};

struct smc_stats_fback {
	int	fback_code;
	u16	count;
};

struct smc_stats_rsn {
	struct	smc_stats_fback srv[SMC_MAX_FBACK_RSN_CNT];
	struct	smc_stats_fback clnt[SMC_MAX_FBACK_RSN_CNT];
	u64			srv_fback_cnt;
	u64			clnt_fback_cnt;
};

struct smc_stats_rmbcnt {
	u64	buf_size_small_peer_cnt;
	u64	buf_size_small_cnt;
	u64	buf_full_peer_cnt;
	u64	buf_full_cnt;
	u64	reuse_cnt;
	u64	alloc_cnt;
	u64	dgrade_cnt;
};

struct smc_stats_memsize {
	u64	buf[SMC_BUF_MAX];
};

struct smc_stats_tech {
	struct smc_stats_memsize tx_rmbsize;
	struct smc_stats_memsize rx_rmbsize;
	struct smc_stats_memsize tx_pd;
	struct smc_stats_memsize rx_pd;
	struct smc_stats_rmbcnt rmb_tx;
	struct smc_stats_rmbcnt rmb_rx;
	u64			clnt_v1_succ_cnt;
	u64			clnt_v2_succ_cnt;
	u64			srv_v1_succ_cnt;
	u64			srv_v2_succ_cnt;
	u64			urg_data_cnt;
	u64			splice_cnt;
	u64			cork_cnt;
	u64			ndly_cnt;
	u64			rx_bytes;
	u64			tx_bytes;
	u64			rx_cnt;
	u64			tx_cnt;
	u64			rx_rmbuse;
	u64			tx_rmbuse;
};

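/*
 * Top-level per-net statistics, kept as a per-CPU allocation (see the
 * this_cpu_* accesses in the macros below); smc[] holds one set of counters
 * per fabric type, indexed by SMC_TYPE_R and SMC_TYPE_D.
 */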
struct smc_stats {
	struct smc_stats_tech	smc[2];
	u64			clnt_hshake_err_cnt;
	u64			srv_hshake_err_cnt;
};

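/*
 * Count one transfer in direction 'key' (tx/rx) for technology '_tech'
 * (SMC_TYPE_R/SMC_TYPE_D); on success also add the returned byte count and
 * bump the payload size histogram: fls64((l - 1) >> 13) maps lengths up to
 * 8KB to SMC_BUF_8K, up to 16KB to SMC_BUF_16K, and so on, capped at
 * SMC_BUF_G_1024K.
 */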
#define SMC_STAT_PAYLOAD_SUB(_smc_stats, _tech, key, _len, _rc) \
do { \
	typeof(_smc_stats) stats = (_smc_stats); \
	typeof(_tech) t = (_tech); \
	typeof(_len) l = (_len); \
	int _pos; \
	typeof(_rc) r = (_rc); \
	int m = SMC_BUF_MAX - 1; \
	this_cpu_inc((*stats).smc[t].key ## _cnt); \
	if (r <= 0 || l <= 0) \
		break; \
	_pos = fls64((l - 1) >> 13); \
	_pos = (_pos <= m) ? _pos : m; \
	this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
	this_cpu_add((*stats).smc[t].key ## _bytes, r); \
} \
while (0)

#define SMC_STAT_TX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	struct net *_net = sock_net(&__smc->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, tx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, tx, _len, _rc); \
} \
while (0)

#define SMC_STAT_RX_PAYLOAD(_smc, length, rcode) \
do { \
	typeof(_smc) __smc = _smc; \
	struct net *_net = sock_net(&__smc->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(length) _len = (length); \
	typeof(rcode) _rc = (rcode); \
	bool is_smcd = !__smc->conn.lnk; \
	if (is_smcd) \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_D, rx, _len, _rc); \
	else \
		SMC_STAT_PAYLOAD_SUB(_smc_stats, SMC_TYPE_R, rx, _len, _rc); \
} \
while (0)

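/*
 * Track RMB/buffer sizes: when a buffer is added, bump the matching size
 * histogram bucket and add its length to the in-use byte count
 * (k ## _rmbuse); when it is removed, only subtract the length again.
 */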
#define SMC_STAT_RMB_SIZE_SUB(_smc_stats, _tech, k, _is_add, _len) \
do { \
	typeof(_smc_stats) stats = (_smc_stats); \
	typeof(_is_add) is_a = (_is_add); \
	typeof(_len) _l = (_len); \
	typeof(_tech) t = (_tech); \
	int _pos; \
	int m = SMC_BUF_MAX - 1; \
	if (_l <= 0) \
		break; \
	if (is_a) { \
		_pos = fls((_l - 1) >> 13); \
		_pos = (_pos <= m) ? _pos : m; \
		this_cpu_inc((*stats).smc[t].k ## _rmbsize.buf[_pos]); \
		this_cpu_add((*stats).smc[t].k ## _rmbuse, _l); \
	} else { \
		this_cpu_sub((*stats).smc[t].k ## _rmbuse, _l); \
	} \
} \
while (0)

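/*
 * SMC_STAT_RMB_SUB pastes its arguments into a counter name, e.g.
 * type=alloc, t=SMC_TYPE_R, key=tx increments smc[SMC_TYPE_R].rmb_tx.alloc_cnt.
 * SMC_STAT_RMB selects the tx/rx and SMC-R/SMC-D variant for the event
 * wrappers further below.
 */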
#define SMC_STAT_RMB_SUB(_smc_stats, type, t, key) \
	this_cpu_inc((*(_smc_stats)).smc[t].rmb ## _ ## key.type ## _cnt)

#define SMC_STAT_RMB_SIZE(_smc, _is_smcd, _is_rx, _is_add, _len) \
do { \
	struct net *_net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *_smc_stats = _net->smc.smc_stats; \
	typeof(_is_add) is_add = (_is_add); \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	typeof(_len) l = (_len); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, rx, is_add, l); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_D, tx, is_add, l); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, rx, is_add, l); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SIZE_SUB(_smc_stats, SMC_TYPE_R, tx, is_add, l); \
} \
while (0)

#define SMC_STAT_RMB(_smc, type, _is_smcd, _is_rx) \
do { \
	struct net *net = sock_net(&(_smc)->sk); \
	struct smc_stats __percpu *_smc_stats = net->smc.smc_stats; \
	typeof(_is_smcd) is_d = (_is_smcd); \
	typeof(_is_rx) is_r = (_is_rx); \
	if ((is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, rx); \
	if ((is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_D, tx); \
	if (!(is_d) && (is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, rx); \
	if (!(is_d) && !(is_r)) \
		SMC_STAT_RMB_SUB(_smc_stats, type, SMC_TYPE_R, tx); \
} \
while (0)

#define SMC_STAT_BUF_REUSE(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, reuse, is_smcd, is_rx)

#define SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, alloc, is_smcd, is_rx)

#define SMC_STAT_RMB_DOWNGRADED(smc, is_smcd, is_rx) \
	SMC_STAT_RMB(smc, dgrade, is_smcd, is_rx)

#define SMC_STAT_RMB_TX_PEER_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full, is_smcd, false)

#define SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small_peer, is_smcd, false)

#define SMC_STAT_RMB_TX_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small, is_smcd, false)

#define SMC_STAT_RMB_RX_SIZE_SMALL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_size_small, is_smcd, true)

#define SMC_STAT_RMB_RX_FULL(smc, is_smcd) \
	SMC_STAT_RMB(smc, buf_full, is_smcd, true)

#define SMC_STAT_INC(_smc, type) \
do { \
	typeof(_smc) __smc = _smc; \
	bool is_smcd = !(__smc)->conn.lnk; \
	struct net *net = sock_net(&(__smc)->sk); \
	struct smc_stats __percpu *smc_stats = net->smc.smc_stats; \
	if ((is_smcd)) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].type); \
	else \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].type); \
} \
while (0)

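/*
 * Count a successful connection setup, split by role (client/server),
 * protocol version (v1/v2) and technology (SMC-D/SMC-R). The client side
 * derives version and type from the received CLC accept message, the
 * server side from the connection init data.
 */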
#define SMC_STAT_CLNT_SUCC_INC(net, _aclc) \
do { \
	typeof(_aclc) acl = (_aclc); \
	bool is_v2 = (acl->hdr.version == SMC_V2); \
	bool is_smcd = (acl->hdr.typev1 == SMC_TYPE_D); \
	struct smc_stats __percpu *smc_stats = (net)->smc.smc_stats; \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].clnt_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].clnt_v1_succ_cnt); \
} \
while (0)

#define SMC_STAT_SERV_SUCC_INC(net, _ini) \
do { \
	typeof(_ini) i = (_ini); \
	bool is_smcd = (i->is_smcd); \
	u8 version = is_smcd ? i->smcd_version : i->smcr_version; \
	bool is_v2 = (version & SMC_V2); \
	typeof(net->smc.smc_stats) smc_stats = (net)->smc.smc_stats; \
	if (is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v2_succ_cnt); \
	else if (is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v2_succ_cnt); \
	else if (!is_v2 && is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_D].srv_v1_succ_cnt); \
	else if (!is_v2 && !is_smcd) \
		this_cpu_inc(smc_stats->smc[SMC_TYPE_R].srv_v1_succ_cnt); \
} \
while (0)

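/*
 * Illustrative usage only (a sketch, not code from this header): a transmit
 * completion could be recorded as
 *
 *	SMC_STAT_TX_PAYLOAD(smc, copied_len, rc);
 *
 * where rc is the send result (bytes transferred or a negative error) and
 * copied_len the requested length; buffer events are reported through
 * SMC_STAT_RMB_ALLOC(), SMC_STAT_BUF_REUSE(), SMC_STAT_RMB_SIZE() and the
 * related wrappers above.
 */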
int smc_nl_get_stats(struct sk_buff *skb, struct netlink_callback *cb);
int smc_nl_get_fback_stats(struct sk_buff *skb, struct netlink_callback *cb);
int smc_stats_init(struct net *net);
void smc_stats_exit(struct net *net);

#endif /* NET_SMC_SMC_STATS_H_ */