1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * The Huawei Cache Coherence System (HCCS) is a multi-chip interconnection
4 * bus protocol.
5 *
6 * Copyright (c) 2023 Hisilicon Limited.
7 * Author: Huisong Li <lihuisong@huawei.com>
8 *
9 * HCCS driver for Kunpeng SoC provides the following features:
10 * - Retrieve the following information about each port:
11 * - port type
12 * - lane mode
13 * - enable
14 * - current lane mode
15 * - link finite state machine
16 * - lane mask
17 * - CRC error count
18 *
19 * - Retrieve the following information about all the ports on the chip or
20 * the die:
21 * - whether all enabled ports are linked
22 * - whether all linked ports are at full lane
23 * - CRC error count sum
24 *
25 * - Retrieve all HCCS types used on the platform.
26 *
27 * - Support the low power feature for all ports of a specified HCCS type, and
28 * provide the following interfaces:
29 * - query the HCCS types that support increasing and decreasing the lane number.
30 * - decrease the lane number of all ports of a specified HCCS type when idle.
31 * - increase the lane number of all ports of a specified HCCS type.
32 */
33#include <linux/acpi.h>
34#include <linux/delay.h>
35#include <linux/iopoll.h>
36#include <linux/platform_device.h>
37#include <linux/stringify.h>
38#include <linux/sysfs.h>
39#include <linux/types.h>
40
41#include <acpi/pcc.h>
42
43#include "kunpeng_hccs.h"
44
45/*
46 * Arbitrary retries in case the remote processor is slow to respond
47 * to PCC commands
48 */
49#define HCCS_PCC_CMD_WAIT_RETRIES_NUM 500ULL
50#define HCCS_POLL_STATUS_TIME_INTERVAL_US 3
51
52static struct hccs_port_info *kobj_to_port_info(struct kobject *k)
53{
54 return container_of(k, struct hccs_port_info, kobj);
55}
56
57static struct hccs_die_info *kobj_to_die_info(struct kobject *k)
58{
59 return container_of(k, struct hccs_die_info, kobj);
60}
61
62static struct hccs_chip_info *kobj_to_chip_info(struct kobject *k)
63{
64 return container_of(k, struct hccs_chip_info, kobj);
65}
66
67static struct hccs_dev *device_kobj_to_hccs_dev(struct kobject *k)
68{
69 struct device *dev = container_of(k, struct device, kobj);
70 struct platform_device *pdev =
71 container_of(dev, struct platform_device, dev);
72
73 return platform_get_drvdata(pdev);
74}
75
76static char *hccs_port_type_to_name(struct hccs_dev *hdev, u8 type)
77{
78 u16 i;
79
80 for (i = 0; i < hdev->used_type_num; i++) {
81 if (hdev->type_name_maps[i].type == type)
82 return hdev->type_name_maps[i].name;
83 }
84
85 return NULL;
86}
87
88static int hccs_name_to_port_type(struct hccs_dev *hdev,
89 const char *name, u8 *type)
90{
91 u16 i;
92
93 for (i = 0; i < hdev->used_type_num; i++) {
94 if (strcmp(hdev->type_name_maps[i].name, name) == 0) {
95 *type = hdev->type_name_maps[i].type;
96 return 0;
97 }
98 }
99
100 return -EINVAL;
101}
102
103struct hccs_register_ctx {
104 struct device *dev;
105 u8 chan_id;
106 int err;
107};
108
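/*
 * _CRS walk callback: the PCC subspace (channel) ID used by this device is
 * conveyed in the access_size field of the Generic Register resource.
 */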
109static acpi_status hccs_get_register_cb(struct acpi_resource *ares,
110 void *context)
111{
112 struct acpi_resource_generic_register *reg;
113 struct hccs_register_ctx *ctx = context;
114
115 if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
116 return AE_OK;
117
118 reg = &ares->data.generic_reg;
119 if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM) {
120 dev_err(ctx->dev, "Bad register resource.\n");
121 ctx->err = -EINVAL;
122 return AE_ERROR;
123 }
124 ctx->chan_id = reg->access_size;
125
126 return AE_OK;
127}
128
129static int hccs_get_pcc_chan_id(struct hccs_dev *hdev)
130{
131 acpi_handle handle = ACPI_HANDLE(hdev->dev);
132 struct hccs_register_ctx ctx = {0};
133 acpi_status status;
134
135 if (!acpi_has_method(handle, METHOD_NAME__CRS)) {
136 dev_err(hdev->dev, "No _CRS method.\n");
137 return -ENODEV;
138 }
139
140 ctx.dev = hdev->dev;
141 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
142 hccs_get_register_cb, &ctx);
143 if (ACPI_FAILURE(status))
144 return ctx.err;
145 hdev->chan_id = ctx.chan_id;
146
147 return 0;
148}
149
150static void hccs_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
151{
152 if (ret < 0)
153 pr_debug("TX did not complete: CMD sent:0x%x, ret:%d\n",
154 *(u8 *)msg, ret);
155 else
156 pr_debug("TX completed. CMD sent:0x%x, ret:%d\n",
157 *(u8 *)msg, ret);
158}
159
160static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg)
161{
162 struct hccs_mbox_client_info *cl_info =
163 container_of(cl, struct hccs_mbox_client_info, client);
164
165 complete(&cl_info->done);
166}
167
168static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
169{
170 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
171
172 if (cl_info->pcc_comm_addr)
173 iounmap(cl_info->pcc_comm_addr);
174 pcc_mbox_free_channel(hdev->cl_info.pcc_chan);
175}
176
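/*
 * Register a PCC mailbox client on the channel found in _CRS: request the
 * channel, derive the command deadline from the channel's nominal latency,
 * check that the PCCT txdone IRQ setting matches what this hardware version
 * expects, and ioremap the shared communication region.
 */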
177static int hccs_register_pcc_channel(struct hccs_dev *hdev)
178{
179 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
180 struct mbox_client *cl = &cl_info->client;
181 struct pcc_mbox_chan *pcc_chan;
182 struct device *dev = hdev->dev;
183 int rc;
184
185 cl->dev = dev;
186 cl->tx_block = false;
187 cl->knows_txdone = true;
188 cl->tx_done = hccs_chan_tx_done;
189 cl->rx_callback = hdev->verspec_data->rx_callback;
190 init_completion(&cl_info->done);
191
192 pcc_chan = pcc_mbox_request_channel(cl, hdev->chan_id);
193 if (IS_ERR(pcc_chan)) {
194 dev_err(dev, "PCC channel request failed.\n");
195 rc = -ENODEV;
196 goto out;
197 }
198 cl_info->pcc_chan = pcc_chan;
199 cl_info->mbox_chan = pcc_chan->mchan;
200
201 /*
202 * pcc_chan->latency is just a nominal value. In reality the remote
203 * processor could be much slower to reply. So add an arbitrary amount
204 * of wait on top of nominal.
205 */
206 cl_info->deadline_us =
207 HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
208 if (!hdev->verspec_data->has_txdone_irq &&
209 cl_info->mbox_chan->mbox->txdone_irq) {
210 dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
211 rc = -EINVAL;
212 goto err_mbx_channel_free;
213 } else if (hdev->verspec_data->has_txdone_irq &&
214 !cl_info->mbox_chan->mbox->txdone_irq) {
215 dev_err(dev, "PCC IRQ in PCCT isn't supported.\n");
216 rc = -EINVAL;
217 goto err_mbx_channel_free;
218 }
219
220 if (!pcc_chan->shmem_base_addr ||
221 pcc_chan->shmem_size != HCCS_PCC_SHARE_MEM_BYTES) {
222 dev_err(dev, "The base address or size (%llu) of PCC communication region is invalid.\n",
223 pcc_chan->shmem_size);
224 rc = -EINVAL;
225 goto err_mbx_channel_free;
226 }
227
228 cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr,
229 pcc_chan->shmem_size);
230 if (!cl_info->pcc_comm_addr) {
231 dev_err(dev, "Failed to ioremap PCC communication region for channel-%u.\n",
232 hdev->chan_id);
233 rc = -ENOMEM;
234 goto err_mbx_channel_free;
235 }
236
237 return 0;
238
239err_mbx_channel_free:
240 pcc_mbox_free_channel(cl_info->pcc_chan);
241out:
242 return rc;
243}
244
245static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev)
246{
247 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
248 struct acpi_pcct_shared_memory __iomem *comm_base =
249 cl_info->pcc_comm_addr;
250 u16 status;
251 int ret;
252
253 /*
254 * Poll the PCC status register every 3 us (delay_us) for a maximum of
255 * deadline_us (timeout_us) until the PCC command complete bit is set (cond).
256 */
257 ret = readw_poll_timeout(&comm_base->status, status,
258 status & PCC_STATUS_CMD_COMPLETE,
259 HCCS_POLL_STATUS_TIME_INTERVAL_US,
260 cl_info->deadline_us);
261 if (unlikely(ret))
262 dev_err(hdev->dev, "poll PCC status failed, ret = %d.\n", ret);
263
264 return ret;
265}
266
267static int hccs_wait_cmd_complete_by_irq(struct hccs_dev *hdev)
268{
269 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
270
271 if (!wait_for_completion_timeout(&cl_info->done,
272 usecs_to_jiffies(cl_info->deadline_us))) {
273 dev_err(hdev->dev, "PCC command execution timed out!\n");
274 return -ETIMEDOUT;
275 }
276
277 return 0;
278}
279
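/*
 * Fill the shared memory region of a generic (non-extended) PCC subspace:
 * write the header (signature, command, status) and then copy the request
 * descriptor into the communication space.
 */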
280static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev,
281 u8 cmd,
282 struct hccs_desc *desc,
283 void __iomem *comm_space,
284 u16 space_size)
285{
286 struct acpi_pcct_shared_memory tmp = {
287 .signature = PCC_SIGNATURE | hdev->chan_id,
288 .command = cmd,
289 .status = 0,
290 };
291
292 memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
293 sizeof(struct acpi_pcct_shared_memory));
294
295 /* Copy the message to the PCC comm space */
296 memcpy_toio(comm_space, (void *)desc, space_size);
297}
298
299static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev,
300 u8 cmd,
301 struct hccs_desc *desc,
302 void __iomem *comm_space,
303 u16 space_size)
304{
305 struct acpi_pcct_ext_pcc_shared_memory tmp = {
306 .signature = PCC_SIGNATURE | hdev->chan_id,
307 .flags = PCC_CMD_COMPLETION_NOTIFY,
308 .length = HCCS_PCC_SHARE_MEM_BYTES,
309 .command = cmd,
310 };
311
312 memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
313 sizeof(struct acpi_pcct_ext_pcc_shared_memory));
314
315 /* Copy the message to the PCC comm space */
316 memcpy_toio(comm_space, (void *)desc, space_size);
317}
318
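/*
 * Send one PCC command: fill the shared memory region, ring the doorbell,
 * wait for completion (by polling or by IRQ depending on the hardware
 * version), then copy the response back and check the firmware return status.
 */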
319static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
320 struct hccs_desc *desc)
321{
322 const struct hccs_verspecific_data *verspec_data = hdev->verspec_data;
323 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
324 struct hccs_fw_inner_head *fw_inner_head;
325 void __iomem *comm_space;
326 u16 space_size;
327 int ret;
328
329 comm_space = cl_info->pcc_comm_addr + verspec_data->shared_mem_size;
330 space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size;
331 verspec_data->fill_pcc_shared_mem(hdev, cmd, desc,
332 comm_space, space_size);
333 if (verspec_data->has_txdone_irq)
334 reinit_completion(&cl_info->done);
335
336 /* Ring doorbell */
337 ret = mbox_send_message(cl_info->mbox_chan, &cmd);
338 if (ret < 0) {
339 dev_err(hdev->dev, "Send PCC mbox message failed, ret = %d.\n",
340 ret);
341 goto end;
342 }
343
344 ret = verspec_data->wait_cmd_complete(hdev);
345 if (ret)
346 goto end;
347
348 /* Copy response data */
349 memcpy_fromio((void *)desc, comm_space, space_size);
350 fw_inner_head = &desc->rsp.fw_inner_head;
351 if (fw_inner_head->retStatus) {
352 dev_err(hdev->dev, "Execute PCC command failed, error code = %u.\n",
353 fw_inner_head->retStatus);
354 ret = -EIO;
355 }
356
357end:
358 if (verspec_data->has_txdone_irq)
359 mbox_chan_txdone(cl_info->mbox_chan, ret);
360 else
361 mbox_client_txdone(cl_info->mbox_chan, ret);
362 return ret;
363}
364
365static void hccs_init_req_desc(struct hccs_desc *desc)
366{
367 struct hccs_req_desc *req = &desc->req;
368
369 memset(desc, 0, sizeof(*desc));
370 req->req_head.module_code = HCCS_SERDES_MODULE_CODE;
371}
372
373static int hccs_get_dev_caps(struct hccs_dev *hdev)
374{
375 struct hccs_desc desc;
376 int ret;
377
378 hccs_init_req_desc(&desc);
379 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DEV_CAP, &desc);
380 if (ret) {
381 dev_err(hdev->dev, "Get device capabilities failed, ret = %d.\n",
382 ret);
383 return ret;
384 }
385 memcpy(&hdev->caps, desc.rsp.data, sizeof(hdev->caps));
386
387 return 0;
388}
389
390static int hccs_query_chip_num_on_platform(struct hccs_dev *hdev)
391{
392 struct hccs_desc desc;
393 int ret;
394
395 hccs_init_req_desc(&desc);
396 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_CHIP_NUM, &desc);
397 if (ret) {
398 dev_err(hdev->dev, "query system chip number failed, ret = %d.\n",
399 ret);
400 return ret;
401 }
402
403 hdev->chip_num = *((u8 *)&desc.rsp.data);
404 if (!hdev->chip_num) {
405 dev_err(hdev->dev, "chip num obtained from firmware is zero.\n");
406 return -EINVAL;
407 }
408
409 return 0;
410}
411
412static int hccs_get_chip_info(struct hccs_dev *hdev,
413 struct hccs_chip_info *chip)
414{
415 struct hccs_die_num_req_param *req_param;
416 struct hccs_desc desc;
417 int ret;
418
419 hccs_init_req_desc(&desc);
420 req_param = (struct hccs_die_num_req_param *)desc.req.data;
421 req_param->chip_id = chip->chip_id;
422 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_NUM, &desc);
423 if (ret)
424 return ret;
425
426 chip->die_num = *((u8 *)&desc.rsp.data);
427
428 return 0;
429}
430
431static int hccs_query_chip_info_on_platform(struct hccs_dev *hdev)
432{
433 struct hccs_chip_info *chip;
434 int ret;
435 u8 idx;
436
437 ret = hccs_query_chip_num_on_platform(hdev);
438 if (ret) {
439 dev_err(hdev->dev, "query chip number on platform failed, ret = %d.\n",
440 ret);
441 return ret;
442 }
443
444 hdev->chips = devm_kzalloc(hdev->dev,
445 hdev->chip_num * sizeof(struct hccs_chip_info),
446 GFP_KERNEL);
447 if (!hdev->chips) {
448 dev_err(hdev->dev, "failed to allocate memory for all chips.\n");
449 return -ENOMEM;
450 }
451
452 for (idx = 0; idx < hdev->chip_num; idx++) {
453 chip = &hdev->chips[idx];
454 chip->chip_id = idx;
455 ret = hccs_get_chip_info(hdev, chip);
456 if (ret) {
457 dev_err(hdev->dev, "get chip%u info failed, ret = %d.\n",
458 idx, ret);
459 return ret;
460 }
461 chip->hdev = hdev;
462 }
463
464 return 0;
465}
466
467static int hccs_query_die_info_on_chip(struct hccs_dev *hdev, u8 chip_id,
468 u8 die_idx, struct hccs_die_info *die)
469{
470 struct hccs_die_info_req_param *req_param;
471 struct hccs_die_info_rsp_data *rsp_data;
472 struct hccs_desc desc;
473 int ret;
474
475 hccs_init_req_desc(&desc);
476 req_param = (struct hccs_die_info_req_param *)desc.req.data;
477 req_param->chip_id = chip_id;
478 req_param->die_idx = die_idx;
479 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_INFO, &desc);
480 if (ret)
481 return ret;
482
483 rsp_data = (struct hccs_die_info_rsp_data *)desc.rsp.data;
484 die->die_id = rsp_data->die_id;
485 die->port_num = rsp_data->port_num;
486 die->min_port_id = rsp_data->min_port_id;
487 die->max_port_id = rsp_data->max_port_id;
488 if (die->min_port_id > die->max_port_id) {
489 dev_err(hdev->dev, "min port id(%u) > max port id(%u) on die_idx(%u).\n",
490 die->min_port_id, die->max_port_id, die_idx);
491 return -EINVAL;
492 }
493 if (die->max_port_id > HCCS_DIE_MAX_PORT_ID) {
494 dev_err(hdev->dev, "max port id(%u) on die_idx(%u) is too big.\n",
495 die->max_port_id, die_idx);
496 return -EINVAL;
497 }
498
499 return 0;
500}
501
502static int hccs_query_all_die_info_on_platform(struct hccs_dev *hdev)
503{
504 struct device *dev = hdev->dev;
505 struct hccs_chip_info *chip;
506 struct hccs_die_info *die;
507 bool has_die_info = false;
508 u8 i, j;
509 int ret;
510
511 for (i = 0; i < hdev->chip_num; i++) {
512 chip = &hdev->chips[i];
513 if (!chip->die_num)
514 continue;
515
516 has_die_info = true;
517 chip->dies = devm_kzalloc(hdev->dev,
518 chip->die_num * sizeof(struct hccs_die_info),
519 GFP_KERNEL);
520 if (!chip->dies) {
521 dev_err(dev, "failed to allocate memory for all dies on chip%u.\n",
522 i);
523 return -ENOMEM;
524 }
525
526 for (j = 0; j < chip->die_num; j++) {
527 die = &chip->dies[j];
528 ret = hccs_query_die_info_on_chip(hdev, i, j, die);
529 if (ret) {
530 dev_err(dev, "get die idx (%u) info on chip%u failed, ret = %d.\n",
531 j, i, ret);
532 return ret;
533 }
534 die->chip = chip;
535 }
536 }
537
538 return has_die_info ? 0 : -EINVAL;
539}
540
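/*
 * Send a batch query command and copy the response payload into 'buf' after
 * checking that it fits; the response head is passed back so the caller can
 * see the payload length and the next start ID.
 */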
541static int hccs_get_bd_info(struct hccs_dev *hdev, u8 opcode,
542 struct hccs_desc *desc,
543 void *buf, size_t buf_len,
544 struct hccs_rsp_head *rsp_head)
545{
546 struct hccs_rsp_head *head;
547 struct hccs_rsp_desc *rsp;
548 int ret;
549
550 ret = hccs_pcc_cmd_send(hdev, opcode, desc);
551 if (ret)
552 return ret;
553
554 rsp = &desc->rsp;
555 head = &rsp->rsp_head;
556 if (head->data_len > buf_len) {
557 dev_err(hdev->dev,
558 "buffer overflow (buf_len = %zu, data_len = %u)!\n",
559 buf_len, head->data_len);
560 return -ENOMEM;
561 }
562
563 memcpy(buf, rsp->data, head->data_len);
564 *rsp_head = *head;
565
566 return 0;
567}
568
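/*
 * Retrieve the attributes of all ports on a die. The firmware returns the
 * attributes in batches: each response carries next_id, which is used as the
 * start ID of the following request until max_port_id has been covered.
 */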
569static int hccs_get_all_port_attr(struct hccs_dev *hdev,
570 struct hccs_die_info *die,
571 struct hccs_port_attr *attrs, u16 size)
572{
573 struct hccs_die_comm_req_param *req_param;
574 struct hccs_req_head *req_head;
575 struct hccs_rsp_head rsp_head;
576 struct hccs_desc desc;
577 size_t left_buf_len;
578 u32 data_len = 0;
579 u8 start_id;
580 u8 *buf;
581 int ret;
582
583 buf = (u8 *)attrs;
584 left_buf_len = sizeof(struct hccs_port_attr) * size;
585 start_id = die->min_port_id;
586 while (start_id <= die->max_port_id) {
587 hccs_init_req_desc(&desc);
588 req_head = &desc.req.req_head;
589 req_head->start_id = start_id;
590 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
591 req_param->chip_id = die->chip->chip_id;
592 req_param->die_id = die->die_id;
593
594 ret = hccs_get_bd_info(hdev, HCCS_GET_DIE_PORT_INFO, &desc,
595 buf + data_len, left_buf_len, &rsp_head);
596 if (ret) {
597 dev_err(hdev->dev,
598 "get the information of port%u on die%u failed, ret = %d.\n",
599 start_id, die->die_id, ret);
600 return ret;
601 }
602
603 data_len += rsp_head.data_len;
604 left_buf_len -= rsp_head.data_len;
605 if (unlikely(rsp_head.next_id <= start_id)) {
606 dev_err(hdev->dev,
607 "next port id (%u) is not greater than last start id (%u) on die%u.\n",
608 rsp_head.next_id, start_id, die->die_id);
609 return -EINVAL;
610 }
611 start_id = rsp_head.next_id;
612 }
613
614 if (left_buf_len != 0) {
615 dev_err(hdev->dev, "failed to get attributes for the expected number of ports (%u).\n",
616 size);
617 return -EINVAL;
618 }
619
620 return 0;
621}
622
623static int hccs_get_all_port_info_on_die(struct hccs_dev *hdev,
624 struct hccs_die_info *die)
625{
626 struct hccs_port_attr *attrs;
627 struct hccs_port_info *port;
628 int ret;
629 u8 i;
630
631 attrs = kcalloc(die->port_num, sizeof(struct hccs_port_attr),
632 GFP_KERNEL);
633 if (!attrs)
634 return -ENOMEM;
635
636 ret = hccs_get_all_port_attr(hdev, die, attrs, die->port_num);
637 if (ret)
638 goto out;
639
640 for (i = 0; i < die->port_num; i++) {
641 port = &die->ports[i];
642 port->port_id = attrs[i].port_id;
643 port->port_type = attrs[i].port_type;
644 port->max_lane_num = attrs[i].max_lane_num;
645 port->enable = attrs[i].enable;
646 port->die = die;
647 }
648
649out:
650 kfree(attrs);
651 return ret;
652}
653
654static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
655{
656 struct device *dev = hdev->dev;
657 struct hccs_chip_info *chip;
658 struct hccs_die_info *die;
659 bool has_port_info = false;
660 u8 i, j;
661 int ret;
662
663 for (i = 0; i < hdev->chip_num; i++) {
664 chip = &hdev->chips[i];
665 for (j = 0; j < chip->die_num; j++) {
666 die = &chip->dies[j];
667 if (!die->port_num)
668 continue;
669
670 has_port_info = true;
671 die->ports = devm_kzalloc(dev,
672 die->port_num * sizeof(struct hccs_port_info),
673 GFP_KERNEL);
674 if (!die->ports) {
675 dev_err(dev, "failed to allocate ports memory on chip%u/die%u.\n",
676 i, die->die_id);
677 return -ENOMEM;
678 }
679
680 ret = hccs_get_all_port_info_on_die(hdev, die);
681 if (ret) {
682 dev_err(dev, "get all port info on chip%u/die%u failed, ret = %d.\n",
683 i, die->die_id, ret);
684 return ret;
685 }
686 }
687 }
688
689 return has_port_info ? 0 : -EINVAL;
690}
691
692static int hccs_get_hw_info(struct hccs_dev *hdev)
693{
694 int ret;
695
696 ret = hccs_query_chip_info_on_platform(hdev);
697 if (ret) {
698 dev_err(hdev->dev, "query chip info on platform failed, ret = %d.\n",
699 ret);
700 return ret;
701 }
702
703 ret = hccs_query_all_die_info_on_platform(hdev);
704 if (ret) {
705 dev_err(hdev->dev, "query all die info on platform failed, ret = %d.\n",
706 ret);
707 return ret;
708 }
709
710 ret = hccs_query_all_port_info_on_platform(hdev);
711 if (ret) {
712 dev_err(hdev->dev, "query all port info on platform failed, ret = %d.\n",
713 ret);
714 return ret;
715 }
716
717 return 0;
718}
719
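/* Mark every port type found on the platform in 'hccs_ver' and count them. */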
720static u16 hccs_calc_used_type_num(struct hccs_dev *hdev,
721 unsigned long *hccs_ver)
722{
723 struct hccs_chip_info *chip;
724 struct hccs_port_info *port;
725 struct hccs_die_info *die;
726 u16 used_type_num = 0;
727 u16 i, j, k;
728
729 for (i = 0; i < hdev->chip_num; i++) {
730 chip = &hdev->chips[i];
731 for (j = 0; j < chip->die_num; j++) {
732 die = &chip->dies[j];
733 for (k = 0; k < die->port_num; k++) {
734 port = &die->ports[k];
735 set_bit(port->port_type, hccs_ver);
736 }
737 }
738 }
739
740 for_each_set_bit(i, hccs_ver, HCCS_IP_MAX + 1)
741 used_type_num++;
742
743 return used_type_num;
744}
745
746static int hccs_init_type_name_maps(struct hccs_dev *hdev)
747{
748 DECLARE_BITMAP(hccs_ver, HCCS_IP_MAX + 1) = {};
749 unsigned int i;
750 u16 idx = 0;
751
752 hdev->used_type_num = hccs_calc_used_type_num(hdev, hccs_ver);
753 hdev->type_name_maps = devm_kcalloc(hdev->dev, hdev->used_type_num,
754 sizeof(struct hccs_type_name_map),
755 GFP_KERNEL);
756 if (!hdev->type_name_maps)
757 return -ENOMEM;
758
759 for_each_set_bit(i, hccs_ver, HCCS_IP_MAX + 1) {
760 hdev->type_name_maps[idx].type = i;
761 sprintf(hdev->type_name_maps[idx].name,
762 "%s%u", HCCS_IP_PREFIX, i);
763 idx++;
764 }
765
766 return 0;
767}
768
769static int hccs_query_port_link_status(struct hccs_dev *hdev,
770 const struct hccs_port_info *port,
771 struct hccs_link_status *link_status)
772{
773 const struct hccs_die_info *die = port->die;
774 const struct hccs_chip_info *chip = die->chip;
775 struct hccs_port_comm_req_param *req_param;
776 struct hccs_desc desc;
777 int ret;
778
779 hccs_init_req_desc(&desc);
780 req_param = (struct hccs_port_comm_req_param *)desc.req.data;
781 req_param->chip_id = chip->chip_id;
782 req_param->die_id = die->die_id;
783 req_param->port_id = port->port_id;
784 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_LINK_STATUS, &desc);
785 if (ret) {
786 dev_err(hdev->dev,
787 "get port link status info failed, ret = %d.\n", ret);
788 return ret;
789 }
790
791 *link_status = *((struct hccs_link_status *)desc.rsp.data);
792
793 return 0;
794}
795
796static int hccs_query_port_crc_err_cnt(struct hccs_dev *hdev,
797 const struct hccs_port_info *port,
798 u64 *crc_err_cnt)
799{
800 const struct hccs_die_info *die = port->die;
801 const struct hccs_chip_info *chip = die->chip;
802 struct hccs_port_comm_req_param *req_param;
803 struct hccs_desc desc;
804 int ret;
805
806 hccs_init_req_desc(&desc);
807 req_param = (struct hccs_port_comm_req_param *)desc.req.data;
808 req_param->chip_id = chip->chip_id;
809 req_param->die_id = die->die_id;
810 req_param->port_id = port->port_id;
811 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_CRC_ERR_CNT, &desc);
812 if (ret) {
813 dev_err(hdev->dev,
814 "get port crc error count failed, ret = %d.\n", ret);
815 return ret;
816 }
817
818 memcpy(crc_err_cnt, &desc.rsp.data, sizeof(u64));
819
820 return 0;
821}
822
823static int hccs_get_die_all_link_status(struct hccs_dev *hdev,
824 const struct hccs_die_info *die,
825 u8 *all_linked)
826{
827 struct hccs_die_comm_req_param *req_param;
828 struct hccs_desc desc;
829 int ret;
830
831 if (die->port_num == 0) {
832 *all_linked = 1;
833 return 0;
834 }
835
836 hccs_init_req_desc(&desc);
837 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
838 req_param->chip_id = die->chip->chip_id;
839 req_param->die_id = die->die_id;
840 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LINK_STA, &desc);
841 if (ret) {
842 dev_err(hdev->dev,
843 "get link status of all ports failed on die%u, ret = %d.\n",
844 die->die_id, ret);
845 return ret;
846 }
847
848 *all_linked = *((u8 *)&desc.rsp.data);
849
850 return 0;
851}
852
853static int hccs_get_die_all_port_lane_status(struct hccs_dev *hdev,
854 const struct hccs_die_info *die,
855 u8 *full_lane)
856{
857 struct hccs_die_comm_req_param *req_param;
858 struct hccs_desc desc;
859 int ret;
860
861 if (die->port_num == 0) {
862 *full_lane = 1;
863 return 0;
864 }
865
866 hccs_init_req_desc(&desc);
867 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
868 req_param->chip_id = die->chip->chip_id;
869 req_param->die_id = die->die_id;
870 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LANE_STA, &desc);
871 if (ret) {
872 dev_err(hdev->dev, "get lane status of all ports failed on die%u, ret = %d.\n",
873 die->die_id, ret);
874 return ret;
875 }
876
877 *full_lane = *((u8 *)&desc.rsp.data);
878
879 return 0;
880}
881
882static int hccs_get_die_total_crc_err_cnt(struct hccs_dev *hdev,
883 const struct hccs_die_info *die,
884 u64 *total_crc_err_cnt)
885{
886 struct hccs_die_comm_req_param *req_param;
887 struct hccs_desc desc;
888 int ret;
889
890 if (die->port_num == 0) {
891 *total_crc_err_cnt = 0;
892 return 0;
893 }
894
895 hccs_init_req_desc(&desc);
896 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
897 req_param->chip_id = die->chip->chip_id;
898 req_param->die_id = die->die_id;
899 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_CRC_ERR_CNT, &desc);
900 if (ret) {
901 dev_err(hdev->dev, "get crc error count sum failed on die%u, ret = %d.\n",
902 die->die_id, ret);
903 return ret;
904 }
905
906 memcpy(total_crc_err_cnt, &desc.rsp.data, sizeof(u64));
907
908 return 0;
909}
910
911static ssize_t hccs_show(struct kobject *k, struct attribute *attr, char *buf)
912{
913 struct kobj_attribute *kobj_attr;
914
915 kobj_attr = container_of(attr, struct kobj_attribute, attr);
916
917 return kobj_attr->show(k, kobj_attr, buf);
918}
919
920static const struct sysfs_ops hccs_comm_ops = {
921 .show = hccs_show,
922};
923
924static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
925 char *buf)
926{
927 const struct hccs_port_info *port = kobj_to_port_info(kobj);
928
929 return sysfs_emit(buf, "%s%u\n", HCCS_IP_PREFIX, port->port_type);
930}
931static struct kobj_attribute hccs_type_attr = __ATTR_RO(type);
932
933static ssize_t lane_mode_show(struct kobject *kobj, struct kobj_attribute *attr,
934 char *buf)
935{
936 const struct hccs_port_info *port = kobj_to_port_info(kobj);
937
938 return sysfs_emit(buf, "x%u\n", port->max_lane_num);
939}
940static struct kobj_attribute lane_mode_attr = __ATTR_RO(lane_mode);
941
942static ssize_t enable_show(struct kobject *kobj,
943 struct kobj_attribute *attr, char *buf)
944{
945 const struct hccs_port_info *port = kobj_to_port_info(kobj);
946
947 return sysfs_emit(buf, "%u\n", port->enable);
948}
949static struct kobj_attribute port_enable_attr = __ATTR_RO(enable);
950
951static ssize_t cur_lane_num_show(struct kobject *kobj,
952 struct kobj_attribute *attr, char *buf)
953{
954 const struct hccs_port_info *port = kobj_to_port_info(kobj);
955 struct hccs_dev *hdev = port->die->chip->hdev;
956 struct hccs_link_status link_status = {0};
957 int ret;
958
959 mutex_lock(&hdev->lock);
960 ret = hccs_query_port_link_status(hdev, port, &link_status);
961 mutex_unlock(&hdev->lock);
962 if (ret)
963 return ret;
964
965 return sysfs_emit(buf, "%u\n", link_status.lane_num);
966}
967static struct kobj_attribute cur_lane_num_attr = __ATTR_RO(cur_lane_num);
968
969static ssize_t link_fsm_show(struct kobject *kobj,
970 struct kobj_attribute *attr, char *buf)
971{
972 const struct hccs_port_info *port = kobj_to_port_info(kobj);
973 struct hccs_dev *hdev = port->die->chip->hdev;
974 struct hccs_link_status link_status = {0};
975 const struct {
976 u8 link_fsm;
977 char *str;
978 } link_fsm_map[] = {
979 {HCCS_PORT_RESET, "reset"},
980 {HCCS_PORT_SETUP, "setup"},
981 {HCCS_PORT_CONFIG, "config"},
982 {HCCS_PORT_READY, "link-up"},
983 };
984 const char *link_fsm_str = "unknown";
985 size_t i;
986 int ret;
987
988 mutex_lock(&hdev->lock);
989 ret = hccs_query_port_link_status(hdev, port, &link_status);
990 mutex_unlock(&hdev->lock);
991 if (ret)
992 return ret;
993
994 for (i = 0; i < ARRAY_SIZE(link_fsm_map); i++) {
995 if (link_fsm_map[i].link_fsm == link_status.link_fsm) {
996 link_fsm_str = link_fsm_map[i].str;
997 break;
998 }
999 }
1000
1001 return sysfs_emit(buf, "%s\n", link_fsm_str);
1002}
1003static struct kobj_attribute link_fsm_attr = __ATTR_RO(link_fsm);
1004
1005static ssize_t lane_mask_show(struct kobject *kobj,
1006 struct kobj_attribute *attr, char *buf)
1007{
1008 const struct hccs_port_info *port = kobj_to_port_info(kobj);
1009 struct hccs_dev *hdev = port->die->chip->hdev;
1010 struct hccs_link_status link_status = {0};
1011 int ret;
1012
1013 mutex_lock(&hdev->lock);
1014 ret = hccs_query_port_link_status(hdev, port, &link_status);
1015 mutex_unlock(&hdev->lock);
1016 if (ret)
1017 return ret;
1018
1019 return sysfs_emit(buf, "0x%x\n", link_status.lane_mask);
1020}
1021static struct kobj_attribute lane_mask_attr = __ATTR_RO(lane_mask);
1022
1023static ssize_t crc_err_cnt_show(struct kobject *kobj,
1024 struct kobj_attribute *attr, char *buf)
1025{
1026 const struct hccs_port_info *port = kobj_to_port_info(kobj);
1027 struct hccs_dev *hdev = port->die->chip->hdev;
1028 u64 crc_err_cnt;
1029 int ret;
1030
1031 mutex_lock(&hdev->lock);
1032 ret = hccs_query_port_crc_err_cnt(hdev, port, &crc_err_cnt);
1033 mutex_unlock(&hdev->lock);
1034 if (ret)
1035 return ret;
1036
1037 return sysfs_emit(buf, "%llu\n", crc_err_cnt);
1038}
1039static struct kobj_attribute crc_err_cnt_attr = __ATTR_RO(crc_err_cnt);
1040
1041static struct attribute *hccs_port_default_attrs[] = {
1042 &hccs_type_attr.attr,
1043 &lane_mode_attr.attr,
1044 &port_enable_attr.attr,
1045 &cur_lane_num_attr.attr,
1046 &link_fsm_attr.attr,
1047 &lane_mask_attr.attr,
1048 &crc_err_cnt_attr.attr,
1049 NULL,
1050};
1051ATTRIBUTE_GROUPS(hccs_port_default);
1052
1053static const struct kobj_type hccs_port_type = {
1054 .sysfs_ops = &hccs_comm_ops,
1055 .default_groups = hccs_port_default_groups,
1056};
1057
1058static ssize_t all_linked_on_die_show(struct kobject *kobj,
1059 struct kobj_attribute *attr, char *buf)
1060{
1061 const struct hccs_die_info *die = kobj_to_die_info(kobj);
1062 struct hccs_dev *hdev = die->chip->hdev;
1063 u8 all_linked;
1064 int ret;
1065
1066 mutex_lock(&hdev->lock);
1067 ret = hccs_get_die_all_link_status(hdev, die, &all_linked);
1068 mutex_unlock(&hdev->lock);
1069 if (ret)
1070 return ret;
1071
1072 return sysfs_emit(buf, "%u\n", all_linked);
1073}
1074static struct kobj_attribute all_linked_on_die_attr =
1075 __ATTR(all_linked, 0444, all_linked_on_die_show, NULL);
1076
1077static ssize_t linked_full_lane_on_die_show(struct kobject *kobj,
1078 struct kobj_attribute *attr,
1079 char *buf)
1080{
1081 const struct hccs_die_info *die = kobj_to_die_info(kobj);
1082 struct hccs_dev *hdev = die->chip->hdev;
1083 u8 full_lane;
1084 int ret;
1085
1086 mutex_lock(&hdev->lock);
1087 ret = hccs_get_die_all_port_lane_status(hdev, die, &full_lane);
1088 mutex_unlock(&hdev->lock);
1089 if (ret)
1090 return ret;
1091
1092 return sysfs_emit(buf, "%u\n", full_lane);
1093}
1094static struct kobj_attribute linked_full_lane_on_die_attr =
1095 __ATTR(linked_full_lane, 0444, linked_full_lane_on_die_show, NULL);
1096
1097static ssize_t crc_err_cnt_sum_on_die_show(struct kobject *kobj,
1098 struct kobj_attribute *attr,
1099 char *buf)
1100{
1101 const struct hccs_die_info *die = kobj_to_die_info(kobj);
1102 struct hccs_dev *hdev = die->chip->hdev;
1103 u64 total_crc_err_cnt;
1104 int ret;
1105
1106 mutex_lock(&hdev->lock);
1107 ret = hccs_get_die_total_crc_err_cnt(hdev, die, &total_crc_err_cnt);
1108 mutex_unlock(&hdev->lock);
1109 if (ret)
1110 return ret;
1111
1112 return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
1113}
1114static struct kobj_attribute crc_err_cnt_sum_on_die_attr =
1115 __ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_die_show, NULL);
1116
1117static struct attribute *hccs_die_default_attrs[] = {
1118 &all_linked_on_die_attr.attr,
1119 &linked_full_lane_on_die_attr.attr,
1120 &crc_err_cnt_sum_on_die_attr.attr,
1121 NULL,
1122};
1123ATTRIBUTE_GROUPS(hccs_die_default);
1124
1125static const struct kobj_type hccs_die_type = {
1126 .sysfs_ops = &hccs_comm_ops,
1127 .default_groups = hccs_die_default_groups,
1128};
1129
1130static ssize_t all_linked_on_chip_show(struct kobject *kobj,
1131 struct kobj_attribute *attr, char *buf)
1132{
1133 const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
1134 struct hccs_dev *hdev = chip->hdev;
1135 const struct hccs_die_info *die;
1136 u8 all_linked = 1;
1137 u8 i, tmp;
1138 int ret;
1139
1140 mutex_lock(&hdev->lock);
1141 for (i = 0; i < chip->die_num; i++) {
1142 die = &chip->dies[i];
1143 ret = hccs_get_die_all_link_status(hdev, die, &tmp);
1144 if (ret) {
1145 mutex_unlock(&hdev->lock);
1146 return ret;
1147 }
1148 if (tmp != all_linked) {
1149 all_linked = 0;
1150 break;
1151 }
1152 }
1153 mutex_unlock(&hdev->lock);
1154
1155 return sysfs_emit(buf, "%u\n", all_linked);
1156}
1157static struct kobj_attribute all_linked_on_chip_attr =
1158 __ATTR(all_linked, 0444, all_linked_on_chip_show, NULL);
1159
1160static ssize_t linked_full_lane_on_chip_show(struct kobject *kobj,
1161 struct kobj_attribute *attr,
1162 char *buf)
1163{
1164 const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
1165 struct hccs_dev *hdev = chip->hdev;
1166 const struct hccs_die_info *die;
1167 u8 full_lane = 1;
1168 u8 i, tmp;
1169 int ret;
1170
1171 mutex_lock(&hdev->lock);
1172 for (i = 0; i < chip->die_num; i++) {
1173 die = &chip->dies[i];
1174 ret = hccs_get_die_all_port_lane_status(hdev, die, &tmp);
1175 if (ret) {
1176 mutex_unlock(&hdev->lock);
1177 return ret;
1178 }
1179 if (tmp != full_lane) {
1180 full_lane = 0;
1181 break;
1182 }
1183 }
1184 mutex_unlock(&hdev->lock);
1185
1186 return sysfs_emit(buf, "%u\n", full_lane);
1187}
1188static struct kobj_attribute linked_full_lane_on_chip_attr =
1189 __ATTR(linked_full_lane, 0444, linked_full_lane_on_chip_show, NULL);
1190
1191static ssize_t crc_err_cnt_sum_on_chip_show(struct kobject *kobj,
1192 struct kobj_attribute *attr,
1193 char *buf)
1194{
1195 const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
1196 u64 crc_err_cnt, total_crc_err_cnt = 0;
1197 struct hccs_dev *hdev = chip->hdev;
1198 const struct hccs_die_info *die;
1199 int ret;
1200 u16 i;
1201
1202 mutex_lock(&hdev->lock);
1203 for (i = 0; i < chip->die_num; i++) {
1204 die = &chip->dies[i];
1205 ret = hccs_get_die_total_crc_err_cnt(hdev, die, &crc_err_cnt);
1206 if (ret) {
1207 mutex_unlock(&hdev->lock);
1208 return ret;
1209 }
1210
1211 total_crc_err_cnt += crc_err_cnt;
1212 }
1213 mutex_unlock(&hdev->lock);
1214
1215 return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
1216}
1217static struct kobj_attribute crc_err_cnt_sum_on_chip_attr =
1218 __ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_chip_show, NULL);
1219
1220static struct attribute *hccs_chip_default_attrs[] = {
1221 &all_linked_on_chip_attr.attr,
1222 &linked_full_lane_on_chip_attr.attr,
1223 &crc_err_cnt_sum_on_chip_attr.attr,
1224 NULL,
1225};
1226ATTRIBUTE_GROUPS(hccs_chip_default);
1227
1228static const struct kobj_type hccs_chip_type = {
1229 .sysfs_ops = &hccs_comm_ops,
1230 .default_groups = hccs_chip_default_groups,
1231};
1232
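/*
 * Parse the HCCS type name written via sysfs. Only HCCS_V2 is accepted, and
 * only when the firmware reports the HCCS v2 low power capability.
 */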
1233static int hccs_parse_pm_port_type(struct hccs_dev *hdev, const char *buf,
1234 u8 *port_type)
1235{
1236 char hccs_name[HCCS_NAME_MAX_LEN + 1] = "";
1237 u8 type;
1238 int ret;
1239
1240 ret = sscanf(buf, "%" __stringify(HCCS_NAME_MAX_LEN) "s", hccs_name);
1241 if (ret != 1)
1242 return -EINVAL;
1243
1244 ret = hccs_name_to_port_type(hdev, hccs_name, &type);
1245 if (ret) {
1246 dev_dbg(hdev->dev, "invalid input, please get the available types from 'used_types'.\n");
1247 return ret;
1248 }
1249
1250 if (type == HCCS_V2 && hdev->caps & HCCS_CAPS_HCCS_V2_PM) {
1251 *port_type = type;
1252 return 0;
1253 }
1254
1255 dev_dbg(hdev->dev, "%s doesn't support increasing and decreasing lanes.\n",
1256 hccs_name);
1257
1258 return -EOPNOTSUPP;
1259}
1260
1261static int hccs_query_port_idle_status(struct hccs_dev *hdev,
1262 struct hccs_port_info *port, u8 *idle)
1263{
1264 const struct hccs_die_info *die = port->die;
1265 const struct hccs_chip_info *chip = die->chip;
1266 struct hccs_port_comm_req_param *req_param;
1267 struct hccs_desc desc;
1268 int ret;
1269
1270 hccs_init_req_desc(&desc);
1271 req_param = (struct hccs_port_comm_req_param *)desc.req.data;
1272 req_param->chip_id = chip->chip_id;
1273 req_param->die_id = die->die_id;
1274 req_param->port_id = port->port_id;
1275 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_IDLE_STATUS, &desc);
1276 if (ret) {
1277 dev_err(hdev->dev,
1278 "get port idle status failed, ret = %d.\n", ret);
1279 return ret;
1280 }
1281
1282 *idle = *((u8 *)desc.rsp.data);
1283 return 0;
1284}
1285
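/*
 * '*all_idle' is set to true only if every port of the given type on the
 * platform reports idle; a single busy port stops the scan early.
 */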
1286static int hccs_get_all_spec_port_idle_sta(struct hccs_dev *hdev, u8 port_type,
1287 bool *all_idle)
1288{
1289 struct hccs_chip_info *chip;
1290 struct hccs_port_info *port;
1291 struct hccs_die_info *die;
1292 int ret = 0;
1293 u8 i, j, k;
1294 u8 idle;
1295
1296 *all_idle = false;
1297 for (i = 0; i < hdev->chip_num; i++) {
1298 chip = &hdev->chips[i];
1299 for (j = 0; j < chip->die_num; j++) {
1300 die = &chip->dies[j];
1301 for (k = 0; k < die->port_num; k++) {
1302 port = &die->ports[k];
1303 if (port->port_type != port_type)
1304 continue;
1305 ret = hccs_query_port_idle_status(hdev, port,
1306 &idle);
1307 if (ret) {
1308 dev_err(hdev->dev,
1309 "hccs%u on chip%u/die%u get idle status failed, ret = %d.\n",
1310 k, i, j, ret);
1311 return ret;
1312 } else if (idle == 0) {
1313 dev_info(hdev->dev, "hccs%u on chip%u/die%u is busy.\n",
1314 k, i, j);
1315 return 0;
1316 }
1317 }
1318 }
1319 }
1320 *all_idle = true;
1321
1322 return 0;
1323}
1324
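/*
 * '*full_lane' is set to true only if every port of the given type is
 * already running with its maximum lane number.
 */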
1325static int hccs_get_all_spec_port_full_lane_sta(struct hccs_dev *hdev,
1326 u8 port_type, bool *full_lane)
1327{
1328 struct hccs_link_status status = {0};
1329 struct hccs_chip_info *chip;
1330 struct hccs_port_info *port;
1331 struct hccs_die_info *die;
1332 u8 i, j, k;
1333 int ret;
1334
1335 *full_lane = false;
1336 for (i = 0; i < hdev->chip_num; i++) {
1337 chip = &hdev->chips[i];
1338 for (j = 0; j < chip->die_num; j++) {
1339 die = &chip->dies[j];
1340 for (k = 0; k < die->port_num; k++) {
1341 port = &die->ports[k];
1342 if (port->port_type != port_type)
1343 continue;
1344 ret = hccs_query_port_link_status(hdev, port,
1345 &status);
1346 if (ret)
1347 return ret;
1348 if (status.lane_num != port->max_lane_num)
1349 return 0;
1350 }
1351 }
1352 }
1353 *full_lane = true;
1354
1355 return 0;
1356}
1357
1358static int hccs_prepare_inc_lane(struct hccs_dev *hdev, u8 type)
1359{
1360 struct hccs_inc_lane_req_param *req_param;
1361 struct hccs_desc desc;
1362 int ret;
1363
1364 hccs_init_req_desc(&desc);
1365 req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
1366 req_param->port_type = type;
1367 req_param->opt_type = HCCS_PREPARE_INC_LANE;
1368 ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
1369 if (ret)
1370 dev_err(hdev->dev, "prepare for increasing lane failed, ret = %d.\n",
1371 ret);
1372
1373 return ret;
1374}
1375
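/* Poll the serdes adaptation result up to 10 times with a 100ms interval. */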
1376static int hccs_wait_serdes_adapt_completed(struct hccs_dev *hdev, u8 type)
1377{
1378#define HCCS_MAX_WAIT_CNT_FOR_ADAPT 10
1379#define HCCS_QUERY_ADAPT_RES_DELAY_MS 100
1380#define HCCS_SERDES_ADAPT_OK 0
1381
1382 struct hccs_inc_lane_req_param *req_param;
1383 u8 wait_cnt = HCCS_MAX_WAIT_CNT_FOR_ADAPT;
1384 struct hccs_desc desc;
1385 u8 adapt_res;
1386 int ret;
1387
1388 do {
1389 hccs_init_req_desc(&desc);
1390 req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
1391 req_param->port_type = type;
1392 req_param->opt_type = HCCS_GET_ADAPT_RES;
1393 ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
1394 if (ret) {
1395 dev_err(hdev->dev, "query adaptation result failed, ret = %d.\n",
1396 ret);
1397 return ret;
1398 }
1399 adapt_res = *((u8 *)&desc.rsp.data);
1400 if (adapt_res == HCCS_SERDES_ADAPT_OK)
1401 return 0;
1402
1403 msleep(HCCS_QUERY_ADAPT_RES_DELAY_MS);
1404 } while (--wait_cnt);
1405
1406 dev_err(hdev->dev, "waiting for adaptation to complete timed out.\n");
1407
1408 return -ETIMEDOUT;
1409}
1410
1411static int hccs_start_hpcs_retraining(struct hccs_dev *hdev, u8 type)
1412{
1413 struct hccs_inc_lane_req_param *req_param;
1414 struct hccs_desc desc;
1415 int ret;
1416
1417 hccs_init_req_desc(&desc);
1418 req_param = (struct hccs_inc_lane_req_param *)desc.req.data;
1419 req_param->port_type = type;
1420 req_param->opt_type = HCCS_START_RETRAINING;
1421 ret = hccs_pcc_cmd_send(hdev, HCCS_PM_INC_LANE, &desc);
1422 if (ret)
1423 dev_err(hdev->dev, "start hpcs retraining failed, ret = %d.\n",
1424 ret);
1425
1426 return ret;
1427}
1428
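/*
 * Increasing lanes is a three-step sequence: prepare, wait for the serdes
 * adaptation to complete, then start HPCS retraining.
 */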
1429static int hccs_start_inc_lane(struct hccs_dev *hdev, u8 type)
1430{
1431 int ret;
1432
1433 ret = hccs_prepare_inc_lane(hdev, type);
1434 if (ret)
1435 return ret;
1436
1437 ret = hccs_wait_serdes_adapt_completed(hdev, type);
1438 if (ret)
1439 return ret;
1440
1441 return hccs_start_hpcs_retraining(hdev, type);
1442}
1443
1444static int hccs_start_dec_lane(struct hccs_dev *hdev, u8 type)
1445{
1446 struct hccs_desc desc;
1447 u8 *port_type;
1448 int ret;
1449
1450 hccs_init_req_desc(&desc);
1451 port_type = (u8 *)desc.req.data;
1452 *port_type = type;
1453 ret = hccs_pcc_cmd_send(hdev, HCCS_PM_DEC_LANE, &desc);
1454 if (ret)
1455 dev_err(hdev->dev, "start to decrease lane failed, ret = %d.\n",
1456 ret);
1457
1458 return ret;
1459}
1460
1461static ssize_t dec_lane_of_type_store(struct kobject *kobj, struct kobj_attribute *attr,
1462 const char *buf, size_t count)
1463{
1464 struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
1465 bool all_in_idle;
1466 u8 port_type;
1467 int ret;
1468
1469 ret = hccs_parse_pm_port_type(hdev, buf, &port_type);
1470 if (ret)
1471 return ret;
1472
1473 mutex_lock(&hdev->lock);
1474 ret = hccs_get_all_spec_port_idle_sta(hdev, port_type, &all_in_idle);
1475 if (ret)
1476 goto out;
1477 if (!all_in_idle) {
1478 ret = -EBUSY;
1479 dev_err(hdev->dev, "please don't decrease lanes under high load with %s, ret = %d.\n",
1480 hccs_port_type_to_name(hdev, port_type), ret);
1481 goto out;
1482 }
1483
1484 ret = hccs_start_dec_lane(hdev, port_type);
1485out:
1486 mutex_unlock(&hdev->lock);
1487
1488 return ret == 0 ? count : ret;
1489}
1490static struct kobj_attribute dec_lane_of_type_attr =
1491 __ATTR(dec_lane_of_type, 0200, NULL, dec_lane_of_type_store);
1492
1493static ssize_t inc_lane_of_type_store(struct kobject *kobj, struct kobj_attribute *attr,
1494 const char *buf, size_t count)
1495{
1496 struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
1497 bool full_lane;
1498 u8 port_type;
1499 int ret;
1500
1501 ret = hccs_parse_pm_port_type(hdev, buf, &port_type);
1502 if (ret)
1503 return ret;
1504
1505 mutex_lock(&hdev->lock);
1506 ret = hccs_get_all_spec_port_full_lane_sta(hdev, port_type, &full_lane);
1507 if (ret || full_lane)
1508 goto out;
1509
1510 ret = hccs_start_inc_lane(hdev, port_type);
1511out:
1512 mutex_unlock(&hdev->lock);
1513 return ret == 0 ? count : ret;
1514}
1515static struct kobj_attribute inc_lane_of_type_attr =
1516 __ATTR(inc_lane_of_type, 0200, NULL, inc_lane_of_type_store);
1517
1518static ssize_t available_inc_dec_lane_types_show(struct kobject *kobj,
1519 struct kobj_attribute *attr,
1520 char *buf)
1521{
1522 struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
1523
1524 if (hdev->caps & HCCS_CAPS_HCCS_V2_PM)
1525 return sysfs_emit(buf, "%s\n",
1526 hccs_port_type_to_name(hdev, HCCS_V2));
1527
1528 return -EINVAL;
1529}
1530static struct kobj_attribute available_inc_dec_lane_types_attr =
1531 __ATTR(available_inc_dec_lane_types, 0444,
1532 available_inc_dec_lane_types_show, NULL);
1533
1534static ssize_t used_types_show(struct kobject *kobj,
1535 struct kobj_attribute *attr, char *buf)
1536{
1537 struct hccs_dev *hdev = device_kobj_to_hccs_dev(kobj);
1538 int len = 0;
1539 u16 i;
1540
1541 for (i = 0; i < hdev->used_type_num - 1; i++)
1542 len += sysfs_emit(&buf[len], "%s ", hdev->type_name_maps[i].name);
1543 len += sysfs_emit(&buf[len], "%s\n", hdev->type_name_maps[i].name);
1544
1545 return len;
1546}
1547static struct kobj_attribute used_types_attr =
1548 __ATTR(used_types, 0444, used_types_show, NULL);
1549
1550static void hccs_remove_misc_sysfs(struct hccs_dev *hdev)
1551{
1552 sysfs_remove_file(&hdev->dev->kobj, &used_types_attr.attr);
1553
1554 if (!(hdev->caps & HCCS_CAPS_HCCS_V2_PM))
1555 return;
1556
1557 sysfs_remove_file(&hdev->dev->kobj,
1558 &available_inc_dec_lane_types_attr.attr);
1559 sysfs_remove_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
1560 sysfs_remove_file(&hdev->dev->kobj, &inc_lane_of_type_attr.attr);
1561}
1562
1563static int hccs_add_misc_sysfs(struct hccs_dev *hdev)
1564{
1565 int ret;
1566
1567 ret = sysfs_create_file(&hdev->dev->kobj, &used_types_attr.attr);
1568 if (ret)
1569 return ret;
1570
1571 if (!(hdev->caps & HCCS_CAPS_HCCS_V2_PM))
1572 return 0;
1573
1574 ret = sysfs_create_file(&hdev->dev->kobj,
1575 &available_inc_dec_lane_types_attr.attr);
1576 if (ret)
1577 goto used_types_remove;
1578
1579 ret = sysfs_create_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
1580 if (ret)
1581 goto inc_dec_lane_types_remove;
1582
1583 ret = sysfs_create_file(&hdev->dev->kobj, &inc_lane_of_type_attr.attr);
1584 if (ret)
1585 goto dec_lane_of_type_remove;
1586
1587 return 0;
1588
1589dec_lane_of_type_remove:
1590 sysfs_remove_file(&hdev->dev->kobj, &dec_lane_of_type_attr.attr);
1591inc_dec_lane_types_remove:
1592 sysfs_remove_file(&hdev->dev->kobj,
1593 &available_inc_dec_lane_types_attr.attr);
1594used_types_remove:
1595 sysfs_remove_file(&hdev->dev->kobj, &used_types_attr.attr);
1596 return ret;
1597}
1598
1599static void hccs_remove_die_dir(struct hccs_die_info *die)
1600{
1601 struct hccs_port_info *port;
1602 u8 i;
1603
1604 for (i = 0; i < die->port_num; i++) {
1605 port = &die->ports[i];
1606 if (port->dir_created)
1607 kobject_put(&port->kobj);
1608 }
1609
1610 kobject_put(&die->kobj);
1611}
1612
1613static void hccs_remove_chip_dir(struct hccs_chip_info *chip)
1614{
1615 struct hccs_die_info *die;
1616 u8 i;
1617
1618 for (i = 0; i < chip->die_num; i++) {
1619 die = &chip->dies[i];
1620 if (die->dir_created)
1621 hccs_remove_die_dir(die);
1622 }
1623
1624 kobject_put(&chip->kobj);
1625}
1626
1627static void hccs_remove_topo_dirs(struct hccs_dev *hdev)
1628{
1629 u8 i;
1630
1631 for (i = 0; i < hdev->chip_num; i++)
1632 hccs_remove_chip_dir(&hdev->chips[i]);
1633
1634 hccs_remove_misc_sysfs(hdev);
1635}
1636
1637static int hccs_create_hccs_dir(struct hccs_dev *hdev,
1638 struct hccs_die_info *die,
1639 struct hccs_port_info *port)
1640{
1641 int ret;
1642
1643 ret = kobject_init_and_add(&port->kobj, &hccs_port_type,
1644 &die->kobj, "hccs%u", port->port_id);
1645 if (ret) {
1646 kobject_put(&port->kobj);
1647 return ret;
1648 }
1649
1650 return 0;
1651}
1652
1653static int hccs_create_die_dir(struct hccs_dev *hdev,
1654 struct hccs_chip_info *chip,
1655 struct hccs_die_info *die)
1656{
1657 struct hccs_port_info *port;
1658 int ret;
1659 u16 i;
1660
1661 ret = kobject_init_and_add(&die->kobj, &hccs_die_type,
1662 &chip->kobj, "die%u", die->die_id);
1663 if (ret) {
1664 kobject_put(&die->kobj);
1665 return ret;
1666 }
1667
1668 for (i = 0; i < die->port_num; i++) {
1669 port = &die->ports[i];
1670 ret = hccs_create_hccs_dir(hdev, die, port);
1671 if (ret) {
1672 dev_err(hdev->dev, "create hccs%u dir failed.\n",
1673 port->port_id);
1674 goto err;
1675 }
1676 port->dir_created = true;
1677 }
1678
1679 return 0;
1680err:
1681 hccs_remove_die_dir(die);
1682
1683 return ret;
1684}
1685
1686static int hccs_create_chip_dir(struct hccs_dev *hdev,
1687 struct hccs_chip_info *chip)
1688{
1689 struct hccs_die_info *die;
1690 int ret;
1691 u16 id;
1692
1693 ret = kobject_init_and_add(&chip->kobj, &hccs_chip_type,
1694 &hdev->dev->kobj, "chip%u", chip->chip_id);
1695 if (ret) {
1696 kobject_put(&chip->kobj);
1697 return ret;
1698 }
1699
1700 for (id = 0; id < chip->die_num; id++) {
1701 die = &chip->dies[id];
1702 ret = hccs_create_die_dir(hdev, chip, die);
1703 if (ret)
1704 goto err;
1705 die->dir_created = true;
1706 }
1707
1708 return 0;
1709err:
1710 hccs_remove_chip_dir(chip);
1711
1712 return ret;
1713}
1714
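/*
 * Build the chip/die/port kobject hierarchy under the platform device and
 * add the misc sysfs files; on failure, the chip directories created so far
 * are removed.
 */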
1715static int hccs_create_topo_dirs(struct hccs_dev *hdev)
1716{
1717 struct hccs_chip_info *chip;
1718 u8 id, k;
1719 int ret;
1720
1721 for (id = 0; id < hdev->chip_num; id++) {
1722 chip = &hdev->chips[id];
1723 ret = hccs_create_chip_dir(hdev, chip);
1724 if (ret) {
1725 dev_err(hdev->dev, "init chip%u dir failed!\n", id);
1726 goto err;
1727 }
1728 }
1729
1730 ret = hccs_add_misc_sysfs(hdev);
1731 if (ret) {
1732 dev_err(hdev->dev, "failed to create misc sysfs interfaces, ret = %d.\n", ret);
1733 goto err;
1734 }
1735
1736 return 0;
1737err:
1738 for (k = 0; k < id; k++)
1739 hccs_remove_chip_dir(&hdev->chips[k]);
1740
1741 return ret;
1742}
1743
1744static int hccs_probe(struct platform_device *pdev)
1745{
1746 struct acpi_device *acpi_dev;
1747 struct hccs_dev *hdev;
1748 int rc;
1749
1750 if (acpi_disabled) {
1751 dev_err(&pdev->dev, "acpi is disabled.\n");
1752 return -ENODEV;
1753 }
1754 acpi_dev = ACPI_COMPANION(&pdev->dev);
1755 if (!acpi_dev)
1756 return -ENODEV;
1757
1758 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1759 if (!hdev)
1760 return -ENOMEM;
1761 hdev->acpi_dev = acpi_dev;
1762 hdev->dev = &pdev->dev;
1763 platform_set_drvdata(pdev, hdev);
1764
1765 /*
1766 * This never fails since the driver and device have already been matched.
1767 */
1768 hdev->verspec_data = acpi_device_get_match_data(hdev->dev);
1769
1770 mutex_init(&hdev->lock);
1771 rc = hccs_get_pcc_chan_id(hdev);
1772 if (rc)
1773 return rc;
1774 rc = hccs_register_pcc_channel(hdev);
1775 if (rc)
1776 return rc;
1777
1778 rc = hccs_get_dev_caps(hdev);
1779 if (rc)
1780 goto unregister_pcc_chan;
1781
1782 rc = hccs_get_hw_info(hdev);
1783 if (rc)
1784 goto unregister_pcc_chan;
1785
1786 rc = hccs_init_type_name_maps(hdev);
1787 if (rc)
1788 goto unregister_pcc_chan;
1789
1790 rc = hccs_create_topo_dirs(hdev);
1791 if (rc)
1792 goto unregister_pcc_chan;
1793
1794 return 0;
1795
1796unregister_pcc_chan:
1797 hccs_unregister_pcc_channel(hdev);
1798
1799 return rc;
1800}
1801
1802static void hccs_remove(struct platform_device *pdev)
1803{
1804 struct hccs_dev *hdev = platform_get_drvdata(pdev);
1805
1806 hccs_remove_topo_dirs(hdev);
1807 hccs_unregister_pcc_channel(hdev);
1808}
1809
1810static const struct hccs_verspecific_data hisi04b1_verspec_data = {
1811 .rx_callback = NULL,
1812 .wait_cmd_complete = hccs_wait_cmd_complete_by_poll,
1813 .fill_pcc_shared_mem = hccs_fill_pcc_shared_mem_region,
1814 .shared_mem_size = sizeof(struct acpi_pcct_shared_memory),
1815 .has_txdone_irq = false,
1816};
1817
1818static const struct hccs_verspecific_data hisi04b2_verspec_data = {
1819 .rx_callback = hccs_pcc_rx_callback,
1820 .wait_cmd_complete = hccs_wait_cmd_complete_by_irq,
1821 .fill_pcc_shared_mem = hccs_fill_ext_pcc_shared_mem_region,
1822 .shared_mem_size = sizeof(struct acpi_pcct_ext_pcc_shared_memory),
1823 .has_txdone_irq = true,
1824};
1825
1826static const struct acpi_device_id hccs_acpi_match[] = {
1827 { "HISI04B1", (unsigned long)&hisi04b1_verspec_data},
1828 { "HISI04B2", (unsigned long)&hisi04b2_verspec_data},
1829 { }
1830};
1831MODULE_DEVICE_TABLE(acpi, hccs_acpi_match);
1832
1833static struct platform_driver hccs_driver = {
1834 .probe = hccs_probe,
1835 .remove = hccs_remove,
1836 .driver = {
1837 .name = "kunpeng_hccs",
1838 .acpi_match_table = hccs_acpi_match,
1839 },
1840};
1841
1842module_platform_driver(hccs_driver);
1843
1844MODULE_DESCRIPTION("Kunpeng SoC HCCS driver");
1845MODULE_LICENSE("GPL");
1846MODULE_AUTHOR("Huisong Li <lihuisong@huawei.com>");
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * The Huawei Cache Coherence System (HCCS) is a multi-chip interconnection
4 * bus protocol.
5 *
6 * Copyright (c) 2023 Hisilicon Limited.
7 * Author: Huisong Li <lihuisong@huawei.com>
8 *
9 * HCCS driver for Kunpeng SoC provides the following features:
10 * - Retrieve the following information about each port:
11 * - port type
12 * - lane mode
13 * - enable
14 * - current lane mode
15 * - link finite state machine
16 * - lane mask
17 * - CRC error count
18 *
19 * - Retrieve the following information about all the ports on the chip or
20 * the die:
21 * - if all enabled ports are in linked
22 * - if all linked ports are in full lane
23 * - CRC error count sum
24 */
25#include <linux/acpi.h>
26#include <linux/iopoll.h>
27#include <linux/platform_device.h>
28#include <linux/sysfs.h>
29
30#include <acpi/pcc.h>
31
32#include "kunpeng_hccs.h"
33
34/*
35 * Arbitrary retries in case the remote processor is slow to respond
36 * to PCC commands
37 */
38#define HCCS_PCC_CMD_WAIT_RETRIES_NUM 500ULL
39#define HCCS_POLL_STATUS_TIME_INTERVAL_US 3
40
41static struct hccs_port_info *kobj_to_port_info(struct kobject *k)
42{
43 return container_of(k, struct hccs_port_info, kobj);
44}
45
46static struct hccs_die_info *kobj_to_die_info(struct kobject *k)
47{
48 return container_of(k, struct hccs_die_info, kobj);
49}
50
51static struct hccs_chip_info *kobj_to_chip_info(struct kobject *k)
52{
53 return container_of(k, struct hccs_chip_info, kobj);
54}
55
56struct hccs_register_ctx {
57 struct device *dev;
58 u8 chan_id;
59 int err;
60};
61
62static acpi_status hccs_get_register_cb(struct acpi_resource *ares,
63 void *context)
64{
65 struct acpi_resource_generic_register *reg;
66 struct hccs_register_ctx *ctx = context;
67
68 if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
69 return AE_OK;
70
71 reg = &ares->data.generic_reg;
72 if (reg->space_id != ACPI_ADR_SPACE_PLATFORM_COMM) {
73 dev_err(ctx->dev, "Bad register resource.\n");
74 ctx->err = -EINVAL;
75 return AE_ERROR;
76 }
77 ctx->chan_id = reg->access_size;
78
79 return AE_OK;
80}
81
82static int hccs_get_pcc_chan_id(struct hccs_dev *hdev)
83{
84 acpi_handle handle = ACPI_HANDLE(hdev->dev);
85 struct hccs_register_ctx ctx = {0};
86 acpi_status status;
87
88 if (!acpi_has_method(handle, METHOD_NAME__CRS)) {
89 dev_err(hdev->dev, "No _CRS method.\n");
90 return -ENODEV;
91 }
92
93 ctx.dev = hdev->dev;
94 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
95 hccs_get_register_cb, &ctx);
96 if (ACPI_FAILURE(status))
97 return ctx.err;
98 hdev->chan_id = ctx.chan_id;
99
100 return 0;
101}
102
103static void hccs_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
104{
105 if (ret < 0)
106 pr_debug("TX did not complete: CMD sent:0x%x, ret:%d\n",
107 *(u8 *)msg, ret);
108 else
109 pr_debug("TX completed. CMD sent:0x%x, ret:%d\n",
110 *(u8 *)msg, ret);
111}
112
113static void hccs_pcc_rx_callback(struct mbox_client *cl, void *mssg)
114{
115 struct hccs_mbox_client_info *cl_info =
116 container_of(cl, struct hccs_mbox_client_info, client);
117
118 complete(&cl_info->done);
119}
120
121static void hccs_unregister_pcc_channel(struct hccs_dev *hdev)
122{
123 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
124
125 if (cl_info->pcc_comm_addr)
126 iounmap(cl_info->pcc_comm_addr);
127 pcc_mbox_free_channel(hdev->cl_info.pcc_chan);
128}
129
130static int hccs_register_pcc_channel(struct hccs_dev *hdev)
131{
132 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
133 struct mbox_client *cl = &cl_info->client;
134 struct pcc_mbox_chan *pcc_chan;
135 struct device *dev = hdev->dev;
136 int rc;
137
138 cl->dev = dev;
139 cl->tx_block = false;
140 cl->knows_txdone = true;
141 cl->tx_done = hccs_chan_tx_done;
142 cl->rx_callback = hdev->verspec_data->rx_callback;
143 init_completion(&cl_info->done);
144
145 pcc_chan = pcc_mbox_request_channel(cl, hdev->chan_id);
146 if (IS_ERR(pcc_chan)) {
147 dev_err(dev, "PPC channel request failed.\n");
148 rc = -ENODEV;
149 goto out;
150 }
151 cl_info->pcc_chan = pcc_chan;
152 cl_info->mbox_chan = pcc_chan->mchan;
153
154 /*
155 * pcc_chan->latency is just a nominal value. In reality the remote
156 * processor could be much slower to reply. So add an arbitrary amount
157 * of wait on top of nominal.
158 */
159 cl_info->deadline_us =
160 HCCS_PCC_CMD_WAIT_RETRIES_NUM * pcc_chan->latency;
161 if (!hdev->verspec_data->has_txdone_irq &&
162 cl_info->mbox_chan->mbox->txdone_irq) {
163 dev_err(dev, "PCC IRQ in PCCT is enabled.\n");
164 rc = -EINVAL;
165 goto err_mbx_channel_free;
166 } else if (hdev->verspec_data->has_txdone_irq &&
167 !cl_info->mbox_chan->mbox->txdone_irq) {
168 dev_err(dev, "PCC IRQ in PCCT isn't supported.\n");
169 rc = -EINVAL;
170 goto err_mbx_channel_free;
171 }
172
173 if (pcc_chan->shmem_base_addr) {
174 cl_info->pcc_comm_addr = ioremap(pcc_chan->shmem_base_addr,
175 pcc_chan->shmem_size);
176 if (!cl_info->pcc_comm_addr) {
177 dev_err(dev, "Failed to ioremap PCC communication region for channel-%u.\n",
178 hdev->chan_id);
179 rc = -ENOMEM;
180 goto err_mbx_channel_free;
181 }
182 }
183
184 return 0;
185
186err_mbx_channel_free:
187 pcc_mbox_free_channel(cl_info->pcc_chan);
188out:
189 return rc;
190}
191
192static int hccs_wait_cmd_complete_by_poll(struct hccs_dev *hdev)
193{
194 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
195 struct acpi_pcct_shared_memory __iomem *comm_base =
196 cl_info->pcc_comm_addr;
197 u16 status;
198 int ret;
199
200 /*
201 * Poll PCC status register every 3us(delay_us) for maximum of
202 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
203 */
204 ret = readw_poll_timeout(&comm_base->status, status,
205 status & PCC_STATUS_CMD_COMPLETE,
206 HCCS_POLL_STATUS_TIME_INTERVAL_US,
207 cl_info->deadline_us);
208 if (unlikely(ret))
209 dev_err(hdev->dev, "poll PCC status failed, ret = %d.\n", ret);
210
211 return ret;
212}
213
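/*
 * Completion path for platforms with a PCC txdone interrupt; the
 * completion is signalled from the mailbox RX callback.
 */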
214static int hccs_wait_cmd_complete_by_irq(struct hccs_dev *hdev)
215{
216 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
217
218 if (!wait_for_completion_timeout(&cl_info->done,
219 usecs_to_jiffies(cl_info->deadline_us))) {
220 dev_err(hdev->dev, "PCC command execution timed out!\n");
221 return -ETIMEDOUT;
222 }
223
224 return 0;
225}
226
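/*
 * Write the generic PCC header (signature, command, cleared status) at the
 * start of the shared memory region, then copy the request descriptor into
 * the communication space that follows it.
 */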
227static inline void hccs_fill_pcc_shared_mem_region(struct hccs_dev *hdev,
228 u8 cmd,
229 struct hccs_desc *desc,
230 void __iomem *comm_space,
231 u16 space_size)
232{
233 struct acpi_pcct_shared_memory tmp = {
234 .signature = PCC_SIGNATURE | hdev->chan_id,
235 .command = cmd,
236 .status = 0,
237 };
238
239 memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
240 sizeof(struct acpi_pcct_shared_memory));
241
242 /* Copy the message to the PCC comm space */
243 memcpy_toio(comm_space, (void *)desc, space_size);
244}
245
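/*
 * As above, but for the extended PCC shared memory layout: the header also
 * carries the buffer length and asks the platform for a command completion
 * notification.
 */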
246static inline void hccs_fill_ext_pcc_shared_mem_region(struct hccs_dev *hdev,
247 u8 cmd,
248 struct hccs_desc *desc,
249 void __iomem *comm_space,
250 u16 space_size)
251{
252 struct acpi_pcct_ext_pcc_shared_memory tmp = {
253 .signature = PCC_SIGNATURE | hdev->chan_id,
254 .flags = PCC_CMD_COMPLETION_NOTIFY,
255 .length = HCCS_PCC_SHARE_MEM_BYTES,
256 .command = cmd,
257 };
258
259 memcpy_toio(hdev->cl_info.pcc_comm_addr, (void *)&tmp,
260 sizeof(struct acpi_pcct_ext_pcc_shared_memory));
261
262 /* Copy the message to the PCC comm space */
263 memcpy_toio(comm_space, (void *)desc, space_size);
264}
265
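/*
 * Send one HCCS command to firmware over PCC and copy back the reply.
 * The shared memory region is used roughly as follows:
 *
 *   pcc_comm_addr
 *   +-----------------------------------------------+
 *   | PCC header, generic or extended, written by   |
 *   | verspec_data->fill_pcc_shared_mem()           |
 *   +-----------------------------------------------+ +shared_mem_size
 *   | struct hccs_desc request, overwritten by the  |
 *   | response once the command completes           |
 *   +-----------------------------------------------+ +HCCS_PCC_SHARE_MEM_BYTES
 *
 * After ringing the doorbell, wait for completion (by polling or IRQ,
 * depending on the platform) and check the firmware status carried in the
 * response header.
 */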
266static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
267 struct hccs_desc *desc)
268{
269 const struct hccs_verspecific_data *verspec_data = hdev->verspec_data;
270 struct hccs_mbox_client_info *cl_info = &hdev->cl_info;
271 struct hccs_fw_inner_head *fw_inner_head;
272 void __iomem *comm_space;
273 u16 space_size;
274 int ret;
275
276 comm_space = cl_info->pcc_comm_addr + verspec_data->shared_mem_size;
277 space_size = HCCS_PCC_SHARE_MEM_BYTES - verspec_data->shared_mem_size;
278 verspec_data->fill_pcc_shared_mem(hdev, cmd, desc,
279 comm_space, space_size);
280 if (verspec_data->has_txdone_irq)
281 reinit_completion(&cl_info->done);
282
283 /* Ring doorbell */
284 ret = mbox_send_message(cl_info->mbox_chan, &cmd);
285 if (ret < 0) {
286 dev_err(hdev->dev, "Send PCC mbox message failed, ret = %d.\n",
287 ret);
288 goto end;
289 }
290
291 ret = verspec_data->wait_cmd_complete(hdev);
292 if (ret)
293 goto end;
294
295 /* Copy response data */
296 memcpy_fromio((void *)desc, comm_space, space_size);
297 fw_inner_head = &desc->rsp.fw_inner_head;
298 if (fw_inner_head->retStatus) {
299 dev_err(hdev->dev, "PCC command execution failed, error code = %u.\n",
300 fw_inner_head->retStatus);
301 ret = -EIO;
302 }
303
304end:
305 if (verspec_data->has_txdone_irq)
306 mbox_chan_txdone(cl_info->mbox_chan, ret);
307 else
308 mbox_client_txdone(cl_info->mbox_chan, ret);
309 return ret;
310}
311
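/* Every request starts from a zeroed descriptor addressed to the SerDes module. */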
312static void hccs_init_req_desc(struct hccs_desc *desc)
313{
314 struct hccs_req_desc *req = &desc->req;
315
316 memset(desc, 0, sizeof(*desc));
317 req->req_head.module_code = HCCS_SERDES_MODULE_CODE;
318}
319
320static int hccs_get_dev_caps(struct hccs_dev *hdev)
321{
322 struct hccs_desc desc;
323 int ret;
324
325 hccs_init_req_desc(&desc);
326 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DEV_CAP, &desc);
327 if (ret) {
328 dev_err(hdev->dev, "Get device capabilities failed, ret = %d.\n",
329 ret);
330 return ret;
331 }
332 memcpy(&hdev->caps, desc.rsp.data, sizeof(hdev->caps));
333
334 return 0;
335}
336
337static int hccs_query_chip_num_on_platform(struct hccs_dev *hdev)
338{
339 struct hccs_desc desc;
340 int ret;
341
342 hccs_init_req_desc(&desc);
343 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_CHIP_NUM, &desc);
344 if (ret) {
345 dev_err(hdev->dev, "query system chip number failed, ret = %d.\n",
346 ret);
347 return ret;
348 }
349
350 hdev->chip_num = *((u8 *)&desc.rsp.data);
351 if (!hdev->chip_num) {
352 dev_err(hdev->dev, "chip num obtained from firmware is zero.\n");
353 return -EINVAL;
354 }
355
356 return 0;
357}
358
359static int hccs_get_chip_info(struct hccs_dev *hdev,
360 struct hccs_chip_info *chip)
361{
362 struct hccs_die_num_req_param *req_param;
363 struct hccs_desc desc;
364 int ret;
365
366 hccs_init_req_desc(&desc);
367 req_param = (struct hccs_die_num_req_param *)desc.req.data;
368 req_param->chip_id = chip->chip_id;
369 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_NUM, &desc);
370 if (ret)
371 return ret;
372
373 chip->die_num = *((u8 *)&desc.rsp.data);
374
375 return 0;
376}
377
378static int hccs_query_chip_info_on_platform(struct hccs_dev *hdev)
379{
380 struct hccs_chip_info *chip;
381 int ret;
382 u8 idx;
383
384 ret = hccs_query_chip_num_on_platform(hdev);
385 if (ret) {
386 dev_err(hdev->dev, "query chip number on platform failed, ret = %d.\n",
387 ret);
388 return ret;
389 }
390
391 hdev->chips = devm_kcalloc(hdev->dev, hdev->chip_num,
392 sizeof(struct hccs_chip_info),
393 GFP_KERNEL);
394 if (!hdev->chips) {
395 dev_err(hdev->dev, "allocate all chips memory failed.\n");
396 return -ENOMEM;
397 }
398
399 for (idx = 0; idx < hdev->chip_num; idx++) {
400 chip = &hdev->chips[idx];
401 chip->chip_id = idx;
402 ret = hccs_get_chip_info(hdev, chip);
403 if (ret) {
404 dev_err(hdev->dev, "get chip%u info failed, ret = %d.\n",
405 idx, ret);
406 return ret;
407 }
408 chip->hdev = hdev;
409 }
410
411 return 0;
412}
413
414static int hccs_query_die_info_on_chip(struct hccs_dev *hdev, u8 chip_id,
415 u8 die_idx, struct hccs_die_info *die)
416{
417 struct hccs_die_info_req_param *req_param;
418 struct hccs_die_info_rsp_data *rsp_data;
419 struct hccs_desc desc;
420 int ret;
421
422 hccs_init_req_desc(&desc);
423 req_param = (struct hccs_die_info_req_param *)desc.req.data;
424 req_param->chip_id = chip_id;
425 req_param->die_idx = die_idx;
426 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_INFO, &desc);
427 if (ret)
428 return ret;
429
430 rsp_data = (struct hccs_die_info_rsp_data *)desc.rsp.data;
431 die->die_id = rsp_data->die_id;
432 die->port_num = rsp_data->port_num;
433 die->min_port_id = rsp_data->min_port_id;
434 die->max_port_id = rsp_data->max_port_id;
435 if (die->min_port_id > die->max_port_id) {
436 dev_err(hdev->dev, "min port id(%u) > max port id(%u) on die_idx(%u).\n",
437 die->min_port_id, die->max_port_id, die_idx);
438 return -EINVAL;
439 }
440 if (die->max_port_id > HCCS_DIE_MAX_PORT_ID) {
441 dev_err(hdev->dev, "max port id(%u) on die_idx(%u) is too big.\n",
442 die->max_port_id, die_idx);
443 return -EINVAL;
444 }
445
446 return 0;
447}
448
449static int hccs_query_all_die_info_on_platform(struct hccs_dev *hdev)
450{
451 struct device *dev = hdev->dev;
452 struct hccs_chip_info *chip;
453 struct hccs_die_info *die;
454 u8 i, j;
455 int ret;
456
457 for (i = 0; i < hdev->chip_num; i++) {
458 chip = &hdev->chips[i];
459 if (!chip->die_num)
460 continue;
461
462 chip->dies = devm_kcalloc(hdev->dev, chip->die_num,
463 sizeof(struct hccs_die_info),
464 GFP_KERNEL);
465 if (!chip->dies) {
466 dev_err(dev, "allocate all dies memory on chip%u failed.\n",
467 i);
468 return -ENOMEM;
469 }
470
471 for (j = 0; j < chip->die_num; j++) {
472 die = &chip->dies[j];
473 ret = hccs_query_die_info_on_chip(hdev, i, j, die);
474 if (ret) {
475 dev_err(dev, "get die idx (%u) info on chip%u failed, ret = %d.\n",
476 j, i, ret);
477 return ret;
478 }
479 die->chip = chip;
480 }
481 }
482
483 return 0;
484}
485
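/*
 * Issue a command whose response carries a variable-length payload: copy
 * the payload into @buf after checking it fits, and hand the response head
 * back to the caller so it can continue from rsp_head->next_id.
 */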
486static int hccs_get_bd_info(struct hccs_dev *hdev, u8 opcode,
487 struct hccs_desc *desc,
488 void *buf, size_t buf_len,
489 struct hccs_rsp_head *rsp_head)
490{
491 struct hccs_rsp_head *head;
492 struct hccs_rsp_desc *rsp;
493 int ret;
494
495 ret = hccs_pcc_cmd_send(hdev, opcode, desc);
496 if (ret)
497 return ret;
498
499 rsp = &desc->rsp;
500 head = &rsp->rsp_head;
501 if (head->data_len > buf_len) {
502 dev_err(hdev->dev,
503 "buffer overflow (buf_len = %zu, data_len = %u)!\n",
504 buf_len, head->data_len);
505 return -ENOMEM;
506 }
507
508 memcpy(buf, rsp->data, head->data_len);
509 *rsp_head = *head;
510
511 return 0;
512}
513
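/*
 * Port attributes are returned in batches: each reply reports how many
 * bytes it carried and the next port id to start from, so keep requesting
 * until the die's whole port id range [min_port_id, max_port_id] is
 * covered.
 */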
514static int hccs_get_all_port_attr(struct hccs_dev *hdev,
515 struct hccs_die_info *die,
516 struct hccs_port_attr *attrs, u16 size)
517{
518 struct hccs_die_comm_req_param *req_param;
519 struct hccs_req_head *req_head;
520 struct hccs_rsp_head rsp_head;
521 struct hccs_desc desc;
522 size_t left_buf_len;
523 u32 data_len = 0;
524 u8 start_id;
525 u8 *buf;
526 int ret;
527
528 buf = (u8 *)attrs;
529 left_buf_len = sizeof(struct hccs_port_attr) * size;
530 start_id = die->min_port_id;
531 while (start_id <= die->max_port_id) {
532 hccs_init_req_desc(&desc);
533 req_head = &desc.req.req_head;
534 req_head->start_id = start_id;
535 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
536 req_param->chip_id = die->chip->chip_id;
537 req_param->die_id = die->die_id;
538
539 ret = hccs_get_bd_info(hdev, HCCS_GET_DIE_PORT_INFO, &desc,
540 buf + data_len, left_buf_len, &rsp_head);
541 if (ret) {
542 dev_err(hdev->dev,
543 "get the information of port%u on die%u failed, ret = %d.\n",
544 start_id, die->die_id, ret);
545 return ret;
546 }
547
548 data_len += rsp_head.data_len;
549 left_buf_len -= rsp_head.data_len;
550 if (unlikely(rsp_head.next_id <= start_id)) {
551 dev_err(hdev->dev,
552 "next port id (%u) is not greater than last start id (%u) on die%u.\n",
553 rsp_head.next_id, start_id, die->die_id);
554 return -EINVAL;
555 }
556 start_id = rsp_head.next_id;
557 }
558
559 return 0;
560}
561
562static int hccs_get_all_port_info_on_die(struct hccs_dev *hdev,
563 struct hccs_die_info *die)
564{
565 struct hccs_port_attr *attrs;
566 struct hccs_port_info *port;
567 int ret;
568 u8 i;
569
570 attrs = kcalloc(die->port_num, sizeof(struct hccs_port_attr),
571 GFP_KERNEL);
572 if (!attrs)
573 return -ENOMEM;
574
575 ret = hccs_get_all_port_attr(hdev, die, attrs, die->port_num);
576 if (ret)
577 goto out;
578
579 for (i = 0; i < die->port_num; i++) {
580 port = &die->ports[i];
581 port->port_id = attrs[i].port_id;
582 port->port_type = attrs[i].port_type;
583 port->lane_mode = attrs[i].lane_mode;
584 port->enable = attrs[i].enable;
585 port->die = die;
586 }
587
588out:
589 kfree(attrs);
590 return ret;
591}
592
593static int hccs_query_all_port_info_on_platform(struct hccs_dev *hdev)
594{
595 struct device *dev = hdev->dev;
596 struct hccs_chip_info *chip;
597 struct hccs_die_info *die;
598 u8 i, j;
599 int ret;
600
601 for (i = 0; i < hdev->chip_num; i++) {
602 chip = &hdev->chips[i];
603 for (j = 0; j < chip->die_num; j++) {
604 die = &chip->dies[j];
605 if (!die->port_num)
606 continue;
607
608 die->ports = devm_kcalloc(dev, die->port_num,
609 sizeof(struct hccs_port_info),
610 GFP_KERNEL);
611 if (!die->ports) {
612 dev_err(dev, "allocate ports memory on chip%u/die%u failed.\n",
613 i, die->die_id);
614 return -ENOMEM;
615 }
616
617 ret = hccs_get_all_port_info_on_die(hdev, die);
618 if (ret) {
619 dev_err(dev, "get all port info on chip%u/die%u failed, ret = %d.\n",
620 i, die->die_id, ret);
621 return ret;
622 }
623 }
624 }
625
626 return 0;
627}
628
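/*
 * Enumerate the whole HCCS topology from firmware in three passes:
 * chips on the platform, dies on each chip, then ports on each die.
 */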
629static int hccs_get_hw_info(struct hccs_dev *hdev)
630{
631 int ret;
632
633 ret = hccs_query_chip_info_on_platform(hdev);
634 if (ret) {
635 dev_err(hdev->dev, "query chip info on platform failed, ret = %d.\n",
636 ret);
637 return ret;
638 }
639
640 ret = hccs_query_all_die_info_on_platform(hdev);
641 if (ret) {
642 dev_err(hdev->dev, "query all die info on platform failed, ret = %d.\n",
643 ret);
644 return ret;
645 }
646
647 ret = hccs_query_all_port_info_on_platform(hdev);
648 if (ret) {
649 dev_err(hdev->dev, "query all port info on platform failed, ret = %d.\n",
650 ret);
651 return ret;
652 }
653
654 return 0;
655}
656
657static int hccs_query_port_link_status(struct hccs_dev *hdev,
658 const struct hccs_port_info *port,
659 struct hccs_link_status *link_status)
660{
661 const struct hccs_die_info *die = port->die;
662 const struct hccs_chip_info *chip = die->chip;
663 struct hccs_port_comm_req_param *req_param;
664 struct hccs_desc desc;
665 int ret;
666
667 hccs_init_req_desc(&desc);
668 req_param = (struct hccs_port_comm_req_param *)desc.req.data;
669 req_param->chip_id = chip->chip_id;
670 req_param->die_id = die->die_id;
671 req_param->port_id = port->port_id;
672 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_LINK_STATUS, &desc);
673 if (ret) {
674 dev_err(hdev->dev,
675 "get port link status info failed, ret = %d.\n", ret);
676 return ret;
677 }
678
679 *link_status = *((struct hccs_link_status *)desc.rsp.data);
680
681 return 0;
682}
683
684static int hccs_query_port_crc_err_cnt(struct hccs_dev *hdev,
685 const struct hccs_port_info *port,
686 u64 *crc_err_cnt)
687{
688 const struct hccs_die_info *die = port->die;
689 const struct hccs_chip_info *chip = die->chip;
690 struct hccs_port_comm_req_param *req_param;
691 struct hccs_desc desc;
692 int ret;
693
694 hccs_init_req_desc(&desc);
695 req_param = (struct hccs_port_comm_req_param *)desc.req.data;
696 req_param->chip_id = chip->chip_id;
697 req_param->die_id = die->die_id;
698 req_param->port_id = port->port_id;
699 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_PORT_CRC_ERR_CNT, &desc);
700 if (ret) {
701 dev_err(hdev->dev,
702 "get port crc error count failed, ret = %d.\n", ret);
703 return ret;
704 }
705
706 memcpy(crc_err_cnt, &desc.rsp.data, sizeof(u64));
707
708 return 0;
709}
710
711static int hccs_get_die_all_link_status(struct hccs_dev *hdev,
712 const struct hccs_die_info *die,
713 u8 *all_linked)
714{
715 struct hccs_die_comm_req_param *req_param;
716 struct hccs_desc desc;
717 int ret;
718
719 if (die->port_num == 0) {
720 *all_linked = 1;
721 return 0;
722 }
723
724 hccs_init_req_desc(&desc);
725 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
726 req_param->chip_id = die->chip->chip_id;
727 req_param->die_id = die->die_id;
728 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LINK_STA, &desc);
729 if (ret) {
730 dev_err(hdev->dev,
731 "get link status of all ports failed on die%u, ret = %d.\n",
732 die->die_id, ret);
733 return ret;
734 }
735
736 *all_linked = *((u8 *)&desc.rsp.data);
737
738 return 0;
739}
740
741static int hccs_get_die_all_port_lane_status(struct hccs_dev *hdev,
742 const struct hccs_die_info *die,
743 u8 *full_lane)
744{
745 struct hccs_die_comm_req_param *req_param;
746 struct hccs_desc desc;
747 int ret;
748
749 if (die->port_num == 0) {
750 *full_lane = 1;
751 return 0;
752 }
753
754 hccs_init_req_desc(&desc);
755 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
756 req_param->chip_id = die->chip->chip_id;
757 req_param->die_id = die->die_id;
758 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_LANE_STA, &desc);
759 if (ret) {
760 dev_err(hdev->dev, "get lane status of all ports failed on die%u, ret = %d.\n",
761 die->die_id, ret);
762 return ret;
763 }
764
765 *full_lane = *((u8 *)&desc.rsp.data);
766
767 return 0;
768}
769
770static int hccs_get_die_total_crc_err_cnt(struct hccs_dev *hdev,
771 const struct hccs_die_info *die,
772 u64 *total_crc_err_cnt)
773{
774 struct hccs_die_comm_req_param *req_param;
775 struct hccs_desc desc;
776 int ret;
777
778 if (die->port_num == 0) {
779 *total_crc_err_cnt = 0;
780 return 0;
781 }
782
783 hccs_init_req_desc(&desc);
784 req_param = (struct hccs_die_comm_req_param *)desc.req.data;
785 req_param->chip_id = die->chip->chip_id;
786 req_param->die_id = die->die_id;
787 ret = hccs_pcc_cmd_send(hdev, HCCS_GET_DIE_PORTS_CRC_ERR_CNT, &desc);
788 if (ret) {
789 dev_err(hdev->dev, "get crc error count sum failed on die%u, ret = %d.\n",
790 die->die_id, ret);
791 return ret;
792 }
793
794 memcpy(total_crc_err_cnt, &desc.rsp.data, sizeof(u64));
795
796 return 0;
797}
798
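/*
 * All sysfs attributes below are read-only, so a common ->show dispatcher
 * suffices and no ->store callback is provided.
 */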
799static ssize_t hccs_show(struct kobject *k, struct attribute *attr, char *buf)
800{
801 struct kobj_attribute *kobj_attr;
802
803 kobj_attr = container_of(attr, struct kobj_attribute, attr);
804
805 return kobj_attr->show(k, kobj_attr, buf);
806}
807
808static const struct sysfs_ops hccs_comm_ops = {
809 .show = hccs_show,
810};
811
812static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
813 char *buf)
814{
815 const struct hccs_port_info *port = kobj_to_port_info(kobj);
816
817 return sysfs_emit(buf, "HCCS-v%u\n", port->port_type);
818}
819static struct kobj_attribute hccs_type_attr = __ATTR_RO(type);
820
821static ssize_t lane_mode_show(struct kobject *kobj, struct kobj_attribute *attr,
822 char *buf)
823{
824 const struct hccs_port_info *port = kobj_to_port_info(kobj);
825
826 return sysfs_emit(buf, "x%u\n", port->lane_mode);
827}
828static struct kobj_attribute lane_mode_attr = __ATTR_RO(lane_mode);
829
830static ssize_t enable_show(struct kobject *kobj,
831 struct kobj_attribute *attr, char *buf)
832{
833 const struct hccs_port_info *port = kobj_to_port_info(kobj);
834
835 return sysfs_emit(buf, "%u\n", port->enable);
836}
837static struct kobj_attribute port_enable_attr = __ATTR_RO(enable);
838
839static ssize_t cur_lane_num_show(struct kobject *kobj,
840 struct kobj_attribute *attr, char *buf)
841{
842 const struct hccs_port_info *port = kobj_to_port_info(kobj);
843 struct hccs_dev *hdev = port->die->chip->hdev;
844 struct hccs_link_status link_status = {0};
845 int ret;
846
847 mutex_lock(&hdev->lock);
848 ret = hccs_query_port_link_status(hdev, port, &link_status);
849 mutex_unlock(&hdev->lock);
850 if (ret)
851 return ret;
852
853 return sysfs_emit(buf, "%u\n", link_status.lane_num);
854}
855static struct kobj_attribute cur_lane_num_attr = __ATTR_RO(cur_lane_num);
856
857static ssize_t link_fsm_show(struct kobject *kobj,
858 struct kobj_attribute *attr, char *buf)
859{
860 const struct hccs_port_info *port = kobj_to_port_info(kobj);
861 struct hccs_dev *hdev = port->die->chip->hdev;
862 struct hccs_link_status link_status = {0};
863 const struct {
864 u8 link_fsm;
865 char *str;
866 } link_fsm_map[] = {
867 {HCCS_PORT_RESET, "reset"},
868 {HCCS_PORT_SETUP, "setup"},
869 {HCCS_PORT_CONFIG, "config"},
870 {HCCS_PORT_READY, "link-up"},
871 };
872 const char *link_fsm_str = "unknown";
873 size_t i;
874 int ret;
875
876 mutex_lock(&hdev->lock);
877 ret = hccs_query_port_link_status(hdev, port, &link_status);
878 mutex_unlock(&hdev->lock);
879 if (ret)
880 return ret;
881
882 for (i = 0; i < ARRAY_SIZE(link_fsm_map); i++) {
883 if (link_fsm_map[i].link_fsm == link_status.link_fsm) {
884 link_fsm_str = link_fsm_map[i].str;
885 break;
886 }
887 }
888
889 return sysfs_emit(buf, "%s\n", link_fsm_str);
890}
891static struct kobj_attribute link_fsm_attr = __ATTR_RO(link_fsm);
892
893static ssize_t lane_mask_show(struct kobject *kobj,
894 struct kobj_attribute *attr, char *buf)
895{
896 const struct hccs_port_info *port = kobj_to_port_info(kobj);
897 struct hccs_dev *hdev = port->die->chip->hdev;
898 struct hccs_link_status link_status = {0};
899 int ret;
900
901 mutex_lock(&hdev->lock);
902 ret = hccs_query_port_link_status(hdev, port, &link_status);
903 mutex_unlock(&hdev->lock);
904 if (ret)
905 return ret;
906
907 return sysfs_emit(buf, "0x%x\n", link_status.lane_mask);
908}
909static struct kobj_attribute lane_mask_attr = __ATTR_RO(lane_mask);
910
911static ssize_t crc_err_cnt_show(struct kobject *kobj,
912 struct kobj_attribute *attr, char *buf)
913{
914 const struct hccs_port_info *port = kobj_to_port_info(kobj);
915 struct hccs_dev *hdev = port->die->chip->hdev;
916 u64 crc_err_cnt;
917 int ret;
918
919 mutex_lock(&hdev->lock);
920 ret = hccs_query_port_crc_err_cnt(hdev, port, &crc_err_cnt);
921 mutex_unlock(&hdev->lock);
922 if (ret)
923 return ret;
924
925 return sysfs_emit(buf, "%llu\n", crc_err_cnt);
926}
927static struct kobj_attribute crc_err_cnt_attr = __ATTR_RO(crc_err_cnt);
928
929static struct attribute *hccs_port_default_attrs[] = {
930 &hccs_type_attr.attr,
931 &lane_mode_attr.attr,
932 &port_enable_attr.attr,
933 &cur_lane_num_attr.attr,
934 &link_fsm_attr.attr,
935 &lane_mask_attr.attr,
936 &crc_err_cnt_attr.attr,
937 NULL,
938};
939ATTRIBUTE_GROUPS(hccs_port_default);
940
941static const struct kobj_type hccs_port_type = {
942 .sysfs_ops = &hccs_comm_ops,
943 .default_groups = hccs_port_default_groups,
944};
945
946static ssize_t all_linked_on_die_show(struct kobject *kobj,
947 struct kobj_attribute *attr, char *buf)
948{
949 const struct hccs_die_info *die = kobj_to_die_info(kobj);
950 struct hccs_dev *hdev = die->chip->hdev;
951 u8 all_linked;
952 int ret;
953
954 mutex_lock(&hdev->lock);
955 ret = hccs_get_die_all_link_status(hdev, die, &all_linked);
956 mutex_unlock(&hdev->lock);
957 if (ret)
958 return ret;
959
960 return sysfs_emit(buf, "%u\n", all_linked);
961}
962static struct kobj_attribute all_linked_on_die_attr =
963 __ATTR(all_linked, 0444, all_linked_on_die_show, NULL);
964
965static ssize_t linked_full_lane_on_die_show(struct kobject *kobj,
966 struct kobj_attribute *attr,
967 char *buf)
968{
969 const struct hccs_die_info *die = kobj_to_die_info(kobj);
970 struct hccs_dev *hdev = die->chip->hdev;
971 u8 full_lane;
972 int ret;
973
974 mutex_lock(&hdev->lock);
975 ret = hccs_get_die_all_port_lane_status(hdev, die, &full_lane);
976 mutex_unlock(&hdev->lock);
977 if (ret)
978 return ret;
979
980 return sysfs_emit(buf, "%u\n", full_lane);
981}
982static struct kobj_attribute linked_full_lane_on_die_attr =
983 __ATTR(linked_full_lane, 0444, linked_full_lane_on_die_show, NULL);
984
985static ssize_t crc_err_cnt_sum_on_die_show(struct kobject *kobj,
986 struct kobj_attribute *attr,
987 char *buf)
988{
989 const struct hccs_die_info *die = kobj_to_die_info(kobj);
990 struct hccs_dev *hdev = die->chip->hdev;
991 u64 total_crc_err_cnt;
992 int ret;
993
994 mutex_lock(&hdev->lock);
995 ret = hccs_get_die_total_crc_err_cnt(hdev, die, &total_crc_err_cnt);
996 mutex_unlock(&hdev->lock);
997 if (ret)
998 return ret;
999
1000 return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
1001}
1002static struct kobj_attribute crc_err_cnt_sum_on_die_attr =
1003 __ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_die_show, NULL);
1004
1005static struct attribute *hccs_die_default_attrs[] = {
1006 &all_linked_on_die_attr.attr,
1007 &linked_full_lane_on_die_attr.attr,
1008 &crc_err_cnt_sum_on_die_attr.attr,
1009 NULL,
1010};
1011ATTRIBUTE_GROUPS(hccs_die_default);
1012
1013static const struct kobj_type hccs_die_type = {
1014 .sysfs_ops = &hccs_comm_ops,
1015 .default_groups = hccs_die_default_groups,
1016};
1017
1018static ssize_t all_linked_on_chip_show(struct kobject *kobj,
1019 struct kobj_attribute *attr, char *buf)
1020{
1021 const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
1022 struct hccs_dev *hdev = chip->hdev;
1023 const struct hccs_die_info *die;
1024 u8 all_linked = 1;
1025 u8 i, tmp;
1026 int ret;
1027
1028 mutex_lock(&hdev->lock);
1029 for (i = 0; i < chip->die_num; i++) {
1030 die = &chip->dies[i];
1031 ret = hccs_get_die_all_link_status(hdev, die, &tmp);
1032 if (ret) {
1033 mutex_unlock(&hdev->lock);
1034 return ret;
1035 }
1036 if (tmp != all_linked) {
1037 all_linked = 0;
1038 break;
1039 }
1040 }
1041 mutex_unlock(&hdev->lock);
1042
1043 return sysfs_emit(buf, "%u\n", all_linked);
1044}
1045static struct kobj_attribute all_linked_on_chip_attr =
1046 __ATTR(all_linked, 0444, all_linked_on_chip_show, NULL);
1047
1048static ssize_t linked_full_lane_on_chip_show(struct kobject *kobj,
1049 struct kobj_attribute *attr,
1050 char *buf)
1051{
1052 const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
1053 struct hccs_dev *hdev = chip->hdev;
1054 const struct hccs_die_info *die;
1055 u8 full_lane = 1;
1056 u8 i, tmp;
1057 int ret;
1058
1059 mutex_lock(&hdev->lock);
1060 for (i = 0; i < chip->die_num; i++) {
1061 die = &chip->dies[i];
1062 ret = hccs_get_die_all_port_lane_status(hdev, die, &tmp);
1063 if (ret) {
1064 mutex_unlock(&hdev->lock);
1065 return ret;
1066 }
1067 if (tmp != full_lane) {
1068 full_lane = 0;
1069 break;
1070 }
1071 }
1072 mutex_unlock(&hdev->lock);
1073
1074 return sysfs_emit(buf, "%u\n", full_lane);
1075}
1076static struct kobj_attribute linked_full_lane_on_chip_attr =
1077 __ATTR(linked_full_lane, 0444, linked_full_lane_on_chip_show, NULL);
1078
1079static ssize_t crc_err_cnt_sum_on_chip_show(struct kobject *kobj,
1080 struct kobj_attribute *attr,
1081 char *buf)
1082{
1083 const struct hccs_chip_info *chip = kobj_to_chip_info(kobj);
1084 u64 crc_err_cnt, total_crc_err_cnt = 0;
1085 struct hccs_dev *hdev = chip->hdev;
1086 const struct hccs_die_info *die;
1087 int ret;
1088 u16 i;
1089
1090 mutex_lock(&hdev->lock);
1091 for (i = 0; i < chip->die_num; i++) {
1092 die = &chip->dies[i];
1093 ret = hccs_get_die_total_crc_err_cnt(hdev, die, &crc_err_cnt);
1094 if (ret) {
1095 mutex_unlock(&hdev->lock);
1096 return ret;
1097 }
1098
1099 total_crc_err_cnt += crc_err_cnt;
1100 }
1101 mutex_unlock(&hdev->lock);
1102
1103 return sysfs_emit(buf, "%llu\n", total_crc_err_cnt);
1104}
1105static struct kobj_attribute crc_err_cnt_sum_on_chip_attr =
1106 __ATTR(crc_err_cnt, 0444, crc_err_cnt_sum_on_chip_show, NULL);
1107
1108static struct attribute *hccs_chip_default_attrs[] = {
1109 &all_linked_on_chip_attr.attr,
1110 &linked_full_lane_on_chip_attr.attr,
1111 &crc_err_cnt_sum_on_chip_attr.attr,
1112 NULL,
1113};
1114ATTRIBUTE_GROUPS(hccs_chip_default);
1115
1116static const struct kobj_type hccs_chip_type = {
1117 .sysfs_ops = &hccs_comm_ops,
1118 .default_groups = hccs_chip_default_groups,
1119};
1120
1121static void hccs_remove_die_dir(struct hccs_die_info *die)
1122{
1123 struct hccs_port_info *port;
1124 u8 i;
1125
1126 for (i = 0; i < die->port_num; i++) {
1127 port = &die->ports[i];
1128 if (port->dir_created)
1129 kobject_put(&port->kobj);
1130 }
1131
1132 kobject_put(&die->kobj);
1133}
1134
1135static void hccs_remove_chip_dir(struct hccs_chip_info *chip)
1136{
1137 struct hccs_die_info *die;
1138 u8 i;
1139
1140 for (i = 0; i < chip->die_num; i++) {
1141 die = &chip->dies[i];
1142 if (die->dir_created)
1143 hccs_remove_die_dir(die);
1144 }
1145
1146 kobject_put(&chip->kobj);
1147}
1148
1149static void hccs_remove_topo_dirs(struct hccs_dev *hdev)
1150{
1151 u8 i;
1152
1153 for (i = 0; i < hdev->chip_num; i++)
1154 hccs_remove_chip_dir(&hdev->chips[i]);
1155}
1156
1157static int hccs_create_hccs_dir(struct hccs_dev *hdev,
1158 struct hccs_die_info *die,
1159 struct hccs_port_info *port)
1160{
1161 int ret;
1162
1163 ret = kobject_init_and_add(&port->kobj, &hccs_port_type,
1164 &die->kobj, "hccs%u", port->port_id);
1165 if (ret) {
1166 kobject_put(&port->kobj);
1167 return ret;
1168 }
1169
1170 return 0;
1171}
1172
1173static int hccs_create_die_dir(struct hccs_dev *hdev,
1174 struct hccs_chip_info *chip,
1175 struct hccs_die_info *die)
1176{
1177 struct hccs_port_info *port;
1178 int ret;
1179 u16 i;
1180
1181 ret = kobject_init_and_add(&die->kobj, &hccs_die_type,
1182 &chip->kobj, "die%u", die->die_id);
1183 if (ret) {
1184 kobject_put(&die->kobj);
1185 return ret;
1186 }
1187
1188 for (i = 0; i < die->port_num; i++) {
1189 port = &die->ports[i];
1190 ret = hccs_create_hccs_dir(hdev, die, port);
1191 if (ret) {
1192 dev_err(hdev->dev, "create hccs%u dir failed.\n",
1193 port->port_id);
1194 goto err;
1195 }
1196 port->dir_created = true;
1197 }
1198
1199 return 0;
1200err:
1201 hccs_remove_die_dir(die);
1202
1203 return ret;
1204}
1205
1206static int hccs_create_chip_dir(struct hccs_dev *hdev,
1207 struct hccs_chip_info *chip)
1208{
1209 struct hccs_die_info *die;
1210 int ret;
1211 u16 id;
1212
1213 ret = kobject_init_and_add(&chip->kobj, &hccs_chip_type,
1214 &hdev->dev->kobj, "chip%u", chip->chip_id);
1215 if (ret) {
1216 kobject_put(&chip->kobj);
1217 return ret;
1218 }
1219
1220 for (id = 0; id < chip->die_num; id++) {
1221 die = &chip->dies[id];
1222 ret = hccs_create_die_dir(hdev, chip, die);
1223 if (ret)
1224 goto err;
1225 die->dir_created = true;
1226 }
1227
1228 return 0;
1229err:
1230 hccs_remove_chip_dir(chip);
1231
1232 return ret;
1233}
1234
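/*
 * Build the sysfs topology under this platform device. As an example,
 * assuming a single chip with one die and one port, the resulting layout
 * looks roughly like:
 *
 *   chip0/
 *     all_linked  linked_full_lane  crc_err_cnt
 *     die0/
 *       all_linked  linked_full_lane  crc_err_cnt
 *       hccs0/
 *         type  lane_mode  enable  cur_lane_num
 *         link_fsm  lane_mask  crc_err_cnt
 */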
1235static int hccs_create_topo_dirs(struct hccs_dev *hdev)
1236{
1237 struct hccs_chip_info *chip;
1238 u8 id, k;
1239 int ret;
1240
1241 for (id = 0; id < hdev->chip_num; id++) {
1242 chip = &hdev->chips[id];
1243 ret = hccs_create_chip_dir(hdev, chip);
1244 if (ret) {
1245 dev_err(hdev->dev, "init chip%u dir failed!\n", id);
1246 goto err;
1247 }
1248 }
1249
1250 return 0;
1251err:
1252 for (k = 0; k < id; k++)
1253 hccs_remove_chip_dir(&hdev->chips[k]);
1254
1255 return ret;
1256}
1257
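/*
 * Probe order: resolve the PCC channel id from _CRS, register the PCC
 * mailbox channel, query device capabilities and the chip/die/port
 * topology from firmware, then expose the topology through sysfs.
 */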
1258static int hccs_probe(struct platform_device *pdev)
1259{
1260 struct acpi_device *acpi_dev;
1261 struct hccs_dev *hdev;
1262 int rc;
1263
1264 if (acpi_disabled) {
1265 dev_err(&pdev->dev, "ACPI is disabled.\n");
1266 return -ENODEV;
1267 }
1268 acpi_dev = ACPI_COMPANION(&pdev->dev);
1269 if (!acpi_dev)
1270 return -ENODEV;
1271
1272 hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
1273 if (!hdev)
1274 return -ENOMEM;
1275 hdev->acpi_dev = acpi_dev;
1276 hdev->dev = &pdev->dev;
1277 platform_set_drvdata(pdev, hdev);
1278
1279 /*
1280 * This cannot fail because the driver and device have already been matched.
1281 */
1282 hdev->verspec_data = acpi_device_get_match_data(hdev->dev);
1283
1284 mutex_init(&hdev->lock);
1285 rc = hccs_get_pcc_chan_id(hdev);
1286 if (rc)
1287 return rc;
1288 rc = hccs_register_pcc_channel(hdev);
1289 if (rc)
1290 return rc;
1291
1292 rc = hccs_get_dev_caps(hdev);
1293 if (rc)
1294 goto unregister_pcc_chan;
1295
1296 rc = hccs_get_hw_info(hdev);
1297 if (rc)
1298 goto unregister_pcc_chan;
1299
1300 rc = hccs_create_topo_dirs(hdev);
1301 if (rc)
1302 goto unregister_pcc_chan;
1303
1304 return 0;
1305
1306unregister_pcc_chan:
1307 hccs_unregister_pcc_channel(hdev);
1308
1309 return rc;
1310}
1311
1312static void hccs_remove(struct platform_device *pdev)
1313{
1314 struct hccs_dev *hdev = platform_get_drvdata(pdev);
1315
1316 hccs_remove_topo_dirs(hdev);
1317 hccs_unregister_pcc_channel(hdev);
1318}
1319
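/*
 * Version-specific behaviour: HISI04B1 uses the generic PCC shared memory
 * layout and polls for command completion, while HISI04B2 uses the
 * extended layout with a txdone interrupt and an RX callback.
 */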
1320static const struct hccs_verspecific_data hisi04b1_verspec_data = {
1321 .rx_callback = NULL,
1322 .wait_cmd_complete = hccs_wait_cmd_complete_by_poll,
1323 .fill_pcc_shared_mem = hccs_fill_pcc_shared_mem_region,
1324 .shared_mem_size = sizeof(struct acpi_pcct_shared_memory),
1325 .has_txdone_irq = false,
1326};
1327
1328static const struct hccs_verspecific_data hisi04b2_verspec_data = {
1329 .rx_callback = hccs_pcc_rx_callback,
1330 .wait_cmd_complete = hccs_wait_cmd_complete_by_irq,
1331 .fill_pcc_shared_mem = hccs_fill_ext_pcc_shared_mem_region,
1332 .shared_mem_size = sizeof(struct acpi_pcct_ext_pcc_shared_memory),
1333 .has_txdone_irq = true,
1334};
1335
1336static const struct acpi_device_id hccs_acpi_match[] = {
1337 { "HISI04B1", (unsigned long)&hisi04b1_verspec_data},
1338 { "HISI04B2", (unsigned long)&hisi04b2_verspec_data},
1339 { }
1340};
1341MODULE_DEVICE_TABLE(acpi, hccs_acpi_match);
1342
1343static struct platform_driver hccs_driver = {
1344 .probe = hccs_probe,
1345 .remove_new = hccs_remove,
1346 .driver = {
1347 .name = "kunpeng_hccs",
1348 .acpi_match_table = hccs_acpi_match,
1349 },
1350};
1351
1352module_platform_driver(hccs_driver);
1353
1354MODULE_DESCRIPTION("Kunpeng SoC HCCS driver");
1355MODULE_LICENSE("GPL");
1356MODULE_AUTHOR("Huisong Li <lihuisong@huawei.com>");