Loading...
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Mellanox hotplug driver
4 *
5 * Copyright (C) 2016-2020 Mellanox Technologies
6 */
7
8#include <linux/bitops.h>
9#include <linux/device.h>
10#include <linux/hwmon.h>
11#include <linux/hwmon-sysfs.h>
12#include <linux/i2c.h>
13#include <linux/interrupt.h>
14#include <linux/module.h>
15#include <linux/platform_data/mlxreg.h>
16#include <linux/platform_device.h>
17#include <linux/spinlock.h>
18#include <linux/string_helpers.h>
19#include <linux/regmap.h>
20#include <linux/workqueue.h>
21
22/* Offset of event and mask registers from status register. */
23#define MLXREG_HOTPLUG_EVENT_OFF 1
24#define MLXREG_HOTPLUG_MASK_OFF 2
25#define MLXREG_HOTPLUG_AGGR_MASK_OFF 1
26
27/* ASIC good health mask. */
28#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK 0x02
29
30#define MLXREG_HOTPLUG_ATTRS_MAX 128
31#define MLXREG_HOTPLUG_NOT_ASSERT 3
32
33/**
34 * struct mlxreg_hotplug_priv_data - platform private data:
35 * @irq: platform device interrupt number;
36 * @dev: basic device;
37 * @pdev: platform device;
38 * @plat: platform data;
39 * @regmap: register map handle;
40 * @dwork_irq: delayed work template;
41 * @lock: spin lock;
42 * @hwmon: hwmon device;
43 * @mlxreg_hotplug_attr: sysfs attributes array;
44 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
45 * @group: sysfs attribute group;
46 * @groups: list of sysfs attribute group for hwmon registration;
47 * @cell: location of top aggregation interrupt register;
48 * @mask: top aggregation interrupt common mask;
49 * @aggr_cache: last value of aggregation register status;
50 * @after_probe: flag indication probing completion;
51 * @not_asserted: number of entries in workqueue with no signal assertion;
52 */
53struct mlxreg_hotplug_priv_data {
54 int irq;
55 struct device *dev;
56 struct platform_device *pdev;
57 struct mlxreg_hotplug_platform_data *plat;
58 struct regmap *regmap;
59 struct delayed_work dwork_irq;
60 spinlock_t lock; /* sync with interrupt */
61 struct device *hwmon;
62 struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
63 struct sensor_device_attribute_2
64 mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
65 struct attribute_group group;
66 const struct attribute_group *groups[2];
67 u32 cell;
68 u32 mask;
69 u32 aggr_cache;
70 bool after_probe;
71 u8 not_asserted;
72};
73
74/* Environment variables array for udev. */
75static char *mlxreg_hotplug_udev_envp[] = { NULL, NULL };
76
77static int
78mlxreg_hotplug_udev_event_send(struct kobject *kobj,
79 struct mlxreg_core_data *data, bool action)
80{
81 char event_str[MLXREG_CORE_LABEL_MAX_SIZE + 2];
82 char label[MLXREG_CORE_LABEL_MAX_SIZE] = { 0 };
83
84 mlxreg_hotplug_udev_envp[0] = event_str;
85 string_upper(label, data->label);
86 snprintf(event_str, MLXREG_CORE_LABEL_MAX_SIZE, "%s=%d", label, !!action);
87
88 return kobject_uevent_env(kobj, KOBJ_CHANGE, mlxreg_hotplug_udev_envp);
89}
90
91static void
92mlxreg_hotplug_pdata_export(void *pdata, void *regmap)
93{
94 struct mlxreg_core_hotplug_platform_data *dev_pdata = pdata;
95
96 /* Export regmap to underlying device. */
97 dev_pdata->regmap = regmap;
98}
99
/*
 * mlxreg_hotplug_device_create - attach the device associated with a hotplug
 * signal and notify user space.
 * @priv: driver private data;
 * @data: hotplug entry describing the underlying device;
 * @kind: hotplug event kind, forwarded to the user notifier;
 *
 * Sends a hwmon uevent first, then acts according to the entry's configured
 * action: instantiates an I2C client, registers a platform device, or does
 * nothing. Returns zero on success, otherwise a negative error code.
 */
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
					struct mlxreg_core_data *data,
					enum mlxreg_hotplug_kind kind)
{
	struct i2c_board_info *brdinfo = data->hpdev.brdinfo;
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct i2c_client *client;

	/* Notify user by sending hwmon uevent. */
	mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, true);

	/*
	 * Return if adapter number is negative. It could be in case hotplug
	 * event is not associated with hotplug device.
	 */
	if (data->hpdev.nr < 0 && data->hpdev.action != MLXREG_HOTPLUG_DEVICE_NO_ACTION)
		return 0;

	pdata = dev_get_platdata(&priv->pdev->dev);
	switch (data->hpdev.action) {
	case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
		/* Instantiate an I2C client on the adapter behind the event. */
		data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
						      pdata->shift_nr);
		if (!data->hpdev.adapter) {
			dev_err(priv->dev, "Failed to get adapter for bus %d\n",
				data->hpdev.nr + pdata->shift_nr);
			return -EFAULT;
		}

		/* Export platform data to underlying device. */
		if (brdinfo->platform_data)
			mlxreg_hotplug_pdata_export(brdinfo->platform_data, pdata->regmap);

		client = i2c_new_client_device(data->hpdev.adapter,
					       brdinfo);
		if (IS_ERR(client)) {
			dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
				brdinfo->type, data->hpdev.nr +
				pdata->shift_nr, brdinfo->addr);

			/* Drop the adapter reference taken above. */
			i2c_put_adapter(data->hpdev.adapter);
			data->hpdev.adapter = NULL;
			return PTR_ERR(client);
		}

		data->hpdev.client = client;
		break;
	case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
		/* Export platform data to underlying device. */
		if (data->hpdev.brdinfo && data->hpdev.brdinfo->platform_data)
			mlxreg_hotplug_pdata_export(data->hpdev.brdinfo->platform_data,
						    pdata->regmap);
		/* Pass parent hotplug device handle to underlying device. */
		data->notifier = data->hpdev.notifier;
		data->hpdev.pdev = platform_device_register_resndata(&priv->pdev->dev,
								     brdinfo->type,
								     data->hpdev.nr,
								     NULL, 0, data,
								     sizeof(*data));
		if (IS_ERR(data->hpdev.pdev))
			return PTR_ERR(data->hpdev.pdev);

		break;
	default:
		break;
	}

	/* Let the notifier owner know the device has been attached (1). */
	if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
		return data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 1);

	return 0;
}
172
/*
 * mlxreg_hotplug_device_destroy - detach the device associated with a
 * hotplug signal and notify user space.
 * @priv: driver private data;
 * @data: hotplug entry describing the underlying device;
 * @kind: hotplug event kind, forwarded to the user notifier;
 *
 * Mirror of mlxreg_hotplug_device_create(): sends the removal uevent and
 * notifier callback first, then tears down whatever the configured action
 * created. Safe to call when nothing was attached (all pointers checked).
 */
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
			      struct mlxreg_core_data *data,
			      enum mlxreg_hotplug_kind kind)
{
	/* Notify user by sending hwmon uevent. */
	mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, false);
	/* Let the notifier owner know the device is going away (0). */
	if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
		data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 0);

	switch (data->hpdev.action) {
	case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
		if (data->hpdev.client) {
			i2c_unregister_device(data->hpdev.client);
			data->hpdev.client = NULL;
		}

		/* Release the adapter reference taken at creation time. */
		if (data->hpdev.adapter) {
			i2c_put_adapter(data->hpdev.adapter);
			data->hpdev.adapter = NULL;
		}
		break;
	case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
		if (data->hpdev.pdev)
			platform_device_unregister(data->hpdev.pdev);
		break;
	default:
		break;
	}
}
203
204static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
205 struct device_attribute *attr,
206 char *buf)
207{
208 struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
209 struct mlxreg_core_hotplug_platform_data *pdata;
210 int index = to_sensor_dev_attr_2(attr)->index;
211 int nr = to_sensor_dev_attr_2(attr)->nr;
212 struct mlxreg_core_item *item;
213 struct mlxreg_core_data *data;
214 u32 regval;
215 int ret;
216
217 pdata = dev_get_platdata(&priv->pdev->dev);
218 item = pdata->items + nr;
219 data = item->data + index;
220
221 ret = regmap_read(priv->regmap, data->reg, ®val);
222 if (ret)
223 return ret;
224
225 if (item->health) {
226 regval &= data->mask;
227 } else {
228 /* Bit = 0 : functional if item->inversed is true. */
229 if (item->inversed)
230 regval = !(regval & data->mask);
231 else
232 regval = !!(regval & data->mask);
233 }
234
235 return sprintf(buf, "%u\n", regval);
236}
237
238#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
239#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]
240
241static int mlxreg_hotplug_item_label_index_get(u32 mask, u32 bit)
242{
243 int i, j;
244
245 for (i = 0, j = -1; i <= bit; i++) {
246 if (mask & BIT(i))
247 j++;
248 }
249 return j;
250}
251
252static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
253{
254 struct mlxreg_core_hotplug_platform_data *pdata;
255 struct mlxreg_core_item *item;
256 struct mlxreg_core_data *data;
257 unsigned long mask;
258 u32 regval;
259 int num_attrs = 0, id = 0, i, j, k, count, ret;
260
261 pdata = dev_get_platdata(&priv->pdev->dev);
262 item = pdata->items;
263
264 /* Go over all kinds of items - psu, pwr, fan. */
265 for (i = 0; i < pdata->counter; i++, item++) {
266 if (item->capability) {
267 /*
268 * Read group capability register to get actual number
269 * of interrupt capable components and set group mask
270 * accordingly.
271 */
272 ret = regmap_read(priv->regmap, item->capability,
273 ®val);
274 if (ret)
275 return ret;
276
277 item->mask = GENMASK((regval & item->mask) - 1, 0);
278 }
279
280 data = item->data;
281
282 /* Go over all unmasked units within item. */
283 mask = item->mask;
284 k = 0;
285 count = item->ind ? item->ind : item->count;
286 for_each_set_bit(j, &mask, count) {
287 if (data->capability) {
288 /*
289 * Read capability register and skip non
290 * relevant attributes.
291 */
292 ret = regmap_read(priv->regmap,
293 data->capability, ®val);
294 if (ret)
295 return ret;
296
297 if (!(regval & data->bit)) {
298 data++;
299 continue;
300 }
301 }
302
303 PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
304 PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
305 GFP_KERNEL,
306 data->label);
307 if (!PRIV_ATTR(id)->name) {
308 dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
309 id);
310 return -ENOMEM;
311 }
312
313 PRIV_DEV_ATTR(id).dev_attr.attr.name =
314 PRIV_ATTR(id)->name;
315 PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
316 PRIV_DEV_ATTR(id).dev_attr.show =
317 mlxreg_hotplug_attr_show;
318 PRIV_DEV_ATTR(id).nr = i;
319 PRIV_DEV_ATTR(id).index = k;
320 sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
321 data++;
322 id++;
323 k++;
324 }
325 num_attrs += k;
326 }
327
328 priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
329 num_attrs,
330 sizeof(struct attribute *),
331 GFP_KERNEL);
332 if (!priv->group.attrs)
333 return -ENOMEM;
334
335 priv->group.attrs = priv->mlxreg_hotplug_attr;
336 priv->groups[0] = &priv->group;
337 priv->groups[1] = NULL;
338
339 return 0;
340}
341
342static void
343mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
344 struct mlxreg_core_item *item)
345{
346 struct mlxreg_core_data *data;
347 unsigned long asserted;
348 u32 regval, bit;
349 int ret;
350
351 /*
352 * Validate if item related to received signal type is valid.
353 * It should never happen, excepted the situation when some
354 * piece of hardware is broken. In such situation just produce
355 * error message and return. Caller must continue to handle the
356 * signals from other devices if any.
357 */
358 if (unlikely(!item)) {
359 dev_err(priv->dev, "False signal: at offset:mask 0x%02x:0x%02x.\n",
360 item->reg, item->mask);
361
362 return;
363 }
364
365 /* Mask event. */
366 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
367 0);
368 if (ret)
369 goto out;
370
371 /* Read status. */
372 ret = regmap_read(priv->regmap, item->reg, ®val);
373 if (ret)
374 goto out;
375
376 /* Set asserted bits and save last status. */
377 regval &= item->mask;
378 asserted = item->cache ^ regval;
379 item->cache = regval;
380 for_each_set_bit(bit, &asserted, 8) {
381 int pos;
382
383 pos = mlxreg_hotplug_item_label_index_get(item->mask, bit);
384 if (pos < 0)
385 goto out;
386
387 data = item->data + pos;
388 if (regval & BIT(bit)) {
389 if (item->inversed)
390 mlxreg_hotplug_device_destroy(priv, data, item->kind);
391 else
392 mlxreg_hotplug_device_create(priv, data, item->kind);
393 } else {
394 if (item->inversed)
395 mlxreg_hotplug_device_create(priv, data, item->kind);
396 else
397 mlxreg_hotplug_device_destroy(priv, data, item->kind);
398 }
399 }
400
401 /* Acknowledge event. */
402 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
403 0);
404 if (ret)
405 goto out;
406
407 /* Unmask event. */
408 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
409 item->mask);
410
411 out:
412 if (ret)
413 dev_err(priv->dev, "Failed to complete workqueue.\n");
414}
415
/*
 * mlxreg_hotplug_health_work_helper - handle ASIC health signal changes for
 * one item group.
 * @priv: driver private data;
 * @item: health item whose aggregation bit asserted;
 *
 * For each unit in the item: masks the event, reads and masks the health
 * status, attaches/detaches the associated device on good/bad transitions,
 * then acknowledges and unmasks the event.
 * NOTE(review): item->cache is a single value compared against every unit's
 * status in the loop — confirm this is intended for items with count > 1.
 */
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
				  struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data = item->data;
	u32 regval;
	int i, ret = 0;

	for (i = 0; i < item->count; i++, data++) {
		/* Mask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, 0);
		if (ret)
			goto out;

		/* Read status. */
		ret = regmap_read(priv->regmap, data->reg, &regval);
		if (ret)
			goto out;

		regval &= data->mask;

		/* No change since last observation - just re-arm the event. */
		if (item->cache == regval)
			goto ack_event;

		/*
		 * ASIC health indication is provided through two bits. Bits
		 * value 0x2 indicates that ASIC reached the good health, value
		 * 0x0 indicates ASIC the bad health or dormant state and value
		 * 0x3 indicates the booting state. During ASIC reset it should
		 * pass the following states: dormant -> booting -> good.
		 */
		if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
			if (!data->attached) {
				/*
				 * ASIC is in steady state. Connect associated
				 * device, if configured.
				 */
				mlxreg_hotplug_device_create(priv, data, item->kind);
				data->attached = true;
			}
		} else {
			if (data->attached) {
				/*
				 * ASIC health is failed after ASIC has been
				 * in steady state. Disconnect associated
				 * device, if it has been connected.
				 */
				mlxreg_hotplug_device_destroy(priv, data, item->kind);
				data->attached = false;
				data->health_cntr = 0;
			}
		}
		item->cache = regval;
ack_event:
		/* Acknowledge event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Unmask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, data->mask);
		if (ret)
			goto out;
	}

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}
488
489/*
490 * mlxreg_hotplug_work_handler - performs traversing of device interrupt
491 * registers according to the below hierarchy schema:
492 *
493 * Aggregation registers (status/mask)
494 * PSU registers: *---*
495 * *-----------------* | |
496 * |status/event/mask|-----> | * |
497 * *-----------------* | |
498 * Power registers: | |
499 * *-----------------* | |
500 * |status/event/mask|-----> | * |
501 * *-----------------* | |
502 * FAN registers: | |--> CPU
503 * *-----------------* | |
504 * |status/event/mask|-----> | * |
505 * *-----------------* | |
506 * ASIC registers: | |
507 * *-----------------* | |
508 * |status/event/mask|-----> | * |
509 * *-----------------* | |
510 * *---*
511 *
512 * In case some system changed are detected: FAN in/out, PSU in/out, power
513 * cable attached/detached, ASIC health good/bad, relevant device is created
514 * or destroyed.
515 */
516static void mlxreg_hotplug_work_handler(struct work_struct *work)
517{
518 struct mlxreg_core_hotplug_platform_data *pdata;
519 struct mlxreg_hotplug_priv_data *priv;
520 struct mlxreg_core_item *item;
521 u32 regval, aggr_asserted;
522 unsigned long flags;
523 int i, ret;
524
525 priv = container_of(work, struct mlxreg_hotplug_priv_data,
526 dwork_irq.work);
527 pdata = dev_get_platdata(&priv->pdev->dev);
528 item = pdata->items;
529
530 /* Mask aggregation event. */
531 ret = regmap_write(priv->regmap, pdata->cell +
532 MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
533 if (ret < 0)
534 goto out;
535
536 /* Read aggregation status. */
537 ret = regmap_read(priv->regmap, pdata->cell, ®val);
538 if (ret)
539 goto out;
540
541 regval &= pdata->mask;
542 aggr_asserted = priv->aggr_cache ^ regval;
543 priv->aggr_cache = regval;
544
545 /*
546 * Handler is invoked, but no assertion is detected at top aggregation
547 * status level. Set aggr_asserted to mask value to allow handler extra
548 * run over all relevant signals to recover any missed signal.
549 */
550 if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
551 priv->not_asserted = 0;
552 aggr_asserted = pdata->mask;
553 }
554 if (!aggr_asserted)
555 goto unmask_event;
556
557 /* Handle topology and health configuration changes. */
558 for (i = 0; i < pdata->counter; i++, item++) {
559 if (aggr_asserted & item->aggr_mask) {
560 if (item->health)
561 mlxreg_hotplug_health_work_helper(priv, item);
562 else
563 mlxreg_hotplug_work_helper(priv, item);
564 }
565 }
566
567 spin_lock_irqsave(&priv->lock, flags);
568
569 /*
570 * It is possible, that some signals have been inserted, while
571 * interrupt has been masked by mlxreg_hotplug_work_handler. In this
572 * case such signals will be missed. In order to handle these signals
573 * delayed work is canceled and work task re-scheduled for immediate
574 * execution. It allows to handle missed signals, if any. In other case
575 * work handler just validates that no new signals have been received
576 * during masking.
577 */
578 cancel_delayed_work(&priv->dwork_irq);
579 schedule_delayed_work(&priv->dwork_irq, 0);
580
581 spin_unlock_irqrestore(&priv->lock, flags);
582
583 return;
584
585unmask_event:
586 priv->not_asserted++;
587 /* Unmask aggregation event (no need acknowledge). */
588 ret = regmap_write(priv->regmap, pdata->cell +
589 MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
590
591 out:
592 if (ret)
593 dev_err(priv->dev, "Failed to complete workqueue.\n");
594}
595
/*
 * mlxreg_hotplug_set_irq - perform initial interrupt setup.
 * @priv: driver private data;
 *
 * Clears pending group events, prunes non-present units from group masks
 * via capability registers, seeds caches/masks, unmasks aggregation events
 * and invokes the work handler once to create devices already present at
 * probe time. The device IRQ is enabled on exit on both success and failure
 * paths. Returns zero on success or a negative error code.
 */
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int i, j, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	for (i = 0; i < pdata->counter; i++, item++) {
		/* Clear group presense event. */
		ret = regmap_write(priv->regmap, item->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/*
		 * Verify if hardware configuration requires to disable
		 * interrupt capability for some of components.
		 */
		data = item->data;
		for (j = 0; j < item->count; j++, data++) {
			/* Verify if the attribute has capability register. */
			if (data->capability) {
				/* Read capability register. */
				ret = regmap_read(priv->regmap,
						  data->capability, &regval);
				if (ret)
					goto out;

				/* Drop non-present unit from the group mask. */
				if (!(regval & data->bit))
					item->mask &= ~BIT(j);
			}
		}

		/* Set group initial status as mask and unmask group event. */
		if (item->inversed) {
			item->cache = item->mask;
			ret = regmap_write(priv->regmap, item->reg +
					   MLXREG_HOTPLUG_MASK_OFF,
					   item->mask);
			if (ret)
				goto out;
		}
	}

	/* Keep aggregation initial status as zero and unmask events. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
	if (ret)
		goto out;

	/* Keep low aggregation initial status as zero and unmask events. */
	if (pdata->cell_low) {
		ret = regmap_write(priv->regmap, pdata->cell_low +
				   MLXREG_HOTPLUG_AGGR_MASK_OFF,
				   pdata->mask_low);
		if (ret)
			goto out;
	}

	/* Invoke work handler for initializing hot plug devices setting. */
	mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to set interrupts.\n");
	/* IRQ is enabled unconditionally, even after a partial setup. */
	enable_irq(priv->irq);
	return ret;
}
668
/*
 * mlxreg_hotplug_unset_irq - tear down interrupt handling.
 * @priv: driver private data;
 *
 * Disables the IRQ, flushes pending delayed work, masks aggregation and
 * group events, and destroys every attached device. Register write errors
 * are deliberately ignored on this teardown path.
 */
static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int count, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;
	disable_irq(priv->irq);
	cancel_delayed_work_sync(&priv->dwork_irq);

	/* Mask low aggregation event, if defined. */
	if (pdata->cell_low)
		regmap_write(priv->regmap, pdata->cell_low +
			     MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

	/* Mask aggregation event. */
	regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
		     0);

	/* Clear topology configurations. */
	for (i = 0; i < pdata->counter; i++, item++) {
		data = item->data;
		/*
		 * Mask group presense event.
		 * NOTE(review): uses data->reg (the first unit's register)
		 * rather than item->reg — confirm both address the same
		 * status register for every item layout.
		 */
		regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
			     0);
		/* Clear group presense event. */
		regmap_write(priv->regmap, data->reg +
			     MLXREG_HOTPLUG_EVENT_OFF, 0);

		/* Remove all the attached devices in group. */
		count = item->count;
		for (j = 0; j < count; j++, data++)
			mlxreg_hotplug_device_destroy(priv, data, item->kind);
	}
}
706
707static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
708{
709 struct mlxreg_hotplug_priv_data *priv;
710
711 priv = (struct mlxreg_hotplug_priv_data *)dev;
712
713 /* Schedule work task for immediate execution.*/
714 schedule_delayed_work(&priv->dwork_irq, 0);
715
716 return IRQ_HANDLED;
717}
718
/*
 * mlxreg_hotplug_probe - platform driver probe.
 * @pdev: platform device;
 *
 * Validates platform data, defers probing until the required I2C adapter
 * exists, requests the (initially disabled) IRQ, builds the sysfs attribute
 * group, registers the hwmon device and performs initial interrupt setup.
 * Returns zero on success or a negative error code.
 */
static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct i2c_adapter *deferred_adap;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to get platform data.\n");
		return -EINVAL;
	}

	/* Defer probing if the necessary adapter is not configured yet. */
	deferred_adap = i2c_get_adapter(pdata->deferred_nr);
	if (!deferred_adap)
		return -EPROBE_DEFER;
	i2c_put_adapter(deferred_adap);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* IRQ may come either from platform data or from platform resources. */
	if (pdata->irq) {
		priv->irq = pdata->irq;
	} else {
		priv->irq = platform_get_irq(pdev, 0);
		if (priv->irq < 0)
			return priv->irq;
	}

	priv->regmap = pdata->regmap;
	priv->dev = pdev->dev.parent;
	priv->pdev = pdev;

	err = devm_request_irq(&pdev->dev, priv->irq,
			       mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
			       | IRQF_SHARED, "mlxreg-hotplug", priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
		return err;
	}

	/* Keep IRQ off until mlxreg_hotplug_set_irq() re-enables it. */
	disable_irq(priv->irq);
	spin_lock_init(&priv->lock);
	INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
	dev_set_drvdata(&pdev->dev, priv);

	err = mlxreg_hotplug_attr_init(priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
			err);
		return err;
	}

	priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
					"mlxreg_hotplug", priv, priv->groups);
	if (IS_ERR(priv->hwmon)) {
		dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
			PTR_ERR(priv->hwmon));
		return PTR_ERR(priv->hwmon);
	}

	/*
	 * Perform initial interrupts setup.
	 * NOTE(review): return value ignored — mlxreg_hotplug_set_irq() logs
	 * on failure; confirm probe is meant to succeed regardless.
	 */
	mlxreg_hotplug_set_irq(priv);
	priv->after_probe = true;

	return 0;
}
788
/*
 * mlxreg_hotplug_remove - platform driver remove.
 * @pdev: platform device;
 *
 * Tears down interrupt handling and attached devices, then releases the
 * IRQ explicitly so no handler can run after teardown.
 */
static void mlxreg_hotplug_remove(struct platform_device *pdev)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

	/* Clean interrupts setup. */
	mlxreg_hotplug_unset_irq(priv);
	devm_free_irq(&pdev->dev, priv->irq, priv);
}
797
798static struct platform_driver mlxreg_hotplug_driver = {
799 .driver = {
800 .name = "mlxreg-hotplug",
801 },
802 .probe = mlxreg_hotplug_probe,
803 .remove_new = mlxreg_hotplug_remove,
804};
805
806module_platform_driver(mlxreg_hotplug_driver);
807
808MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
809MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
810MODULE_LICENSE("Dual BSD/GPL");
811MODULE_ALIAS("platform:mlxreg-hotplug");
1/*
2 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
3 * Copyright (c) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the names of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * Alternatively, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2 as published by the Free
19 * Software Foundation.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <linux/bitops.h>
35#include <linux/device.h>
36#include <linux/hwmon.h>
37#include <linux/hwmon-sysfs.h>
38#include <linux/i2c.h>
39#include <linux/interrupt.h>
40#include <linux/module.h>
41#include <linux/of_device.h>
42#include <linux/platform_data/mlxreg.h>
43#include <linux/platform_device.h>
44#include <linux/spinlock.h>
45#include <linux/regmap.h>
46#include <linux/workqueue.h>
47
48/* Offset of event and mask registers from status register. */
49#define MLXREG_HOTPLUG_EVENT_OFF 1
50#define MLXREG_HOTPLUG_MASK_OFF 2
51#define MLXREG_HOTPLUG_AGGR_MASK_OFF 1
52
53/* ASIC good health mask. */
54#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK 0x02
55
56#define MLXREG_HOTPLUG_ATTRS_MAX 24
57#define MLXREG_HOTPLUG_NOT_ASSERT 3
58
59/**
60 * struct mlxreg_hotplug_priv_data - platform private data:
61 * @irq: platform device interrupt number;
62 * @dev: basic device;
63 * @pdev: platform device;
64 * @plat: platform data;
65 * @regmap: register map handle;
66 * @dwork_irq: delayed work template;
67 * @lock: spin lock;
68 * @hwmon: hwmon device;
69 * @mlxreg_hotplug_attr: sysfs attributes array;
70 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
71 * @group: sysfs attribute group;
72 * @groups: list of sysfs attribute group for hwmon registration;
73 * @cell: location of top aggregation interrupt register;
74 * @mask: top aggregation interrupt common mask;
75 * @aggr_cache: last value of aggregation register status;
76 * @after_probe: flag indication probing completion;
77 * @not_asserted: number of entries in workqueue with no signal assertion;
78 */
79struct mlxreg_hotplug_priv_data {
80 int irq;
81 struct device *dev;
82 struct platform_device *pdev;
83 struct mlxreg_hotplug_platform_data *plat;
84 struct regmap *regmap;
85 struct delayed_work dwork_irq;
86 spinlock_t lock; /* sync with interrupt */
87 struct device *hwmon;
88 struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
89 struct sensor_device_attribute_2
90 mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
91 struct attribute_group group;
92 const struct attribute_group *groups[2];
93 u32 cell;
94 u32 mask;
95 u32 aggr_cache;
96 bool after_probe;
97 u8 not_asserted;
98};
99
/*
 * mlxreg_hotplug_device_create - connect the I2C device associated with a
 * hotplug signal and notify user space via a hwmon uevent.
 * @priv: driver private data;
 * @data: hotplug entry describing the underlying device;
 *
 * Returns zero on success, otherwise a negative error code.
 * NOTE(review): i2c_new_device() is the legacy API, superseded by
 * i2c_new_client_device() in current kernels — acceptable for this
 * older code base.
 */
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
					struct mlxreg_core_data *data)
{
	struct mlxreg_core_hotplug_platform_data *pdata;

	/* Notify user by sending hwmon uevent. */
	kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);

	/*
	 * Return if adapter number is negative. It could be in case hotplug
	 * event is not associated with hotplug device.
	 */
	if (data->hpdev.nr < 0)
		return 0;

	pdata = dev_get_platdata(&priv->pdev->dev);
	data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
					      pdata->shift_nr);
	if (!data->hpdev.adapter) {
		dev_err(priv->dev, "Failed to get adapter for bus %d\n",
			data->hpdev.nr + pdata->shift_nr);
		return -EFAULT;
	}

	data->hpdev.client = i2c_new_device(data->hpdev.adapter,
					    data->hpdev.brdinfo);
	if (!data->hpdev.client) {
		dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
			data->hpdev.brdinfo->type, data->hpdev.nr +
			pdata->shift_nr, data->hpdev.brdinfo->addr);

		/* Drop the adapter reference taken above. */
		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
		return -EFAULT;
	}

	return 0;
}
138
/*
 * mlxreg_hotplug_device_destroy - disconnect the I2C device associated with
 * a hotplug signal and notify user space via a hwmon uevent.
 * @priv: driver private data;
 * @data: hotplug entry describing the underlying device;
 *
 * Safe to call when nothing was attached: client and adapter pointers are
 * checked and cleared after release.
 */
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
			      struct mlxreg_core_data *data)
{
	/* Notify user by sending hwmon uevent. */
	kobject_uevent(&priv->hwmon->kobj, KOBJ_CHANGE);

	if (data->hpdev.client) {
		i2c_unregister_device(data->hpdev.client);
		data->hpdev.client = NULL;
	}

	/* Release the adapter reference taken at creation time. */
	if (data->hpdev.adapter) {
		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
	}
}
156
157static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
158 struct device_attribute *attr,
159 char *buf)
160{
161 struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
162 struct mlxreg_core_hotplug_platform_data *pdata;
163 int index = to_sensor_dev_attr_2(attr)->index;
164 int nr = to_sensor_dev_attr_2(attr)->nr;
165 struct mlxreg_core_item *item;
166 struct mlxreg_core_data *data;
167 u32 regval;
168 int ret;
169
170 pdata = dev_get_platdata(&priv->pdev->dev);
171 item = pdata->items + nr;
172 data = item->data + index;
173
174 ret = regmap_read(priv->regmap, data->reg, ®val);
175 if (ret)
176 return ret;
177
178 if (item->health) {
179 regval &= data->mask;
180 } else {
181 /* Bit = 0 : functional if item->inversed is true. */
182 if (item->inversed)
183 regval = !(regval & data->mask);
184 else
185 regval = !!(regval & data->mask);
186 }
187
188 return sprintf(buf, "%u\n", regval);
189}
190
191#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
192#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]
193
194static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
195{
196 struct mlxreg_core_hotplug_platform_data *pdata;
197 struct mlxreg_core_item *item;
198 struct mlxreg_core_data *data;
199 int num_attrs = 0, id = 0, i, j;
200
201 pdata = dev_get_platdata(&priv->pdev->dev);
202 item = pdata->items;
203
204 /* Go over all kinds of items - psu, pwr, fan. */
205 for (i = 0; i < pdata->counter; i++, item++) {
206 num_attrs += item->count;
207 data = item->data;
208 /* Go over all units within the item. */
209 for (j = 0; j < item->count; j++, data++, id++) {
210 PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
211 PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
212 GFP_KERNEL,
213 data->label);
214
215 if (!PRIV_ATTR(id)->name) {
216 dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
217 id);
218 return -ENOMEM;
219 }
220
221 PRIV_DEV_ATTR(id).dev_attr.attr.name =
222 PRIV_ATTR(id)->name;
223 PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
224 PRIV_DEV_ATTR(id).dev_attr.show =
225 mlxreg_hotplug_attr_show;
226 PRIV_DEV_ATTR(id).nr = i;
227 PRIV_DEV_ATTR(id).index = j;
228 sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
229 }
230 }
231
232 priv->group.attrs = devm_kcalloc(&priv->pdev->dev,
233 num_attrs,
234 sizeof(struct attribute *),
235 GFP_KERNEL);
236 if (!priv->group.attrs)
237 return -ENOMEM;
238
239 priv->group.attrs = priv->mlxreg_hotplug_attr;
240 priv->groups[0] = &priv->group;
241 priv->groups[1] = NULL;
242
243 return 0;
244}
245
246static void
247mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
248 struct mlxreg_core_item *item)
249{
250 struct mlxreg_core_data *data;
251 unsigned long asserted;
252 u32 regval, bit;
253 int ret;
254
255 /*
256 * Validate if item related to received signal type is valid.
257 * It should never happen, excepted the situation when some
258 * piece of hardware is broken. In such situation just produce
259 * error message and return. Caller must continue to handle the
260 * signals from other devices if any.
261 */
262 if (unlikely(!item)) {
263 dev_err(priv->dev, "False signal: at offset:mask 0x%02x:0x%02x.\n",
264 item->reg, item->mask);
265
266 return;
267 }
268
269 /* Mask event. */
270 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
271 0);
272 if (ret)
273 goto out;
274
275 /* Read status. */
276 ret = regmap_read(priv->regmap, item->reg, ®val);
277 if (ret)
278 goto out;
279
280 /* Set asserted bits and save last status. */
281 regval &= item->mask;
282 asserted = item->cache ^ regval;
283 item->cache = regval;
284
285 for_each_set_bit(bit, &asserted, 8) {
286 data = item->data + bit;
287 if (regval & BIT(bit)) {
288 if (item->inversed)
289 mlxreg_hotplug_device_destroy(priv, data);
290 else
291 mlxreg_hotplug_device_create(priv, data);
292 } else {
293 if (item->inversed)
294 mlxreg_hotplug_device_create(priv, data);
295 else
296 mlxreg_hotplug_device_destroy(priv, data);
297 }
298 }
299
300 /* Acknowledge event. */
301 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
302 0);
303 if (ret)
304 goto out;
305
306 /* Unmask event. */
307 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
308 item->mask);
309
310 out:
311 if (ret)
312 dev_err(priv->dev, "Failed to complete workqueue.\n");
313}
314
315static void
316mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
317 struct mlxreg_core_item *item)
318{
319 struct mlxreg_core_data *data = item->data;
320 u32 regval;
321 int i, ret = 0;
322
323 for (i = 0; i < item->count; i++, data++) {
324 /* Mask event. */
325 ret = regmap_write(priv->regmap, data->reg +
326 MLXREG_HOTPLUG_MASK_OFF, 0);
327 if (ret)
328 goto out;
329
330 /* Read status. */
331 ret = regmap_read(priv->regmap, data->reg, ®val);
332 if (ret)
333 goto out;
334
335 regval &= data->mask;
336
337 if (item->cache == regval)
338 goto ack_event;
339
340 /*
341 * ASIC health indication is provided through two bits. Bits
342 * value 0x2 indicates that ASIC reached the good health, value
343 * 0x0 indicates ASIC the bad health or dormant state and value
344 * 0x3 indicates the booting state. During ASIC reset it should
345 * pass the following states: dormant -> booting -> good.
346 */
347 if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
348 if (!data->attached) {
349 /*
350 * ASIC is in steady state. Connect associated
351 * device, if configured.
352 */
353 mlxreg_hotplug_device_create(priv, data);
354 data->attached = true;
355 }
356 } else {
357 if (data->attached) {
358 /*
359 * ASIC health is failed after ASIC has been
360 * in steady state. Disconnect associated
361 * device, if it has been connected.
362 */
363 mlxreg_hotplug_device_destroy(priv, data);
364 data->attached = false;
365 data->health_cntr = 0;
366 }
367 }
368 item->cache = regval;
369ack_event:
370 /* Acknowledge event. */
371 ret = regmap_write(priv->regmap, data->reg +
372 MLXREG_HOTPLUG_EVENT_OFF, 0);
373 if (ret)
374 goto out;
375
376 /* Unmask event. */
377 ret = regmap_write(priv->regmap, data->reg +
378 MLXREG_HOTPLUG_MASK_OFF, data->mask);
379 if (ret)
380 goto out;
381 }
382
383 out:
384 if (ret)
385 dev_err(priv->dev, "Failed to complete workqueue.\n");
386}
387
388/*
389 * mlxreg_hotplug_work_handler - performs traversing of device interrupt
390 * registers according to the below hierarchy schema:
391 *
392 * Aggregation registers (status/mask)
393 * PSU registers: *---*
394 * *-----------------* | |
395 * |status/event/mask|-----> | * |
396 * *-----------------* | |
397 * Power registers: | |
398 * *-----------------* | |
399 * |status/event/mask|-----> | * |
400 * *-----------------* | |
401 * FAN registers: | |--> CPU
402 * *-----------------* | |
403 * |status/event/mask|-----> | * |
404 * *-----------------* | |
405 * ASIC registers: | |
406 * *-----------------* | |
407 * |status/event/mask|-----> | * |
408 * *-----------------* | |
409 * *---*
410 *
 * In case some system changes are detected: FAN in/out, PSU in/out, power
412 * cable attached/detached, ASIC health good/bad, relevant device is created
413 * or destroyed.
414 */
415static void mlxreg_hotplug_work_handler(struct work_struct *work)
416{
417 struct mlxreg_core_hotplug_platform_data *pdata;
418 struct mlxreg_hotplug_priv_data *priv;
419 struct mlxreg_core_item *item;
420 u32 regval, aggr_asserted;
421 unsigned long flags;
422 int i, ret;
423
424 priv = container_of(work, struct mlxreg_hotplug_priv_data,
425 dwork_irq.work);
426 pdata = dev_get_platdata(&priv->pdev->dev);
427 item = pdata->items;
428
429 /* Mask aggregation event. */
430 ret = regmap_write(priv->regmap, pdata->cell +
431 MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
432 if (ret < 0)
433 goto out;
434
435 /* Read aggregation status. */
436 ret = regmap_read(priv->regmap, pdata->cell, ®val);
437 if (ret)
438 goto out;
439
440 regval &= pdata->mask;
441 aggr_asserted = priv->aggr_cache ^ regval;
442 priv->aggr_cache = regval;
443
444 /*
445 * Handler is invoked, but no assertion is detected at top aggregation
446 * status level. Set aggr_asserted to mask value to allow handler extra
447 * run over all relevant signals to recover any missed signal.
448 */
449 if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) {
450 priv->not_asserted = 0;
451 aggr_asserted = pdata->mask;
452 }
453 if (!aggr_asserted)
454 goto unmask_event;
455
456 /* Handle topology and health configuration changes. */
457 for (i = 0; i < pdata->counter; i++, item++) {
458 if (aggr_asserted & item->aggr_mask) {
459 if (item->health)
460 mlxreg_hotplug_health_work_helper(priv, item);
461 else
462 mlxreg_hotplug_work_helper(priv, item);
463 }
464 }
465
466 spin_lock_irqsave(&priv->lock, flags);
467
468 /*
469 * It is possible, that some signals have been inserted, while
470 * interrupt has been masked by mlxreg_hotplug_work_handler. In this
471 * case such signals will be missed. In order to handle these signals
472 * delayed work is canceled and work task re-scheduled for immediate
473 * execution. It allows to handle missed signals, if any. In other case
474 * work handler just validates that no new signals have been received
475 * during masking.
476 */
477 cancel_delayed_work(&priv->dwork_irq);
478 schedule_delayed_work(&priv->dwork_irq, 0);
479
480 spin_unlock_irqrestore(&priv->lock, flags);
481
482 return;
483
484unmask_event:
485 priv->not_asserted++;
486 /* Unmask aggregation event (no need acknowledge). */
487 ret = regmap_write(priv->regmap, pdata->cell +
488 MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
489
490 out:
491 if (ret)
492 dev_err(priv->dev, "Failed to complete workqueue.\n");
493}
494
495static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
496{
497 struct mlxreg_core_hotplug_platform_data *pdata;
498 struct mlxreg_core_item *item;
499 struct mlxreg_core_data *data;
500 u32 regval;
501 int i, j, ret;
502
503 pdata = dev_get_platdata(&priv->pdev->dev);
504 item = pdata->items;
505
506 for (i = 0; i < pdata->counter; i++, item++) {
507 /* Clear group presense event. */
508 ret = regmap_write(priv->regmap, item->reg +
509 MLXREG_HOTPLUG_EVENT_OFF, 0);
510 if (ret)
511 goto out;
512
513 /*
514 * Verify if hardware configuration requires to disable
515 * interrupt capability for some of components.
516 */
517 data = item->data;
518 for (j = 0; j < item->count; j++, data++) {
519 /* Verify if the attribute has capability register. */
520 if (data->capability) {
521 /* Read capability register. */
522 ret = regmap_read(priv->regmap,
523 data->capability, ®val);
524 if (ret)
525 goto out;
526
527 if (!(regval & data->bit))
528 item->mask &= ~BIT(j);
529 }
530 }
531
532 /* Set group initial status as mask and unmask group event. */
533 if (item->inversed) {
534 item->cache = item->mask;
535 ret = regmap_write(priv->regmap, item->reg +
536 MLXREG_HOTPLUG_MASK_OFF,
537 item->mask);
538 if (ret)
539 goto out;
540 }
541 }
542
543 /* Keep aggregation initial status as zero and unmask events. */
544 ret = regmap_write(priv->regmap, pdata->cell +
545 MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
546 if (ret)
547 goto out;
548
549 /* Keep low aggregation initial status as zero and unmask events. */
550 if (pdata->cell_low) {
551 ret = regmap_write(priv->regmap, pdata->cell_low +
552 MLXREG_HOTPLUG_AGGR_MASK_OFF,
553 pdata->mask_low);
554 if (ret)
555 goto out;
556 }
557
558 /* Invoke work handler for initializing hot plug devices setting. */
559 mlxreg_hotplug_work_handler(&priv->dwork_irq.work);
560
561 out:
562 if (ret)
563 dev_err(priv->dev, "Failed to set interrupts.\n");
564 enable_irq(priv->irq);
565 return ret;
566}
567
568static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
569{
570 struct mlxreg_core_hotplug_platform_data *pdata;
571 struct mlxreg_core_item *item;
572 struct mlxreg_core_data *data;
573 int count, i, j;
574
575 pdata = dev_get_platdata(&priv->pdev->dev);
576 item = pdata->items;
577 disable_irq(priv->irq);
578 cancel_delayed_work_sync(&priv->dwork_irq);
579
580 /* Mask low aggregation event, if defined. */
581 if (pdata->cell_low)
582 regmap_write(priv->regmap, pdata->cell_low +
583 MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
584
585 /* Mask aggregation event. */
586 regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
587 0);
588
589 /* Clear topology configurations. */
590 for (i = 0; i < pdata->counter; i++, item++) {
591 data = item->data;
592 /* Mask group presense event. */
593 regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
594 0);
595 /* Clear group presense event. */
596 regmap_write(priv->regmap, data->reg +
597 MLXREG_HOTPLUG_EVENT_OFF, 0);
598
599 /* Remove all the attached devices in group. */
600 count = item->count;
601 for (j = 0; j < count; j++, data++)
602 mlxreg_hotplug_device_destroy(priv, data);
603 }
604}
605
606static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
607{
608 struct mlxreg_hotplug_priv_data *priv;
609
610 priv = (struct mlxreg_hotplug_priv_data *)dev;
611
612 /* Schedule work task for immediate execution.*/
613 schedule_delayed_work(&priv->dwork_irq, 0);
614
615 return IRQ_HANDLED;
616}
617
618static int mlxreg_hotplug_probe(struct platform_device *pdev)
619{
620 struct mlxreg_core_hotplug_platform_data *pdata;
621 struct mlxreg_hotplug_priv_data *priv;
622 struct i2c_adapter *deferred_adap;
623 int err;
624
625 pdata = dev_get_platdata(&pdev->dev);
626 if (!pdata) {
627 dev_err(&pdev->dev, "Failed to get platform data.\n");
628 return -EINVAL;
629 }
630
631 /* Defer probing if the necessary adapter is not configured yet. */
632 deferred_adap = i2c_get_adapter(pdata->deferred_nr);
633 if (!deferred_adap)
634 return -EPROBE_DEFER;
635 i2c_put_adapter(deferred_adap);
636
637 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
638 if (!priv)
639 return -ENOMEM;
640
641 if (pdata->irq) {
642 priv->irq = pdata->irq;
643 } else {
644 priv->irq = platform_get_irq(pdev, 0);
645 if (priv->irq < 0)
646 return priv->irq;
647 }
648
649 priv->regmap = pdata->regmap;
650 priv->dev = pdev->dev.parent;
651 priv->pdev = pdev;
652
653 err = devm_request_irq(&pdev->dev, priv->irq,
654 mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
655 | IRQF_SHARED, "mlxreg-hotplug", priv);
656 if (err) {
657 dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
658 return err;
659 }
660
661 disable_irq(priv->irq);
662 spin_lock_init(&priv->lock);
663 INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
664 dev_set_drvdata(&pdev->dev, priv);
665
666 err = mlxreg_hotplug_attr_init(priv);
667 if (err) {
668 dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
669 err);
670 return err;
671 }
672
673 priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
674 "mlxreg_hotplug", priv, priv->groups);
675 if (IS_ERR(priv->hwmon)) {
676 dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
677 PTR_ERR(priv->hwmon));
678 return PTR_ERR(priv->hwmon);
679 }
680
681 /* Perform initial interrupts setup. */
682 mlxreg_hotplug_set_irq(priv);
683 priv->after_probe = true;
684
685 return 0;
686}
687
688static int mlxreg_hotplug_remove(struct platform_device *pdev)
689{
690 struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);
691
692 /* Clean interrupts setup. */
693 mlxreg_hotplug_unset_irq(priv);
694 devm_free_irq(&pdev->dev, priv->irq, priv);
695
696 return 0;
697}
698
/* Platform driver glue - matched by name against the platform device. */
static struct platform_driver mlxreg_hotplug_driver = {
	.driver = {
		.name = "mlxreg-hotplug",
	},
	.probe = mlxreg_hotplug_probe,
	.remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
/*
 * NOTE(review): the SPDX header declares GPL-2.0+ while MODULE_LICENSE
 * claims Dual BSD/GPL - confirm which licensing is intended.
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");