drivers/greybus/bundle.c, Linux v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Greybus bundles
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 */

#include <linux/greybus.h>
#include "greybus_trace.h"

static ssize_t bundle_class_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	return sprintf(buf, "0x%02x\n", bundle->class);
}
static DEVICE_ATTR_RO(bundle_class);

static ssize_t bundle_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	return sprintf(buf, "%u\n", bundle->id);
}
static DEVICE_ATTR_RO(bundle_id);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	if (!bundle->state)
		return sprintf(buf, "\n");

	return sprintf(buf, "%s\n", bundle->state);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	kfree(bundle->state);
	bundle->state = kstrdup(buf, GFP_KERNEL);
	if (!bundle->state)
		return -ENOMEM;

	/* Tell userspace that the file contents changed */
	sysfs_notify(&bundle->dev.kobj, NULL, "state");

	return size;
}
static DEVICE_ATTR_RW(state);

static struct attribute *bundle_attrs[] = {
	&dev_attr_bundle_class.attr,
	&dev_attr_bundle_id.attr,
	&dev_attr_state.attr,
	NULL,
};

ATTRIBUTE_GROUPS(bundle);

static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
					u8 bundle_id)
{
	struct gb_bundle *bundle;

	list_for_each_entry(bundle, &intf->bundles, links) {
		if (bundle->id == bundle_id)
			return bundle;
	}

	return NULL;
}

static void gb_bundle_release(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	trace_gb_bundle_release(bundle);

	kfree(bundle->state);
	kfree(bundle->cport_desc);
	kfree(bundle);
}

#ifdef CONFIG_PM
static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_disable(connection);
}

static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_enable(connection);
}

/*
 * Runtime-suspend a bundle: prefer the bound driver's runtime_suspend
 * callback, falling back to disabling all of the bundle's connections,
 * then ask the module (over the interface's control connection) to
 * suspend the bundle, undoing the first step if that request fails.
 */
static int gb_bundle_suspend(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	if (pm && pm->runtime_suspend) {
		ret = pm->runtime_suspend(&bundle->dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_disable_all_connections(bundle);
	}

	ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
	if (ret) {
		if (pm && pm->runtime_resume)
			ret = pm->runtime_resume(dev);
		else
			gb_bundle_enable_all_connections(bundle);

		return ret;
	}

	return 0;
}

/*
 * Runtime-resume a bundle: first ask the module to resume it over the
 * control connection, then bring the host side back up via the driver's
 * runtime_resume callback or by re-enabling all of its connections.
 */
static int gb_bundle_resume(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
	if (ret)
		return ret;

	if (pm && pm->runtime_resume) {
		ret = pm->runtime_resume(dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_enable_all_connections(bundle);
	}

	return 0;
}

static int gb_bundle_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops gb_bundle_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};

struct device_type greybus_bundle_type = {
	.name =		"greybus_bundle",
	.release =	gb_bundle_release,
	.pm =		&gb_bundle_pm_ops,
};

/*
 * Create a gb_bundle structure to represent a discovered
 * bundle.  Returns a pointer to the new bundle or a null
 * pointer if a failure occurs due to memory exhaustion.
 */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
				   u8 class)
{
	struct gb_bundle *bundle;

	if (bundle_id == BUNDLE_ID_NONE) {
		dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
		return NULL;
	}

	/*
	 * Reject any attempt to reuse a bundle id.  We initialize
	 * these serially, so there's no need to worry about keeping
	 * the interface bundle list locked here.
	 */
	if (gb_bundle_find(intf, bundle_id)) {
		dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
		return NULL;
	}

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return NULL;

	bundle->intf = intf;
	bundle->id = bundle_id;
	bundle->class = class;
	INIT_LIST_HEAD(&bundle->connections);

	bundle->dev.parent = &intf->dev;
	bundle->dev.bus = &greybus_bus_type;
	bundle->dev.type = &greybus_bundle_type;
	bundle->dev.groups = bundle_groups;
	bundle->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&bundle->dev);
	dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);

	list_add(&bundle->links, &intf->bundles);

	trace_gb_bundle_create(bundle);

	return bundle;
}

int gb_bundle_add(struct gb_bundle *bundle)
{
	int ret;

	ret = device_add(&bundle->dev);
	if (ret) {
		dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
		return ret;
	}

	trace_gb_bundle_add(bundle);

	return 0;
}

/*
 * Tear down a previously set up bundle.
 */
void gb_bundle_destroy(struct gb_bundle *bundle)
{
	trace_gb_bundle_destroy(bundle);

	if (device_is_registered(&bundle->dev))
		device_del(&bundle->dev);

	list_del(&bundle->links);

	put_device(&bundle->dev);
}
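
The three exported functions above, gb_bundle_create(), gb_bundle_add() and gb_bundle_destroy(), are meant to be called by the Greybus interface code as bundles are discovered on an interface. A minimal, hypothetical caller sketch of that lifecycle (the function name and error handling below are illustrative and not part of bundle.c):

/* Hypothetical sketch of how the interface code uses the bundle API. */
static int example_setup_bundle(struct gb_interface *intf, u8 id, u8 class)
{
	struct gb_bundle *bundle;
	int ret;

	/* Allocate, initialize and link the bundle into the interface. */
	bundle = gb_bundle_create(intf, id, class);
	if (!bundle)
		return -ENOMEM;

	/* Register the bundle device so a class driver can bind to it. */
	ret = gb_bundle_add(bundle);
	if (ret) {
		/* Unlinks the bundle and drops the final reference. */
		gb_bundle_destroy(bundle);
		return ret;
	}

	return 0;
}

Once registered, a bundle is matched against Greybus class drivers on the greybus bus, typically by its class or by the interface's vendor and product IDs. As a rough sketch of that consumer side, assuming the greybus_driver registration interface declared in <linux/greybus.h>, a skeleton bundle driver would look roughly like this; the driver name and class value are hypothetical placeholders:

/* Hypothetical skeleton of a Greybus bundle (class) driver. */
#include <linux/module.h>
#include <linux/greybus.h>

static int example_probe(struct gb_bundle *bundle,
			 const struct greybus_bundle_id *id)
{
	/*
	 * A real driver would walk bundle->cport_desc here, create its
	 * connections and enable them before declaring the bundle ready.
	 */
	dev_info(&bundle->dev, "bound to bundle class 0x%02x\n", bundle->class);
	return 0;
}

static void example_disconnect(struct gb_bundle *bundle)
{
	/* Tear down whatever example_probe() set up. */
}

static const struct greybus_bundle_id example_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(0x7f) },	/* hypothetical class value */
	{ }
};
MODULE_DEVICE_TABLE(greybus, example_id_table);

static struct greybus_driver example_driver = {
	.name		= "example",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,
};
module_greybus_driver(example_driver);

MODULE_LICENSE("GPL");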