Linux Audio

Check our new training course

Loading...
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Driver for FPGA Management Engine (FME) Partial Reconfiguration
  4 *
  5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
  6 *
  7 * Authors:
  8 *   Kang Luwei <luwei.kang@intel.com>
  9 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 10 *   Wu Hao <hao.wu@intel.com>
 11 *   Joseph Grecco <joe.grecco@intel.com>
 12 *   Enno Luebbers <enno.luebbers@intel.com>
 13 *   Tim Whisonant <tim.whisonant@intel.com>
 14 *   Ananda Ravuri <ananda.ravuri@intel.com>
 15 *   Christopher Rauer <christopher.rauer@intel.com>
 16 *   Henry Mitchel <henry.mitchel@intel.com>
 17 */
 18
 19#include <linux/types.h>
 20#include <linux/device.h>
 21#include <linux/vmalloc.h>
 22#include <linux/uaccess.h>
 23#include <linux/fpga/fpga-mgr.h>
 24#include <linux/fpga/fpga-bridge.h>
 25#include <linux/fpga/fpga-region.h>
 26#include <linux/fpga-dfl.h>
 27
 28#include "dfl.h"
 29#include "dfl-fme.h"
 30#include "dfl-fme-pr.h"
 31
 32static struct dfl_fme_region *
 33dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
 34{
 35	struct dfl_fme_region *fme_region;
 36
 37	list_for_each_entry(fme_region, &fme->region_list, node)
 38		if (fme_region->port_id == port_id)
 39			return fme_region;
 40
 41	return NULL;
 42}
 43
 44static int dfl_fme_region_match(struct device *dev, const void *data)
 45{
 46	return dev->parent == data;
 47}
 48
 49static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
 50{
 51	struct dfl_fme_region *fme_region;
 52	struct fpga_region *region;
 53
 54	fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
 55	if (!fme_region)
 56		return NULL;
 57
 58	region = fpga_region_class_find(NULL, &fme_region->region->dev,
 59					dfl_fme_region_match);
 60	if (!region)
 61		return NULL;
 62
 63	return region;
 64}
 65
 66static int fme_pr(struct platform_device *pdev, unsigned long arg)
 67{
 68	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
 69	void __user *argp = (void __user *)arg;
 70	struct dfl_fpga_fme_port_pr port_pr;
 71	struct fpga_image_info *info;
 72	struct fpga_region *region;
 73	void __iomem *fme_hdr;
 74	struct dfl_fme *fme;
 75	unsigned long minsz;
 76	void *buf = NULL;
 77	size_t length;
 78	int ret = 0;
 79	u64 v;
 80
 81	minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);
 82
 83	if (copy_from_user(&port_pr, argp, minsz))
 84		return -EFAULT;
 85
 86	if (port_pr.argsz < minsz || port_pr.flags)
 87		return -EINVAL;
 88
 89	/* get fme header region */
 90	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
 91					       FME_FEATURE_ID_HEADER);
 92
 93	/* check port id */
 94	v = readq(fme_hdr + FME_HDR_CAP);
 95	if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
 96		dev_dbg(&pdev->dev, "port number more than maximum\n");
 97		return -EINVAL;
 98	}
 99
100	if (!access_ok((void __user *)(unsigned long)port_pr.buffer_address,
101		       port_pr.buffer_size))
102		return -EFAULT;
103
104	/*
105	 * align PR buffer per PR bandwidth, as HW ignores the extra padding
106	 * data automatically.
107	 */
108	length = ALIGN(port_pr.buffer_size, 4);
109
110	buf = vmalloc(length);
111	if (!buf)
112		return -ENOMEM;
113
114	if (copy_from_user(buf,
115			   (void __user *)(unsigned long)port_pr.buffer_address,
116			   port_pr.buffer_size)) {
117		ret = -EFAULT;
118		goto free_exit;
119	}
120
121	/* prepare fpga_image_info for PR */
122	info = fpga_image_info_alloc(&pdev->dev);
123	if (!info) {
124		ret = -ENOMEM;
125		goto free_exit;
126	}
127
128	info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
129
130	mutex_lock(&pdata->lock);
131	fme = dfl_fpga_pdata_get_private(pdata);
132	/* fme device has been unregistered. */
133	if (!fme) {
134		ret = -EINVAL;
135		goto unlock_exit;
136	}
137
138	region = dfl_fme_region_find(fme, port_pr.port_id);
139	if (!region) {
140		ret = -EINVAL;
141		goto unlock_exit;
142	}
143
144	fpga_image_info_free(region->info);
145
146	info->buf = buf;
147	info->count = length;
148	info->region_id = port_pr.port_id;
149	region->info = info;
150
151	ret = fpga_region_program_fpga(region);
152
153	/*
154	 * it allows userspace to reset the PR region's logic by disabling and
155	 * reenabling the bridge to clear things out between accleration runs.
156	 * so no need to hold the bridges after partial reconfiguration.
157	 */
158	if (region->get_bridges)
159		fpga_bridges_put(&region->bridge_list);
160
161	put_device(&region->dev);
162unlock_exit:
163	mutex_unlock(&pdata->lock);
164free_exit:
165	vfree(buf);
166	return ret;
167}
168
/**
 * dfl_fme_create_mgr - create fpga mgr platform device as child device
 *
 * @pdata: fme platform_device's pdata
 * @feature: PR management sub feature; supplies the mapped register base
 *           handed on to the manager device.
 *
 * Return: mgr platform device if successful, and error code otherwise.
 */
static struct platform_device *
dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
		   struct dfl_feature *feature)
{
	struct platform_device *mgr, *fme = pdata->dev;
	struct dfl_fme_mgr_pdata mgr_pdata;
	int ret = -ENOMEM;

	/* the manager needs the feature's mapped registers to perform PR */
	if (!feature->ioaddr)
		return ERR_PTR(-ENODEV);

	mgr_pdata.ioaddr = feature->ioaddr;

	/*
	 * Each FME has only one fpga-mgr, so allocate platform device using
	 * the same FME platform device id.
	 */
	mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
	if (!mgr)
		return ERR_PTR(ret);

	mgr->dev.parent = &fme->dev;

	ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
	if (ret)
		goto create_mgr_err;

	ret = platform_device_add(mgr);
	if (ret)
		goto create_mgr_err;

	return mgr;

create_mgr_err:
	/* drops the last reference and frees the pdata copy attached above */
	platform_device_put(mgr);
	return ERR_PTR(ret);
}
213
/**
 * dfl_fme_destroy_mgr - destroy fpga mgr platform device
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);

	/* unregisters the mgr pdev and drops the creation-time reference */
	platform_device_unregister(priv->mgr);
}
224
/**
 * dfl_fme_create_bridge - create fme fpga bridge platform device as child
 *
 * @pdata: fme platform device's pdata
 * @port_id: port id for the bridge to be created.
 *
 * Return: bridge platform device if successful, and error code otherwise.
 */
static struct dfl_fme_bridge *
dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
{
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_br_pdata br_pdata;
	struct dfl_fme_bridge *fme_br;
	int ret = -ENOMEM;

	/* devm-managed: freed automatically when the fme device unbinds */
	fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
	if (!fme_br)
		return ERR_PTR(ret);

	br_pdata.cdev = pdata->dfl_cdev;
	br_pdata.port_id = port_id;

	fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
					   PLATFORM_DEVID_AUTO);
	if (!fme_br->br)
		return ERR_PTR(ret);

	fme_br->br->dev.parent = dev;

	ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
	if (ret)
		goto create_br_err;

	ret = platform_device_add(fme_br->br);
	if (ret)
		goto create_br_err;

	return fme_br;

create_br_err:
	/* frees the bridge pdev and the pdata copy attached above */
	platform_device_put(fme_br->br);
	return ERR_PTR(ret);
}
269
/**
 * dfl_fme_destroy_bridge - destroy fpga bridge platform device
 * @fme_br: fme bridge to destroy
 */
static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
{
	/* the dfl_fme_bridge container itself is devm-managed */
	platform_device_unregister(fme_br->br);
}
278
/**
 * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_bridge *fbridge, *tmp;

	/* _safe variant: entries are unlinked while walking the list */
	list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
		list_del(&fbridge->node);
		dfl_fme_destroy_bridge(fbridge);
	}
}
293
/**
 * dfl_fme_create_region - create fpga region platform device as child
 *
 * @pdata: fme platform device's pdata
 * @mgr: mgr platform device needed for region
 * @br: br platform device needed for region
 * @port_id: port id
 *
 * Return: fme region if successful, and error code otherwise.
 */
static struct dfl_fme_region *
dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
		      struct platform_device *mgr,
		      struct platform_device *br, int port_id)
{
	struct dfl_fme_region_pdata region_pdata;
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_region *fme_region;
	int ret = -ENOMEM;

	/* devm-managed: freed automatically when the fme device unbinds */
	fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
	if (!fme_region)
		return ERR_PTR(ret);

	region_pdata.mgr = mgr;
	region_pdata.br = br;

	/*
	 * Each FPGA device may have more than one port, so allocate platform
	 * device using the same port platform device id.
	 */
	fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
	if (!fme_region->region)
		return ERR_PTR(ret);

	fme_region->region->dev.parent = dev;

	ret = platform_device_add_data(fme_region->region, &region_pdata,
				       sizeof(region_pdata));
	if (ret)
		goto create_region_err;

	ret = platform_device_add(fme_region->region);
	if (ret)
		goto create_region_err;

	fme_region->port_id = port_id;

	return fme_region;

create_region_err:
	/* frees the region pdev and the pdata copy attached above */
	platform_device_put(fme_region->region);
	return ERR_PTR(ret);
}
348
/**
 * dfl_fme_destroy_region - destroy fme region
 * @fme_region: fme region to destroy
 */
static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
{
	/* the dfl_fme_region container itself is devm-managed */
	platform_device_unregister(fme_region->region);
}
357
/**
 * dfl_fme_destroy_regions - destroy all fme regions
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_region *fme_region, *tmp;

	/* _safe variant: entries are unlinked while walking the list */
	list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
		list_del(&fme_region->node);
		dfl_fme_destroy_region(fme_region);
	}
}
372
/*
 * pr_mgmt_init - init callback of the PR management sub feature
 *
 * Creates the fpga manager child device plus, for each implemented port,
 * one fpga bridge and one fpga region child device.  The sub devices are
 * tracked on priv->bridge_list / priv->region_list so pr_mgmt_uinit()
 * can tear them down again.  All of it happens under pdata->lock.
 */
static int pr_mgmt_init(struct platform_device *pdev,
			struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fme_region *fme_region;
	struct dfl_fme_bridge *fme_br;
	struct platform_device *mgr;
	struct dfl_fme *priv;
	void __iomem *fme_hdr;
	int ret = -ENODEV, i = 0;
	u64 fme_cap, port_offset;

	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
					       FME_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	priv = dfl_fpga_pdata_get_private(pdata);

	/* Initialize the region and bridge sub device list */
	INIT_LIST_HEAD(&priv->region_list);
	INIT_LIST_HEAD(&priv->bridge_list);

	/* Create fpga mgr platform device */
	mgr = dfl_fme_create_mgr(pdata, feature);
	if (IS_ERR(mgr)) {
		dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
		/* NOTE(review): returns -ENODEV here, not PTR_ERR(mgr) - confirm intended */
		goto unlock;
	}

	priv->mgr = mgr;

	/* Read capability register to check number of regions and bridges */
	fme_cap = readq(fme_hdr + FME_HDR_CAP);
	for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
		/* skip ports that are not implemented on this device */
		port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
		if (!(port_offset & FME_PORT_OFST_IMP))
			continue;

		/* Create bridge for each port */
		fme_br = dfl_fme_create_bridge(pdata, i);
		if (IS_ERR(fme_br)) {
			ret = PTR_ERR(fme_br);
			goto destroy_region;
		}

		list_add(&fme_br->node, &priv->bridge_list);

		/* Create region for each port */
		fme_region = dfl_fme_create_region(pdata, mgr,
						   fme_br->br, i);
		if (IS_ERR(fme_region)) {
			ret = PTR_ERR(fme_region);
			goto destroy_region;
		}

		list_add(&fme_region->node, &priv->region_list);
	}
	mutex_unlock(&pdata->lock);

	return 0;

destroy_region:
	/* tear down everything created so far, most-dependent first */
	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
unlock:
	mutex_unlock(&pdata->lock);
	return ret;
}
442
/*
 * pr_mgmt_uinit - uinit callback of the PR management sub feature
 *
 * Destroys the region, bridge and manager sub devices created by
 * pr_mgmt_init(), under pdata->lock.
 */
static void pr_mgmt_uinit(struct platform_device *pdev,
			  struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);

	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
	mutex_unlock(&pdata->lock);
}
455
456static long fme_pr_ioctl(struct platform_device *pdev,
457			 struct dfl_feature *feature,
458			 unsigned int cmd, unsigned long arg)
459{
460	long ret;
461
462	switch (cmd) {
463	case DFL_FPGA_FME_PORT_PR:
464		ret = fme_pr(pdev, arg);
465		break;
466	default:
467		ret = -ENODEV;
468	}
469
470	return ret;
471}
472
/* DFL feature id(s) this sub driver binds to; the zero entry terminates. */
const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_PR_MGMT,},
	{0}
};

/* Sub feature callbacks hooked up by the FME main driver. */
const struct dfl_feature_ops fme_pr_mgmt_ops = {
	.init = pr_mgmt_init,
	.uinit = pr_mgmt_uinit,
	.ioctl = fme_pr_ioctl,
};
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Driver for FPGA Management Engine (FME) Partial Reconfiguration
  4 *
  5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
  6 *
  7 * Authors:
  8 *   Kang Luwei <luwei.kang@intel.com>
  9 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 10 *   Wu Hao <hao.wu@intel.com>
 11 *   Joseph Grecco <joe.grecco@intel.com>
 12 *   Enno Luebbers <enno.luebbers@intel.com>
 13 *   Tim Whisonant <tim.whisonant@intel.com>
 14 *   Ananda Ravuri <ananda.ravuri@intel.com>
 15 *   Christopher Rauer <christopher.rauer@intel.com>
 16 *   Henry Mitchel <henry.mitchel@intel.com>
 17 */
 18
 19#include <linux/types.h>
 20#include <linux/device.h>
 21#include <linux/vmalloc.h>
 22#include <linux/uaccess.h>
 23#include <linux/fpga/fpga-mgr.h>
 24#include <linux/fpga/fpga-bridge.h>
 25#include <linux/fpga/fpga-region.h>
 26#include <linux/fpga-dfl.h>
 27
 28#include "dfl.h"
 29#include "dfl-fme.h"
 30#include "dfl-fme-pr.h"
 31
 32static struct dfl_fme_region *
 33dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
 34{
 35	struct dfl_fme_region *fme_region;
 36
 37	list_for_each_entry(fme_region, &fme->region_list, node)
 38		if (fme_region->port_id == port_id)
 39			return fme_region;
 40
 41	return NULL;
 42}
 43
 44static int dfl_fme_region_match(struct device *dev, const void *data)
 45{
 46	return dev->parent == data;
 47}
 48
 49static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
 50{
 51	struct dfl_fme_region *fme_region;
 52	struct fpga_region *region;
 53
 54	fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
 55	if (!fme_region)
 56		return NULL;
 57
 58	region = fpga_region_class_find(NULL, &fme_region->region->dev,
 59					dfl_fme_region_match);
 60	if (!region)
 61		return NULL;
 62
 63	return region;
 64}
 65
 66static int fme_pr(struct platform_device *pdev, unsigned long arg)
 67{
 68	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
 69	void __user *argp = (void __user *)arg;
 70	struct dfl_fpga_fme_port_pr port_pr;
 71	struct fpga_image_info *info;
 72	struct fpga_region *region;
 73	void __iomem *fme_hdr;
 74	struct dfl_fme *fme;
 75	unsigned long minsz;
 76	void *buf = NULL;
 77	size_t length;
 78	int ret = 0;
 79	u64 v;
 80
 81	minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);
 82
 83	if (copy_from_user(&port_pr, argp, minsz))
 84		return -EFAULT;
 85
 86	if (port_pr.argsz < minsz || port_pr.flags)
 87		return -EINVAL;
 88
 89	/* get fme header region */
 90	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
 91					       FME_FEATURE_ID_HEADER);
 92
 93	/* check port id */
 94	v = readq(fme_hdr + FME_HDR_CAP);
 95	if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
 96		dev_dbg(&pdev->dev, "port number more than maximum\n");
 97		return -EINVAL;
 98	}
 99
 
 
 
 
100	/*
101	 * align PR buffer per PR bandwidth, as HW ignores the extra padding
102	 * data automatically.
103	 */
104	length = ALIGN(port_pr.buffer_size, 4);
105
106	buf = vmalloc(length);
107	if (!buf)
108		return -ENOMEM;
109
110	if (copy_from_user(buf,
111			   (void __user *)(unsigned long)port_pr.buffer_address,
112			   port_pr.buffer_size)) {
113		ret = -EFAULT;
114		goto free_exit;
115	}
116
117	/* prepare fpga_image_info for PR */
118	info = fpga_image_info_alloc(&pdev->dev);
119	if (!info) {
120		ret = -ENOMEM;
121		goto free_exit;
122	}
123
124	info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
125
126	mutex_lock(&pdata->lock);
127	fme = dfl_fpga_pdata_get_private(pdata);
128	/* fme device has been unregistered. */
129	if (!fme) {
130		ret = -EINVAL;
131		goto unlock_exit;
132	}
133
134	region = dfl_fme_region_find(fme, port_pr.port_id);
135	if (!region) {
136		ret = -EINVAL;
137		goto unlock_exit;
138	}
139
140	fpga_image_info_free(region->info);
141
142	info->buf = buf;
143	info->count = length;
144	info->region_id = port_pr.port_id;
145	region->info = info;
146
147	ret = fpga_region_program_fpga(region);
148
149	/*
150	 * it allows userspace to reset the PR region's logic by disabling and
151	 * reenabling the bridge to clear things out between acceleration runs.
152	 * so no need to hold the bridges after partial reconfiguration.
153	 */
154	if (region->get_bridges)
155		fpga_bridges_put(&region->bridge_list);
156
157	put_device(&region->dev);
158unlock_exit:
159	mutex_unlock(&pdata->lock);
160free_exit:
161	vfree(buf);
162	return ret;
163}
164
/**
 * dfl_fme_create_mgr - create fpga mgr platform device as child device
 *
 * @pdata: fme platform_device's pdata
 * @feature: PR management sub feature; supplies the mapped register base
 *           handed on to the manager device.
 *
 * Return: mgr platform device if successful, and error code otherwise.
 */
static struct platform_device *
dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
		   struct dfl_feature *feature)
{
	struct platform_device *mgr, *fme = pdata->dev;
	struct dfl_fme_mgr_pdata mgr_pdata;
	int ret = -ENOMEM;

	/* the manager needs the feature's mapped registers to perform PR */
	if (!feature->ioaddr)
		return ERR_PTR(-ENODEV);

	mgr_pdata.ioaddr = feature->ioaddr;

	/*
	 * Each FME has only one fpga-mgr, so allocate platform device using
	 * the same FME platform device id.
	 */
	mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
	if (!mgr)
		return ERR_PTR(ret);

	mgr->dev.parent = &fme->dev;

	ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
	if (ret)
		goto create_mgr_err;

	ret = platform_device_add(mgr);
	if (ret)
		goto create_mgr_err;

	return mgr;

create_mgr_err:
	/* drops the last reference and frees the pdata copy attached above */
	platform_device_put(mgr);
	return ERR_PTR(ret);
}
209
/**
 * dfl_fme_destroy_mgr - destroy fpga mgr platform device
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);

	/* unregisters the mgr pdev and drops the creation-time reference */
	platform_device_unregister(priv->mgr);
}
220
/**
 * dfl_fme_create_bridge - create fme fpga bridge platform device as child
 *
 * @pdata: fme platform device's pdata
 * @port_id: port id for the bridge to be created.
 *
 * Return: bridge platform device if successful, and error code otherwise.
 */
static struct dfl_fme_bridge *
dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
{
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_br_pdata br_pdata;
	struct dfl_fme_bridge *fme_br;
	int ret = -ENOMEM;

	/* devm-managed: freed automatically when the fme device unbinds */
	fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
	if (!fme_br)
		return ERR_PTR(ret);

	br_pdata.cdev = pdata->dfl_cdev;
	br_pdata.port_id = port_id;

	fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
					   PLATFORM_DEVID_AUTO);
	if (!fme_br->br)
		return ERR_PTR(ret);

	fme_br->br->dev.parent = dev;

	ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
	if (ret)
		goto create_br_err;

	ret = platform_device_add(fme_br->br);
	if (ret)
		goto create_br_err;

	return fme_br;

create_br_err:
	/* frees the bridge pdev and the pdata copy attached above */
	platform_device_put(fme_br->br);
	return ERR_PTR(ret);
}
265
/**
 * dfl_fme_destroy_bridge - destroy fpga bridge platform device
 * @fme_br: fme bridge to destroy
 */
static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
{
	/* the dfl_fme_bridge container itself is devm-managed */
	platform_device_unregister(fme_br->br);
}
274
/**
 * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_bridge *fbridge, *tmp;

	/* _safe variant: entries are unlinked while walking the list */
	list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
		list_del(&fbridge->node);
		dfl_fme_destroy_bridge(fbridge);
	}
}
289
/**
 * dfl_fme_create_region - create fpga region platform device as child
 *
 * @pdata: fme platform device's pdata
 * @mgr: mgr platform device needed for region
 * @br: br platform device needed for region
 * @port_id: port id
 *
 * Return: fme region if successful, and error code otherwise.
 */
static struct dfl_fme_region *
dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
		      struct platform_device *mgr,
		      struct platform_device *br, int port_id)
{
	struct dfl_fme_region_pdata region_pdata;
	struct device *dev = &pdata->dev->dev;
	struct dfl_fme_region *fme_region;
	int ret = -ENOMEM;

	/* devm-managed: freed automatically when the fme device unbinds */
	fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
	if (!fme_region)
		return ERR_PTR(ret);

	region_pdata.mgr = mgr;
	region_pdata.br = br;

	/*
	 * Each FPGA device may have more than one port, so allocate platform
	 * device using the same port platform device id.
	 */
	fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
	if (!fme_region->region)
		return ERR_PTR(ret);

	fme_region->region->dev.parent = dev;

	ret = platform_device_add_data(fme_region->region, &region_pdata,
				       sizeof(region_pdata));
	if (ret)
		goto create_region_err;

	ret = platform_device_add(fme_region->region);
	if (ret)
		goto create_region_err;

	fme_region->port_id = port_id;

	return fme_region;

create_region_err:
	/* frees the region pdev and the pdata copy attached above */
	platform_device_put(fme_region->region);
	return ERR_PTR(ret);
}
344
/**
 * dfl_fme_destroy_region - destroy fme region
 * @fme_region: fme region to destroy
 */
static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
{
	/* the dfl_fme_region container itself is devm-managed */
	platform_device_unregister(fme_region->region);
}
353
/**
 * dfl_fme_destroy_regions - destroy all fme regions
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_region *fme_region, *tmp;

	/* _safe variant: entries are unlinked while walking the list */
	list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
		list_del(&fme_region->node);
		dfl_fme_destroy_region(fme_region);
	}
}
368
/*
 * pr_mgmt_init - init callback of the PR management sub feature
 *
 * Creates the fpga manager child device plus, for each implemented port,
 * one fpga bridge and one fpga region child device.  The sub devices are
 * tracked on priv->bridge_list / priv->region_list so pr_mgmt_uinit()
 * can tear them down again.  All of it happens under pdata->lock.
 */
static int pr_mgmt_init(struct platform_device *pdev,
			struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fme_region *fme_region;
	struct dfl_fme_bridge *fme_br;
	struct platform_device *mgr;
	struct dfl_fme *priv;
	void __iomem *fme_hdr;
	int ret = -ENODEV, i = 0;
	u64 fme_cap, port_offset;

	fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
					       FME_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	priv = dfl_fpga_pdata_get_private(pdata);

	/* Initialize the region and bridge sub device list */
	INIT_LIST_HEAD(&priv->region_list);
	INIT_LIST_HEAD(&priv->bridge_list);

	/* Create fpga mgr platform device */
	mgr = dfl_fme_create_mgr(pdata, feature);
	if (IS_ERR(mgr)) {
		dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
		/* NOTE(review): returns -ENODEV here, not PTR_ERR(mgr) - confirm intended */
		goto unlock;
	}

	priv->mgr = mgr;

	/* Read capability register to check number of regions and bridges */
	fme_cap = readq(fme_hdr + FME_HDR_CAP);
	for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
		/* skip ports that are not implemented on this device */
		port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
		if (!(port_offset & FME_PORT_OFST_IMP))
			continue;

		/* Create bridge for each port */
		fme_br = dfl_fme_create_bridge(pdata, i);
		if (IS_ERR(fme_br)) {
			ret = PTR_ERR(fme_br);
			goto destroy_region;
		}

		list_add(&fme_br->node, &priv->bridge_list);

		/* Create region for each port */
		fme_region = dfl_fme_create_region(pdata, mgr,
						   fme_br->br, i);
		if (IS_ERR(fme_region)) {
			ret = PTR_ERR(fme_region);
			goto destroy_region;
		}

		list_add(&fme_region->node, &priv->region_list);
	}
	mutex_unlock(&pdata->lock);

	return 0;

destroy_region:
	/* tear down everything created so far, most-dependent first */
	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
unlock:
	mutex_unlock(&pdata->lock);
	return ret;
}
438
/*
 * pr_mgmt_uinit - uinit callback of the PR management sub feature
 *
 * Destroys the region, bridge and manager sub devices created by
 * pr_mgmt_init(), under pdata->lock.
 */
static void pr_mgmt_uinit(struct platform_device *pdev,
			  struct dfl_feature *feature)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);

	dfl_fme_destroy_regions(pdata);
	dfl_fme_destroy_bridges(pdata);
	dfl_fme_destroy_mgr(pdata);
	mutex_unlock(&pdata->lock);
}
451
452static long fme_pr_ioctl(struct platform_device *pdev,
453			 struct dfl_feature *feature,
454			 unsigned int cmd, unsigned long arg)
455{
456	long ret;
457
458	switch (cmd) {
459	case DFL_FPGA_FME_PORT_PR:
460		ret = fme_pr(pdev, arg);
461		break;
462	default:
463		ret = -ENODEV;
464	}
465
466	return ret;
467}
468
/* DFL feature id(s) this sub driver binds to; the zero entry terminates. */
const struct dfl_feature_id fme_pr_mgmt_id_table[] = {
	{.id = FME_FEATURE_ID_PR_MGMT,},
	{0}
};

/* Sub feature callbacks hooked up by the FME main driver. */
const struct dfl_feature_ops fme_pr_mgmt_ops = {
	.init = pr_mgmt_init,
	.uinit = pr_mgmt_uinit,
	.ioctl = fme_pr_ioctl,
};