v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_hba.c
 *
 * This file contains the TCM HBA Transport related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

static LIST_HEAD(backend_list);
static DEFINE_MUTEX(backend_mutex);

static u32 hba_id_counter;

static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);

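/**
 * transport_backend_register - register a backend driver with the TCM core
 * @ops: the backend's operation vector; ops->name must be unique
 *
 * Allocates a struct target_backend wrapper for @ops, sets up its
 * configfs item types and links it into the global backend_list so
 * devices can later be created against this backend by name.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EEXIST if
 * a backend with the same name is already registered.
 */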
int transport_backend_register(const struct target_backend_ops *ops)
{
	struct target_backend *tb, *old;

	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return -ENOMEM;
	tb->ops = ops;

	mutex_lock(&backend_mutex);
	list_for_each_entry(old, &backend_list, list) {
		if (!strcmp(old->ops->name, ops->name)) {
			pr_err("backend %s already registered.\n", ops->name);
			mutex_unlock(&backend_mutex);
			kfree(tb);
			return -EEXIST;
		}
	}
	target_setup_backend_cits(tb);
	list_add_tail(&tb->list, &backend_list);
	mutex_unlock(&backend_mutex);

	pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
			ops->name, ops->owner);
	return 0;
}
EXPORT_SYMBOL(transport_backend_register);

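/**
 * target_backend_unregister - remove a previously registered backend
 * @ops: the same operation vector passed to transport_backend_register()
 *
 * Unlinks the backend from backend_list and, via rcu_barrier(), waits
 * for outstanding ->free_device() call_rcu() callbacks to finish so the
 * backend module can safely unload afterwards.
 *
 * A backend module would typically pair the two calls as in this sketch
 * ("example_ops" is a hypothetical target_backend_ops instance):
 *
 *	static int __init example_init(void)
 *	{
 *		return transport_backend_register(&example_ops);
 *	}
 *	module_init(example_init);
 *
 *	static void __exit example_exit(void)
 *	{
 *		target_backend_unregister(&example_ops);
 *	}
 *	module_exit(example_exit);
 */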
void target_backend_unregister(const struct target_backend_ops *ops)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (tb->ops == ops) {
			list_del(&tb->list);
			mutex_unlock(&backend_mutex);
			/*
			 * Wait for any outstanding backend driver ->rcu_head
			 * callbacks to complete post TBO->free_device() ->
			 * call_rcu(), before allowing backend driver module
			 * unload of target_backend_ops->owner to proceed.
			 */
			rcu_barrier();
			kfree(tb);
			return;
		}
	}
	mutex_unlock(&backend_mutex);
}
EXPORT_SYMBOL(target_backend_unregister);

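/*
 * Look up a registered backend by name and take a reference on its
 * owning module so it cannot be unloaded while an HBA is attached to
 * it.  Returns NULL if no backend with that name exists, or if
 * try_module_get() fails (e.g. the module is already being removed).
 */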
static struct target_backend *core_get_backend(const char *name)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (!strcmp(tb->ops->name, name))
			goto found;
	}
	mutex_unlock(&backend_mutex);
	return NULL;
found:
	if (tb->ops->owner && !try_module_get(tb->ops->owner))
		tb = NULL;
	mutex_unlock(&backend_mutex);
	return tb;
}

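/**
 * core_alloc_hba - allocate an HBA and attach it to a named backend
 * @plugin_name: a registered backend name (e.g. "iblock" or "fileio")
 * @plugin_dep_id: index passed through to the backend's ->attach_hba()
 * @hba_flags: flag bits OR'd into hba->hba_flags
 *
 * Returns the new se_hba on success or an ERR_PTR() on failure, so a
 * caller would check the result along these lines (illustrative
 * sketch, not code from this file):
 *
 *	hba = core_alloc_hba("iblock", 0, 0);
 *	if (IS_ERR(hba))
 *		return PTR_ERR(hba);
 */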
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
	struct se_hba *hba;
	int ret = 0;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	if (!hba) {
		pr_err("Unable to allocate struct se_hba\n");
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&hba->device_lock);
	mutex_init(&hba->hba_access_mutex);

	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;

	hba->backend = core_get_backend(plugin_name);
	if (!hba->backend) {
		ret = -EINVAL;
		goto out_free_hba;
	}

	ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
	if (ret < 0)
		goto out_module_put;

	spin_lock(&hba_lock);
	hba->hba_id = hba_id_counter++;
	list_add_tail(&hba->hba_node, &hba_list);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target Core\n",
			hba->hba_id);

	return hba;

out_module_put:
	module_put(hba->backend->ops->owner);
	hba->backend = NULL;
out_free_hba:
	kfree(hba);
	return ERR_PTR(ret);
}

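/*
 * Tear down an HBA created by core_alloc_hba(): the backend's
 * ->detach_hba() is called, the HBA is unlinked from hba_list, the
 * module reference taken in core_get_backend() is dropped, and the
 * se_hba is freed.  All devices must already be gone, hence the
 * WARN_ON(hba->dev_count).
 */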
int
core_delete_hba(struct se_hba *hba)
{
	WARN_ON(hba->dev_count);

	hba->backend->ops->detach_hba(hba);

	spin_lock(&hba_lock);
	list_del(&hba->hba_node);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target Core\n",
			hba->hba_id);

	module_put(hba->backend->ops->owner);

	hba->backend = NULL;
	kfree(hba);
	return 0;
}

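/*
 * Returns true when the device has more than U32_MAX blocks, i.e. its
 * LBAs no longer fit in the 32-bit INFORMATION field of fixed-format
 * sense data, so descriptor-format sense data (which carries a 64-bit
 * information field) should be used instead.
 */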
bool target_sense_desc_format(struct se_device *dev)
{
	return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
}