/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/*	__core_tpg_get_initiator_node_acl():
 *
 *	mutex_lock(&tpg->acl_node_mutex); must be held when calling
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/*	core_tpg_get_initiator_node_acl():
 *
 *	Look up an existing se_node_acl by initiator name and take an
 *	acl_kref reference for the caller.  Acquires acl_node_mutex internally.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

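/*
 * Queue a NEXUS LOSS OCCURRED unit attention (ASC 0x29) on every
 * se_dev_entry currently mapped to the given node ACL.
 */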
void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
			ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/*	core_tpg_add_node_to_devs():
 *
 *	Create demo-mode MappedLUNs for a node ACL, either for a single
 *	LUN when lun_orig is non-NULL or for every LUN in the TPG.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	bool lun_access_ro = true;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access_ro = false;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access_ro = true;
			else
				lun_access_ro = false;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			lun_access_ro ? "READ-ONLY" : "READ-WRITE");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access_ro, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this dynamic
		 * LUN ACL now..
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

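/*
 * Apply the requested queue depth to a node ACL, warning and falling
 * back to a depth of 1 if the requested value is 0.
 */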
static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

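/*
 * Allocate and initialize a new se_node_acl, including the fabric
 * driver's private area (node_acl_size) and the default queue depth.
 */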
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

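/*
 * Link a newly allocated se_node_acl into its TPG's acl_node_list and
 * log the addition.
 */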
static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

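/*
 * Return true if an ACL (dynamic or explicit) already exists for
 * initiatorname in the given TPG.
 */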
bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

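/*
 * Called at fabric session login.  If no ACL exists for the initiator
 * and the fabric allows demo mode, allocate a dynamic node ACL, map the
 * demo-mode LUNs for it (unless the fabric asks for login-only demo
 * mode) and return it with the extra acl_kref reference held.
 */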
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

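/*
 * Busy-wait until all persistent reservation references to this node
 * ACL have been dropped.
 */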
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

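/*
 * Add an explicit node ACL.  An existing dynamic ACL for the same
 * initiator name is converted in place; an existing explicit ACL makes
 * the request fail with -EEXIST.
 */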
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

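/*
 * Force-close every session on this node ACL that is not already being
 * torn down, restarting the list walk after each close_session() call.
 */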
static void target_shutdown_sessions(struct se_node_acl *acl)
{
	struct se_session *sess;
	unsigned long flags;

restart:
	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
		if (sess->sess_tearing_down)
			continue;

		list_del_init(&sess->sess_acl_list);
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		if (acl->se_tpg->se_tpg_tfo->close_session)
			acl->se_tpg->se_tpg_tfo->close_session(sess);
		goto restart;
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
}

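/*
 * Delete a node ACL: unlink it from its TPG, shut down its sessions,
 * wait for the final acl_kref and PR references to drop, then release
 * its mapped LUNs and free the ACL.
 */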
void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	target_shutdown_sessions(acl);

	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}

/*	core_tpg_set_initiator_node_queue_depth():
 *
 *	Update the queue depth for an existing node ACL and shut down its
 *	sessions to force session reinstatement at the new depth.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	struct se_portal_group *tpg = acl->se_tpg;

	/*
	 * Allow the setting of se_node_acl queue_depth to be idempotent,
	 * and not force a session shutdown event if the value is not
	 * changing.
	 */
	if (acl->queue_depth == queue_depth)
		return 0;
	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * target_set_nacl_queue_depth() to set the new queue depth.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	/*
	 * Shutdown all pending sessions to force session reinstatement.
	 */
	target_shutdown_sessions(acl);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

/*	core_tpg_set_initiator_node_tag():
 *
 *	Initiator nodeacl tags are not used internally, but may be used by
 *	userspace to emulate aliases or groups.
 *	Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

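/*
 * percpu_ref release callback for se_lun->lun_ref, invoked once the
 * last I/O reference to the LUN is dropped.
 */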
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_shutdown_comp);
}

/* Does not change se_wwn->priv. */
int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				true, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

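/*
 * Tear down a portal group: unlink it from the global TPG list, wait
 * for outstanding PR references, release any remaining demo-mode node
 * ACLs, and remove virtual LUN 0 for fabrics with a valid proto_id.
 */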
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		 "Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

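/*
 * Allocate and initialize an se_lun for the given TPG and unpacked LUN
 * number.  The LUN is not yet associated with a backing device.
 */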
struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

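/*
 * Export an se_lun from a TPG: set up the percpu I/O reference,
 * allocate a relative target port identifier, attach the default ALUA
 * target port group where applicable, and link the LUN to both the
 * backing device and the TPG's LUN hlist.
 */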
int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	bool lun_access_ro,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags &
	     TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access_ro = true;
	else
		lun->lun_access_ro = lun_access_ro;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}

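/*
 * Undo core_tpg_add_lun(): clear the LUN from mapped node ACLs, wait
 * for active I/O references to drain, detach the ALUA target port
 * group, unlink the LUN from the backing device and the TPG, and
 * release its percpu reference.
 */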
void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	lun->lun_shutdown = true;

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);

	lun->lun_shutdown = false;
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}