/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#ifndef __ND_CORE_H__
#define __ND_CORE_H__
#include <linux/libnvdimm.h>
#include <linux/device.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
#include "nd.h"

extern struct list_head nvdimm_bus_list;
extern struct mutex nvdimm_bus_list_mutex;
extern int nvdimm_major;
extern struct workqueue_struct *nvdimm_wq;

struct nvdimm_bus {
	struct nvdimm_bus_descriptor *nd_desc;	/* provider callbacks */
	wait_queue_head_t wait;		/* drain in-flight probes/ioctls */
	struct list_head list;		/* node on nvdimm_bus_list */
	struct device dev;
	int id, probe_active;
	atomic_t ioctl_active;
	struct list_head mapping_list;
	struct mutex reconfig_mutex;	/* serializes bus configuration */
	struct badrange badrange;	/* known bad address ranges */
};

struct nvdimm {
	unsigned long flags;		/* NDD_* device flags */
	void *provider_data;		/* opaque provider context */
	unsigned long cmd_mask;		/* supported management commands */
	struct device dev;
	atomic_t busy;			/* in-flight command count */
	int id, num_flush;
	struct resource *flush_wpq;	/* flush hint address resources */
	const char *dimm_id;
	struct {
		const struct nvdimm_security_ops *ops;
		unsigned long flags;		/* user security state */
		unsigned long ext_flags;	/* master security state */
		unsigned int overwrite_tmo;	/* overwrite timeout */
		struct kernfs_node *overwrite_state;
	} sec;
	struct delayed_work dwork;	/* polls overwrite completion */
	const struct nvdimm_fw_ops *fw_ops;
};

static inline unsigned long nvdimm_security_flags(
		struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
	u64 flags;
	const u64 state_flags = 1UL << NVDIMM_SECURITY_DISABLED
		| 1UL << NVDIMM_SECURITY_LOCKED
		| 1UL << NVDIMM_SECURITY_UNLOCKED
		| 1UL << NVDIMM_SECURITY_OVERWRITE;

	if (!nvdimm->sec.ops)
		return 0;

	flags = nvdimm->sec.ops->get_flags(nvdimm, ptype);
	/* disabled, locked, unlocked, and overwrite are mutually exclusive */
	dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
			"reported invalid security state: %#llx\n",
			(unsigned long long) flags);
	return flags;
}
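
/*
 * Example (illustrative sketch, not part of the upstream header): callers
 * treat the returned value as a bitmask of security states.  A hypothetical
 * check that refuses to proceed while the user passphrase keeps the DIMM
 * locked could look like:
 *
 *	unsigned long flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
 *
 *	if (test_bit(NVDIMM_SECURITY_LOCKED, &flags))
 *		return -EBUSY;
 */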
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len);
void nvdimm_security_overwrite_query(struct work_struct *work);
#else
static inline ssize_t nvdimm_security_store(struct device *dev,
		const char *buf, size_t len)
{
	return -EOPNOTSUPP;
}
static inline void nvdimm_security_overwrite_query(struct work_struct *work)
{
}
#endif

/**
 * struct blk_alloc_info - tracking info for BLK dpa scanning
 * @nd_mapping: blk region mapping boundaries
 * @available: decremented in alias_dpa_busy as aliased PMEM is scanned
 * @busy: decremented in blk_dpa_busy to account for ranges already
 * 	  handled by alias_dpa_busy
 * @res: alias_dpa_busy interprets this as a free space range that needs
 * 	 to be truncated to the valid BLK allocation starting DPA;
 * 	 blk_dpa_busy treats it as a busy range that needs the aliased
 * 	 PMEM ranges truncated.
 */
struct blk_alloc_info {
	struct nd_mapping *nd_mapping;
	resource_size_t available, busy;
	struct resource *res;
};
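
/*
 * Illustrative sketch (mirrors how nd_blk_available_dpa() drives a scan,
 * simplified here): seed @available with the mapping size, then walk every
 * device on the bus so alias_dpa_busy() can subtract aliased PMEM capacity:
 *
 *	struct blk_alloc_info info = {
 *		.nd_mapping = nd_mapping,
 *		.available = nd_mapping->size,
 *	};
 *
 *	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
 */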

bool is_nvdimm(struct device *dev);
bool is_nd_pmem(struct device *dev);
bool is_nd_volatile(struct device *dev);
bool is_nd_blk(struct device *dev);
static inline bool is_nd_region(struct device *dev)
{
	return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
}
static inline bool is_memory(struct device *dev)
{
	return is_nd_pmem(dev) || is_nd_volatile(dev);
}
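
/*
 * Illustrative sketch (hypothetical helper, not upstream code): these type
 * predicates are typically used to filter a bus walk, e.g. counting only
 * memory-mode (pmem or volatile) regions:
 *
 *	static int count_memory(struct device *dev, void *data)
 *	{
 *		if (is_memory(dev))
 *			(*(int *) data)++;
 *		return 0;
 *	}
 *
 *	device_for_each_child(&nvdimm_bus->dev, &count, count_memory);
 */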
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
void nvdimm_devs_exit(void);
struct nd_region;
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev);
void nd_region_create_ns_seed(struct nd_region *nd_region);
void nd_region_create_btt_seed(struct nd_region *nd_region);
void nd_region_create_pfn_seed(struct nd_region *nd_region);
void nd_region_create_dax_seed(struct nd_region *nd_region);
int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus);
void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus);
void nd_synchronize(void);
void __nd_device_register(struct device *dev);
struct nd_label_id;
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags);
bool nd_is_uuid_unique(struct device *dev, u8 *uuid);
struct nd_region;
struct nvdimm_drvdata;
struct nd_mapping;
void nd_mapping_free_labels(struct nd_mapping *nd_mapping);

int __reserve_free_pmem(struct device *dev, void *data);
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		       struct nd_mapping *nd_mapping);

resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping);
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap);
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
		resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id);
int alias_dpa_busy(struct device *dev, void *data);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns);
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
		struct nd_namespace_common **_ndns);
ssize_t nd_namespace_store(struct device *dev,
		struct nd_namespace_common **_ndns, const char *buf,
		size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);

#if IS_ENABLED(CONFIG_ND_CLAIM)
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
		resource_size_t size);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline int devm_nsio_enable(struct device *dev,
		struct nd_namespace_io *nsio, resource_size_t size)
{
	return -ENXIO;
}

static inline void devm_nsio_disable(struct device *dev,
		struct nd_namespace_io *nsio)
{
}
#endif
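
/*
 * Illustrative sketch (assumed claiming-driver probe path): devm_nsio_enable()
 * requests and maps the namespace I/O range with device-managed lifetime, so
 * a caller typically just checks the result and lets devm handle teardown:
 *
 *	rc = devm_nsio_enable(dev, nsio, resource_size(&nsio->res));
 *	if (rc)
 *		return rc;
 *	... access the mapping via nsio->addr ...
 */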

#ifdef CONFIG_PROVE_LOCKING
extern struct class *nd_class;

enum {
	LOCK_BUS,
	LOCK_NDCTL,
	LOCK_REGION,
	LOCK_DIMM = LOCK_REGION,
	LOCK_NAMESPACE,
	LOCK_CLAIM,
};
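
/*
 * Illustrative note (sketch, not upstream documentation): these values are
 * lockdep subclasses encoding the permitted nesting order
 * bus -> ndctl -> region/dimm -> namespace -> claim.  For example, taking a
 * region lock and then the lock of one of its namespace children is valid:
 *
 *	nd_device_lock(&nd_region->dev);	(nested as LOCK_REGION)
 *	nd_device_lock(&ndns->dev);		(nested as LOCK_NAMESPACE)
 */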

static inline void debug_nvdimm_lock(struct device *dev)
{
	if (is_nd_region(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
	else if (is_nvdimm(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
	else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
	else if (dev->parent && (is_nd_region(dev->parent)))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
	else if (is_nvdimm_bus(dev))
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
	else if (dev->class && dev->class == nd_class)
		mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
	else
		dev_WARN(dev, "unknown lock level\n");
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
	mutex_unlock(&dev->lockdep_mutex);
}

static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
	debug_nvdimm_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
	debug_nvdimm_unlock(dev);
	device_unlock(dev);
}
#else
static inline void nd_device_lock(struct device *dev)
{
	device_lock(dev);
}

static inline void nd_device_unlock(struct device *dev)
{
	device_unlock(dev);
}

static inline void debug_nvdimm_lock(struct device *dev)
{
}

static inline void debug_nvdimm_unlock(struct device *dev)
{
}
#endif
#endif /* __ND_CORE_H__ */