	return 0;
}
+static int intel_security_erase(struct nvdimm *nvdimm,
+		const struct nvdimm_key_data *key)
+{
+	int rc;
+	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+	struct {
+		struct nd_cmd_pkg pkg;
+		struct nd_intel_secure_erase cmd;
+	} nd_cmd = {
+		.pkg = {
+			.nd_family = NVDIMM_FAMILY_INTEL,
+			.nd_size_in = ND_INTEL_PASSPHRASE_SIZE,
+			.nd_size_out = ND_INTEL_STATUS_SIZE,
+			.nd_fw_size = ND_INTEL_STATUS_SIZE,
+			.nd_command = NVDIMM_INTEL_SECURE_ERASE,
+		},
+	};
+
+	if (!test_bit(NVDIMM_INTEL_SECURE_ERASE, &nfit_mem->dsm_mask))
+		return -ENOTTY;
+
+	/* flush all caches before we erase the DIMM */
+	nvdimm_invalidate_cache();
+	memcpy(nd_cmd.cmd.passphrase, key->data,
+			sizeof(nd_cmd.cmd.passphrase));
+	rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL);
+	if (rc < 0)
+		return rc;
+
+	switch (nd_cmd.cmd.status) {
+	case 0:
+		break;
+	case ND_INTEL_STATUS_NOT_SUPPORTED:
+		return -EOPNOTSUPP;
+	case ND_INTEL_STATUS_INVALID_PASS:
+		return -EINVAL;
+	case ND_INTEL_STATUS_INVALID_STATE:
+	default:
+		return -ENXIO;
+	}
+
+	/* DIMM erased, invalidate all CPU caches before we read it */
+	nvdimm_invalidate_cache();
+	return 0;
+}
+
/*
 * TODO: define a cross arch wbinvd equivalent when/if
 * NVDIMM_FAMILY_INTEL command support arrives on another arch.
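
For reference, the payload marshaled by the hunk above pairs a passphrase buffer (nd_size_in = ND_INTEL_PASSPHRASE_SIZE) with a firmware status word (nd_size_out = ND_INTEL_STATUS_SIZE). A sketch of that layout, inferred from those sizes rather than taken from this diff; the authoritative definition lives in the Intel family command header:

/* sketch only: layout inferred from the in/out sizes above */
struct nd_intel_secure_erase {
	u8 passphrase[ND_INTEL_PASSPHRASE_SIZE];
	u32 status;
} __packed;
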
	.disable = intel_security_disable,
#ifdef CONFIG_X86
	.unlock = intel_security_unlock,
+	.erase = intel_security_erase,
#endif
};
#define OPS \
	C( OP_FREEZE, "freeze", 1), \
	C( OP_DISABLE, "disable", 2), \
-	C( OP_UPDATE, "update", 3)
+	C( OP_UPDATE, "update", 3), \
+	C( OP_ERASE, "erase", 2)
#undef C
#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
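
With the two C() definitions in this file, the OPS list above expands first into the op-id enum and then into a name/token-count table used to parse the sysfs command string. A sketch of both expansions; the enum follows directly from the lines above, while the table's struct shape is assumed for illustration and is not part of this hunk:

/* C(a, b, c) -> a */
enum nvdimmsec_op_ids { OP_FREEZE, OP_DISABLE, OP_UPDATE, OP_ERASE };

/* C(a, b, c) -> { a, b, c } (struct shape assumed for illustration) */
static const struct {
	enum nvdimmsec_op_ids id;
	const char *name;
	int args;	/* tokens expected: "erase <keyid>" is two */
} ops[] = {
	{ OP_FREEZE,  "freeze",  1 },
	{ OP_DISABLE, "disable", 2 },
	{ OP_UPDATE,  "update",  3 },
	{ OP_ERASE,   "erase",   2 },
};
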
	} else if (i == OP_UPDATE) {
		dev_dbg(dev, "update %u %u\n", key, newkey);
		rc = nvdimm_security_update(nvdimm, key, newkey);
+	} else if (i == OP_ERASE) {
+		dev_dbg(dev, "erase %u\n", key);
+		rc = nvdimm_security_erase(nvdimm, key);
	} else
		return -EINVAL;
return 0;
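
A hypothetical userspace sketch of how this branch is reached: it assumes an encrypted key holding the DIMM passphrase has already been loaded into the caller's keyring, and the device path and key serial are placeholders rather than values from this patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* nmem0 and key serial 42 are placeholders */
	int fd = open("/sys/bus/nd/devices/nmem0/security", O_WRONLY);

	if (fd < 0)
		return 1;
	/* two whitespace-separated tokens, matching C( OP_ERASE, "erase", 2) */
	dprintf(fd, "erase 42\n");
	close(fd);
	return 0;
}
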
	/* Are there any state mutation ops? */
	if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
-			|| nvdimm->sec.ops->change_key)
+			|| nvdimm->sec.ops->change_key
+			|| nvdimm->sec.ops->erase)
		return a->mode;
	return 0444;
}
int nvdimm_security_disable(struct nvdimm *nvdimm, unsigned int keyid);
int nvdimm_security_update(struct nvdimm *nvdimm, unsigned int keyid,
		unsigned int new_keyid);
+int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid);
#else
static inline int nvdimm_security_disable(struct nvdimm *nvdimm,
		unsigned int keyid)
{
	return -EOPNOTSUPP;
}
+static inline int nvdimm_security_erase(struct nvdimm *nvdimm,
+		unsigned int keyid)
+{
+	return -EOPNOTSUPP;
+}
#endif
/**
static void nvdimm_put_key(struct key *key)
{
+	if (!key)
+		return;
+
	up_read(&key->sem);
	key_put(key);
}
	nvdimm->sec.state = nvdimm_security_state(nvdimm);
	return rc;
}
+
+int nvdimm_security_erase(struct nvdimm *nvdimm, unsigned int keyid)
+{
+	struct device *dev = &nvdimm->dev;
+	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
+	struct key *key;
+	int rc;
+
+	/* The bus lock should be held at the top level of the call stack */
+	lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
+
+	if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
+			|| nvdimm->sec.state < 0)
+		return -EOPNOTSUPP;
+
+	if (atomic_read(&nvdimm->busy)) {
+		dev_warn(dev, "Unable to secure erase while DIMM active.\n");
+		return -EBUSY;
+	}
+
+	if (nvdimm->sec.state >= NVDIMM_SECURITY_FROZEN) {
+		dev_warn(dev, "Incorrect security state: %d\n",
+				nvdimm->sec.state);
+		return -EIO;
+	}
+
+	key = nvdimm_lookup_user_key(nvdimm, keyid, NVDIMM_BASE_KEY);
+	if (!key)
+		return -ENOKEY;
+
+	rc = nvdimm->sec.ops->erase(nvdimm, key_data(key));
+	dev_dbg(dev, "key: %d erase: %s\n", key_serial(key),
+			rc == 0 ? "success" : "fail");
+
+	nvdimm_put_key(key);
+	nvdimm->sec.state = nvdimm_security_state(nvdimm);
+	return rc;
+}
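
The lockdep_assert_held() above documents a calling convention rather than taking the lock itself. A sketch of the expected caller pattern, assuming the sysfs store path uses the usual nvdimm_bus_lock()/nvdimm_bus_unlock() helpers around the dispatch (the wrapper name below is hypothetical):

/* sketch: the erase op is dispatched with the bus reconfig lock held */
static int nvdimm_do_secure_erase(struct device *dev, struct nvdimm *nvdimm,
		unsigned int keyid)
{
	int rc;

	nvdimm_bus_lock(dev);
	rc = nvdimm_security_erase(nvdimm, keyid);
	nvdimm_bus_unlock(dev);

	return rc;
}
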
			const struct nvdimm_key_data *key_data);
	int (*disable)(struct nvdimm *nvdimm,
			const struct nvdimm_key_data *key_data);
+	int (*erase)(struct nvdimm *nvdimm,
+			const struct nvdimm_key_data *key_data);
};
void badrange_init(struct badrange *badrange);