Commit 4ac39c59 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'x86_urgent_for_v6.3_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:
 "There's a little bit more 'movement' in there for my taste but it
  needs to happen and should make the code better after it.

   - Check cmdline_find_option()'s return value before further
     processing

   - Clear temporary storage in the resctrl code to prevent access to a
     nonexistent MSR

   - Add a simple throttling mechanism to protect the hypervisor from
     potentially malicious SEV guests issuing requests in rapid
     succession.

     In order not to jeopardize the sanity of everyone involved in
     maintaining this code, the request-issuing side has received a
     cleanup, split into more or less trivial, small and digestible
     pieces. Otherwise, the code was threatening to become an
     unmaintainable mess.

     Therefore, that cleanup is indirectly marked for stable as well, so
     that there are no differences between the upstream code and the
     stable variant when it comes to backporting more fixes there"

* tag 'x86_urgent_for_v6.3_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Fix use of uninitialized buffer in sme_enable()
  x86/resctrl: Clear staged_config[] before and after it is used
  virt/coco/sev-guest: Add throttling awareness
  virt/coco/sev-guest: Convert the sw_exit_info_2 checking to a switch-case
  virt/coco/sev-guest: Do some code style cleanups
  virt/coco/sev-guest: Carve out the request issuing logic into a helper
  virt/coco/sev-guest: Remove the disable_vmpck label in handle_guest_request()
  virt/coco/sev-guest: Simplify extended guest request handling
  virt/coco/sev-guest: Check SEV_SNP attribute at probe time
parents 995bba43 cbebd68f
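The throttling fix below boils down to a bounded retry loop in the guest driver: when the host reports that a request was throttled, the driver sleeps briefly and retries, giving up after a fixed deadline so the guest is not wedged forever. The following is a minimal userspace sketch of that policy, not the driver's actual code; issue_request() is an illustrative stand-in for snp_issue_guest_request(), and the two constants mirror the SNP_REQ_MAX_RETRY_DURATION and SNP_REQ_RETRY_DELAY values added in the diff.

/*
 * Simplified, self-contained illustration of the retry policy: retry a
 * throttled request every RETRY_DELAY seconds, but give up with
 * -ETIMEDOUT once MAX_RETRY_DURATION has elapsed.
 */
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define MAX_RETRY_DURATION	60	/* seconds, mirrors SNP_REQ_MAX_RETRY_DURATION */
#define RETRY_DELAY		2	/* seconds, mirrors SNP_REQ_RETRY_DELAY */

/* Stand-in for the firmware call: pretend the host throttles the first two attempts. */
static int issue_request(int attempt)
{
	return attempt < 2 ? -EAGAIN : 0;
}

static int issue_with_throttle_retry(void)
{
	time_t start = time(NULL);
	int attempt = 0, rc;

	for (;;) {
		rc = issue_request(attempt++);
		if (rc != -EAGAIN)
			return rc;		/* success or a non-throttling error */
		if (time(NULL) - start > MAX_RETRY_DURATION)
			return -ETIMEDOUT;	/* bounded, as in the driver */
		sleep(RETRY_DELAY);
	}
}

int main(void)
{
	printf("request result: %d\n", issue_with_throttle_retry());
	return 0;
}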
@@ -128,8 +128,9 @@ struct snp_psc_desc {
 	struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
 } __packed;
 
-/* Guest message request error code */
+/* Guest message request error codes */
 #define SNP_GUEST_REQ_INVALID_LEN	BIT_ULL(32)
+#define SNP_GUEST_REQ_ERR_BUSY		BIT_ULL(33)
 
 #define GHCB_MSR_TERM_REQ		0x100
 #define GHCB_MSR_TERM_REASON_SET_POS	12
...
@@ -368,7 +368,6 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 {
 	struct resctrl_schema *s;
 	struct rdtgroup *rdtgrp;
-	struct rdt_domain *dom;
 	struct rdt_resource *r;
 	char *tok, *resname;
 	int ret = 0;
@@ -397,10 +396,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 		goto out;
 	}
 
-	list_for_each_entry(s, &resctrl_schema_all, list) {
-		list_for_each_entry(dom, &s->res->domains, list)
-			memset(dom->staged_config, 0, sizeof(dom->staged_config));
-	}
+	rdt_staged_configs_clear();
 
 	while ((tok = strsep(&buf, "\n")) != NULL) {
 		resname = strim(strsep(&tok, ":"));
@@ -445,6 +441,7 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
 	}
 
 out:
+	rdt_staged_configs_clear();
 	rdtgroup_kn_unlock(of->kn);
 	cpus_read_unlock();
 	return ret ?: nbytes;
...
@@ -555,5 +555,6 @@ void __check_limbo(struct rdt_domain *d, bool force_free);
 void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
 void __init thread_throttle_mode_init(void);
 void __init mbm_config_rftype_init(const char *config);
+void rdt_staged_configs_clear(void);
 
 #endif /* _ASM_X86_RESCTRL_INTERNAL_H */
@@ -78,6 +78,19 @@ void rdt_last_cmd_printf(const char *fmt, ...)
 	va_end(ap);
 }
 
+void rdt_staged_configs_clear(void)
+{
+	struct rdt_resource *r;
+	struct rdt_domain *dom;
+
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	for_each_alloc_capable_rdt_resource(r) {
+		list_for_each_entry(dom, &r->domains, list)
+			memset(dom->staged_config, 0, sizeof(dom->staged_config));
+	}
+}
+
 /*
  * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
  * we can keep a bitmap of free CLOSIDs in a single integer.
@@ -3107,7 +3120,9 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 {
 	struct resctrl_schema *s;
 	struct rdt_resource *r;
-	int ret;
+	int ret = 0;
+
+	rdt_staged_configs_clear();
 
 	list_for_each_entry(s, &resctrl_schema_all, list) {
 		r = s->res;
@@ -3119,20 +3134,22 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 		} else {
 			ret = rdtgroup_init_cat(s, rdtgrp->closid);
 			if (ret < 0)
-				return ret;
+				goto out;
 		}
 
 		ret = resctrl_arch_update_domains(r, rdtgrp->closid);
 		if (ret < 0) {
 			rdt_last_cmd_puts("Failed to initialize allocations\n");
-			return ret;
+			goto out;
 		}
 
 	}
 
 	rdtgrp->mode = RDT_MODE_SHAREABLE;
 
-	return 0;
+out:
+	rdt_staged_configs_clear();
+	return ret;
 }
 
 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
...
@@ -2183,9 +2183,6 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
 	struct ghcb *ghcb;
 	int ret;
 
-	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
-		return -ENODEV;
-
 	if (!fw_err)
 		return -EINVAL;
 
@@ -2212,15 +2209,26 @@ int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned
 	if (ret)
 		goto e_put;
 
-	if (ghcb->save.sw_exit_info_2) {
-		/* Number of expected pages are returned in RBX */
-		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
-		    ghcb->save.sw_exit_info_2 == SNP_GUEST_REQ_INVALID_LEN)
-			input->data_npages = ghcb_get_rbx(ghcb);
+	*fw_err = ghcb->save.sw_exit_info_2;
+	switch (*fw_err) {
+	case 0:
+		break;
 
-		*fw_err = ghcb->save.sw_exit_info_2;
+	case SNP_GUEST_REQ_ERR_BUSY:
+		ret = -EAGAIN;
+		break;
 
+	case SNP_GUEST_REQ_INVALID_LEN:
+		/* Number of expected pages are returned in RBX */
+		if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
+			input->data_npages = ghcb_get_rbx(ghcb);
+			ret = -ENOSPC;
+			break;
+		}
+		fallthrough;
+	default:
 		ret = -EIO;
+		break;
 	}
 
 e_put:
...
@@ -600,7 +600,8 @@ void __init sme_enable(struct boot_params *bp)
 	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
 				     ((u64)bp->ext_cmd_line_ptr << 32));
 
-	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
+	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
+		return;
 
 	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
 		sme_me_mask = me_mask;
...
@@ -31,6 +31,9 @@
 #define AAD_LEN		48
 #define MSG_HDR_VER	1
 
+#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
+#define SNP_REQ_RETRY_DELAY		(2*HZ)
+
 struct snp_guest_crypto {
 	struct crypto_aead *tfm;
 	u8 *iv, *authtag;
@@ -318,26 +321,14 @@ static int enc_payload(struct snp_guest_dev *snp_dev, u64 seqno, int version, u8
 	return __enc_payload(snp_dev, req, payload, sz);
 }
 
-static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
-				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
-				u32 resp_sz, __u64 *fw_err)
+static int __handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, __u64 *fw_err)
 {
-	unsigned long err;
-	u64 seqno;
+	unsigned long err = 0xff, override_err = 0;
+	unsigned long req_start = jiffies;
+	unsigned int override_npages = 0;
 	int rc;
 
-	/* Get message sequence and verify that its a non-zero */
-	seqno = snp_get_msg_seqno(snp_dev);
-	if (!seqno)
-		return -EIO;
-
-	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
-
-	/* Encrypt the userspace provided payload */
-	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
-	if (rc)
-		return rc;
-
+retry_request:
 	/*
 	 * Call firmware to process the request. In this function the encrypted
 	 * message enters shared memory with the host. So after this call the
@@ -345,18 +336,24 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
 	 * prevent reuse of the IV.
 	 */
 	rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+	switch (rc) {
+	case -ENOSPC:
+		/*
+		 * If the extended guest request fails due to having too
+		 * small of a certificate data buffer, retry the same
+		 * guest request without the extended data request in
+		 * order to increment the sequence number and thus avoid
+		 * IV reuse.
+		 */
+		override_npages = snp_dev->input.data_npages;
+		exit_code = SVM_VMGEXIT_GUEST_REQUEST;
 
-	/*
-	 * If the extended guest request fails due to having too small of a
-	 * certificate data buffer, retry the same guest request without the
-	 * extended data request in order to increment the sequence number
-	 * and thus avoid IV reuse.
-	 */
-	if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST &&
-	    err == SNP_GUEST_REQ_INVALID_LEN) {
-		const unsigned int certs_npages = snp_dev->input.data_npages;
-
-		exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+		/*
+		 * Override the error to inform callers the given extended
+		 * request buffer size was too small and give the caller the
+		 * required buffer size.
+		 */
+		override_err = SNP_GUEST_REQ_INVALID_LEN;
 
 		/*
 		 * If this call to the firmware succeeds, the sequence number can
@@ -366,15 +363,20 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
 		 * of the VMPCK and the error code being propagated back to the
 		 * user as an ioctl() return code.
 		 */
-		rc = snp_issue_guest_request(exit_code, &snp_dev->input, &err);
+		goto retry_request;
 
-		/*
-		 * Override the error to inform callers the given extended
-		 * request buffer size was too small and give the caller the
-		 * required buffer size.
-		 */
-		err = SNP_GUEST_REQ_INVALID_LEN;
-		snp_dev->input.data_npages = certs_npages;
+	/*
+	 * The host may return SNP_GUEST_REQ_ERR_EBUSY if the request has been
+	 * throttled. Retry in the driver to avoid returning and reusing the
+	 * message sequence number on a different message.
+	 */
+	case -EAGAIN:
+		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
+			rc = -ETIMEDOUT;
+			break;
+		}
+		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
+		goto retry_request;
 	}
 
 	/*
@@ -386,7 +388,10 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
 	snp_inc_msg_seqno(snp_dev);
 
 	if (fw_err)
-		*fw_err = err;
+		*fw_err = override_err ?: err;
+
+	if (override_npages)
+		snp_dev->input.data_npages = override_npages;
 
 	/*
 	 * If an extended guest request was issued and the supplied certificate
@@ -394,29 +399,49 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, in
 	 * prevent IV reuse. If the standard request was successful, return -EIO
 	 * back to the caller as would have originally been returned.
 	 */
-	if (!rc && err == SNP_GUEST_REQ_INVALID_LEN)
+	if (!rc && override_err == SNP_GUEST_REQ_INVALID_LEN)
+		return -EIO;
+
+	return rc;
+}
+
+static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code, int msg_ver,
+				u8 type, void *req_buf, size_t req_sz, void *resp_buf,
+				u32 resp_sz, __u64 *fw_err)
+{
+	u64 seqno;
+	int rc;
+
+	/* Get message sequence and verify that its a non-zero */
+	seqno = snp_get_msg_seqno(snp_dev);
+	if (!seqno)
 		return -EIO;
 
+	memset(snp_dev->response, 0, sizeof(struct snp_guest_msg));
+
+	/* Encrypt the userspace provided payload */
+	rc = enc_payload(snp_dev, seqno, msg_ver, type, req_buf, req_sz);
+	if (rc)
+		return rc;
+
+	rc = __handle_guest_request(snp_dev, exit_code, fw_err);
 	if (rc) {
-		dev_alert(snp_dev->dev,
-			  "Detected error from ASP request. rc: %d, fw_err: %llu\n",
-			  rc, *fw_err);
-		goto disable_vmpck;
+		if (rc == -EIO && *fw_err == SNP_GUEST_REQ_INVALID_LEN)
+			return rc;
+
+		dev_alert(snp_dev->dev, "Detected error from ASP request. rc: %d, fw_err: %llu\n", rc, *fw_err);
+		snp_disable_vmpck(snp_dev);
+		return rc;
 	}
 
 	rc = verify_and_dec_payload(snp_dev, resp_buf, resp_sz);
 	if (rc) {
-		dev_alert(snp_dev->dev,
-			  "Detected unexpected decode failure from ASP. rc: %d\n",
-			  rc);
-		goto disable_vmpck;
+		dev_alert(snp_dev->dev, "Detected unexpected decode failure from ASP. rc: %d\n", rc);
+		snp_disable_vmpck(snp_dev);
+		return rc;
 	}
 
 	return 0;
-
-disable_vmpck:
-	snp_disable_vmpck(snp_dev);
-	return rc;
 }
 
 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
@@ -703,6 +728,9 @@ static int __init sev_guest_probe(struct platform_device *pdev)
 	void __iomem *mapping;
 	int ret;
 
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return -ENODEV;
+
 	if (!dev->platform_data)
 		return -ENODEV;
 
...