Diffstat (limited to 'drivers/cxl')
-rw-r--r-- | drivers/cxl/core/core.h   |  11
-rw-r--r-- | drivers/cxl/core/hdm.c    | 178
-rw-r--r-- | drivers/cxl/core/mbox.c   | 151
-rw-r--r-- | drivers/cxl/core/memdev.c | 227
-rw-r--r-- | drivers/cxl/core/pci.c    | 146
-rw-r--r-- | drivers/cxl/core/pmem.c   |   6
-rw-r--r-- | drivers/cxl/core/port.c   |  41
-rw-r--r-- | drivers/cxl/core/region.c | 157
-rw-r--r-- | drivers/cxl/core/trace.c  |  94
-rw-r--r-- | drivers/cxl/core/trace.h  | 103
-rw-r--r-- | drivers/cxl/cxl.h         |   8
-rw-r--r-- | drivers/cxl/cxlmem.h      | 111
-rw-r--r-- | drivers/cxl/cxlpci.h      |  14
-rw-r--r-- | drivers/cxl/mem.c         |  71
-rw-r--r-- | drivers/cxl/pci.c         |  53
-rw-r--r-- | drivers/cxl/port.c        |  22
16 files changed, 1124 insertions, 269 deletions
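The hunks below export three new poison-handling entry points from the CXL core: cxl_trigger_poison_list(), cxl_inject_poison(), and cxl_clear_poison(). For orientation only, here is a minimal sketch (not part of the patch) of how a hypothetical in-kernel caller could exercise them; the function example_poison_roundtrip() and its calling context are invented for illustration, and it assumes a cxl_memdev bound to an endpoint port and a 64-byte-aligned DPA within the device's range.

/*
 * Illustrative sketch only; not part of this diff. It relies on the
 * helpers exported by the memdev.c hunks below and assumes a valid
 * struct cxl_memdev whose endpoint port has probed.
 */
#include <linux/types.h>
#include <cxlmem.h>

static int example_poison_roundtrip(struct cxl_memdev *cxlmd, u64 dpa)
{
	int rc;

	/* Emit cxl_poison trace events for all currently poisoned DPAs */
	rc = cxl_trigger_poison_list(cxlmd);
	if (rc)
		return rc;

	/*
	 * Inject, then clear, poison at a 64-byte-aligned DPA. Both
	 * helpers are no-ops unless CONFIG_DEBUG_FS is enabled.
	 */
	rc = cxl_inject_poison(cxlmd, dpa);
	if (rc)
		return rc;

	return cxl_clear_poison(cxlmd, dpa);
}

In the full series these helpers back user-visible memdev controls rather than being called directly; the sketch only shows the calling convention of the exported symbols.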
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h index cde475e13216..27f0968449de 100644 --- a/drivers/cxl/core/core.h +++ b/drivers/cxl/core/core.h @@ -25,7 +25,12 @@ void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled); #define CXL_DAX_REGION_TYPE(x) (&cxl_dax_region_type) int cxl_region_init(void); void cxl_region_exit(void); +int cxl_get_poison_by_endpoint(struct cxl_port *port); #else +static inline int cxl_get_poison_by_endpoint(struct cxl_port *port) +{ + return 0; +} static inline void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled) { } @@ -64,4 +69,10 @@ int cxl_memdev_init(void); void cxl_memdev_exit(void); void cxl_mbox_init(void); +enum cxl_poison_trace_type { + CXL_POISON_TRACE_LIST, + CXL_POISON_TRACE_INJECT, + CXL_POISON_TRACE_CLEAR, +}; + #endif /* __CXL_CORE_H__ */ diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c index 45deda18ed32..7889ff203a34 100644 --- a/drivers/cxl/core/hdm.c +++ b/drivers/cxl/core/hdm.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2022 Intel Corporation. All rights reserved. */ -#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/seq_file.h> #include <linux/device.h> #include <linux/delay.h> @@ -93,33 +92,57 @@ static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb, cxl_probe_component_regs(&port->dev, crb, &map.component_map); if (!map.component_map.hdm_decoder.valid) { - dev_err(&port->dev, "HDM decoder registers invalid\n"); - return -ENXIO; + dev_dbg(&port->dev, "HDM decoder registers not implemented\n"); + /* unique error code to indicate no HDM decoder capability */ + return -ENODEV; } return cxl_map_component_regs(&port->dev, regs, &map, BIT(CXL_CM_CAP_CAP_ID_HDM)); } -static struct cxl_hdm *devm_cxl_setup_emulated_hdm(struct cxl_port *port, - struct cxl_endpoint_dvsec_info *info) +static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info) { - struct device *dev = &port->dev; struct cxl_hdm *cxlhdm; + void __iomem *hdm; + u32 ctrl; + int i; - if (!info->mem_enabled) - return ERR_PTR(-ENODEV); + if (!info) + return false; - cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL); - if (!cxlhdm) - return ERR_PTR(-ENOMEM); + cxlhdm = dev_get_drvdata(&info->port->dev); + hdm = cxlhdm->regs.hdm_decoder; - cxlhdm->port = port; - cxlhdm->decoder_count = info->ranges; - cxlhdm->target_count = info->ranges; - dev_set_drvdata(&port->dev, cxlhdm); + if (!hdm) + return true; - return cxlhdm; + /* + * If HDM decoders are present and the driver is in control of + * Mem_Enable skip DVSEC based emulation + */ + if (!info->mem_enabled) + return false; + + /* + * If any decoders are committed already, there should not be any + * emulated DVSEC decoders. 
+ */ + for (i = 0; i < cxlhdm->decoder_count; i++) { + ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i)); + dev_dbg(&info->port->dev, + "decoder%d.%d: committed: %ld base: %#x_%.8x size: %#x_%.8x\n", + info->port->id, i, + FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl), + readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i)), + readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(i)), + readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i)), + readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i))); + if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl)) + return false; + } + + return true; } /** @@ -138,13 +161,14 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL); if (!cxlhdm) return ERR_PTR(-ENOMEM); - cxlhdm->port = port; - crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE); - if (!crb) { - if (info && info->mem_enabled) - return devm_cxl_setup_emulated_hdm(port, info); + dev_set_drvdata(dev, cxlhdm); + crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE); + if (!crb && info && info->mem_enabled) { + cxlhdm->decoder_count = info->ranges; + return cxlhdm; + } else if (!crb) { dev_err(dev, "No component registers mapped\n"); return ERR_PTR(-ENXIO); } @@ -160,7 +184,15 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port, return ERR_PTR(-ENXIO); } - dev_set_drvdata(dev, cxlhdm); + /* + * Now that the hdm capability is parsed, decide if range + * register emulation is needed and fixup cxlhdm accordingly. + */ + if (should_emulate_decoders(info)) { + dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges, + info->ranges > 1 ? "s" : ""); + cxlhdm->decoder_count = info->ranges; + } return cxlhdm; } @@ -245,8 +277,11 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, lockdep_assert_held_write(&cxl_dpa_rwsem); - if (!len) - goto success; + if (!len) { + dev_warn(dev, "decoder%d.%d: empty reservation attempted\n", + port->id, cxled->cxld.id); + return -EINVAL; + } if (cxled->dpa_res) { dev_dbg(dev, "decoder%d.%d: existing allocation %pr assigned\n", @@ -299,7 +334,6 @@ static int __cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled, cxled->mode = CXL_DECODER_MIXED; } -success: port->hdm_end++; get_device(&cxled->cxld.dev); return 0; @@ -714,14 +748,20 @@ static int cxl_decoder_reset(struct cxl_decoder *cxld) return 0; } -static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port, - struct cxl_decoder *cxld, int which, - struct cxl_endpoint_dvsec_info *info) +static int cxl_setup_hdm_decoder_from_dvsec( + struct cxl_port *port, struct cxl_decoder *cxld, u64 *dpa_base, + int which, struct cxl_endpoint_dvsec_info *info) { + struct cxl_endpoint_decoder *cxled; + u64 len; + int rc; + if (!is_cxl_endpoint(port)) return -EOPNOTSUPP; - if (!range_len(&info->dvsec_range[which])) + cxled = to_cxl_endpoint_decoder(&cxld->dev); + len = range_len(&info->dvsec_range[which]); + if (!len) return -ENOENT; cxld->target_type = CXL_DECODER_EXPANDER; @@ -736,41 +776,25 @@ static int cxl_setup_hdm_decoder_from_dvsec(struct cxl_port *port, cxld->flags |= CXL_DECODER_F_ENABLE | CXL_DECODER_F_LOCK; port->commit_end = cxld->id; - return 0; -} - -static bool should_emulate_decoders(struct cxl_port *port) -{ - struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev); - void __iomem *hdm = cxlhdm->regs.hdm_decoder; - u32 ctrl; - int i; - - if (!is_cxl_endpoint(cxlhdm->port)) - return false; - - if (!hdm) - return true; - - /* - * If any decoders are committed already, there should not be any - * emulated 
DVSEC decoders. - */ - for (i = 0; i < cxlhdm->decoder_count; i++) { - ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(i)); - if (FIELD_GET(CXL_HDM_DECODER0_CTRL_COMMITTED, ctrl)) - return false; + rc = devm_cxl_dpa_reserve(cxled, *dpa_base, len, 0); + if (rc) { + dev_err(&port->dev, + "decoder%d.%d: Failed to reserve DPA range %#llx - %#llx\n (%d)", + port->id, cxld->id, *dpa_base, *dpa_base + len - 1, rc); + return rc; } + *dpa_base += len; + cxled->state = CXL_DECODER_STATE_AUTO; - return true; + return 0; } static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, int *target_map, void __iomem *hdm, int which, u64 *dpa_base, struct cxl_endpoint_dvsec_info *info) { - struct cxl_endpoint_decoder *cxled = NULL; - u64 size, base, skip, dpa_size; + u64 size, base, skip, dpa_size, lo, hi; + struct cxl_endpoint_decoder *cxled; bool committed; u32 remainder; int i, rc; @@ -780,15 +804,17 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, unsigned char target_id[8]; } target_list; - if (should_emulate_decoders(port)) - return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info); - - if (is_endpoint_decoder(&cxld->dev)) - cxled = to_cxl_endpoint_decoder(&cxld->dev); + if (should_emulate_decoders(info)) + return cxl_setup_hdm_decoder_from_dvsec(port, cxld, dpa_base, + which, info); ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(which)); - base = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which)); - size = ioread64_hi_lo(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which)); + lo = readl(hdm + CXL_HDM_DECODER0_BASE_LOW_OFFSET(which)); + hi = readl(hdm + CXL_HDM_DECODER0_BASE_HIGH_OFFSET(which)); + base = (hi << 32) + lo; + lo = readl(hdm + CXL_HDM_DECODER0_SIZE_LOW_OFFSET(which)); + hi = readl(hdm + CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(which)); + size = (hi << 32) + lo; committed = !!(ctrl & CXL_HDM_DECODER0_CTRL_COMMITTED); cxld->commit = cxl_decoder_commit; cxld->reset = cxl_decoder_reset; @@ -806,9 +832,6 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, .end = base + size - 1, }; - if (cxled && !committed && range_len(&info->dvsec_range[which])) - return cxl_setup_hdm_decoder_from_dvsec(port, cxld, which, info); - /* decoders are enabled if committed */ if (committed) { cxld->flags |= CXL_DECODER_F_ENABLE; @@ -824,6 +847,13 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, port->id, cxld->id); return -ENXIO; } + + if (size == 0) { + dev_warn(&port->dev, + "decoder%d.%d: Committed with zero size\n", + port->id, cxld->id); + return -ENXIO; + } port->commit_end = cxld->id; } else { /* unless / until type-2 drivers arrive, assume type-3 */ @@ -846,9 +876,14 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, if (rc) return rc; - if (!cxled) { - target_list.value = - ioread64_hi_lo(hdm + CXL_HDM_DECODER0_TL_LOW(which)); + dev_dbg(&port->dev, "decoder%d.%d: range: %#llx-%#llx iw: %d ig: %d\n", + port->id, cxld->id, cxld->hpa_range.start, cxld->hpa_range.end, + cxld->interleave_ways, cxld->interleave_granularity); + + if (!info) { + lo = readl(hdm + CXL_HDM_DECODER0_TL_LOW(which)); + hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which)); + target_list.value = (hi << 32) + lo; for (i = 0; i < cxld->interleave_ways; i++) target_map[i] = target_list.target_id[i]; @@ -865,7 +900,10 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld, port->id, cxld->id, size, cxld->interleave_ways); return -ENXIO; } - skip = ioread64_hi_lo(hdm + 
CXL_HDM_DECODER0_SKIP_LOW(which)); + lo = readl(hdm + CXL_HDM_DECODER0_SKIP_LOW(which)); + hi = readl(hdm + CXL_HDM_DECODER0_SKIP_HIGH(which)); + skip = (hi << 32) + lo; + cxled = to_cxl_endpoint_decoder(&cxld->dev); rc = devm_cxl_dpa_reserve(cxled, *dpa_base + skip, dpa_size, skip); if (rc) { dev_err(&port->dev, diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index f2addb457172..23b9ff920d7e 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -1,10 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ -#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/security.h> #include <linux/debugfs.h> #include <linux/ktime.h> #include <linux/mutex.h> +#include <asm/unaligned.h> +#include <cxlpci.h> #include <cxlmem.h> #include <cxl.h> @@ -61,12 +62,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { CXL_CMD(SET_ALERT_CONFIG, 0xc, 0, 0), CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0), CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0), - CXL_CMD(GET_POISON, 0x10, CXL_VARIABLE_PAYLOAD, 0), - CXL_CMD(INJECT_POISON, 0x8, 0, 0), - CXL_CMD(CLEAR_POISON, 0x48, 0, 0), CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0), - CXL_CMD(SCAN_MEDIA, 0x11, 0, 0), - CXL_CMD(GET_SCAN_MEDIA, 0, CXL_VARIABLE_PAYLOAD, 0), }; /* @@ -87,6 +83,9 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = { * * CXL_MBOX_OP_[GET_]SCAN_MEDIA: The kernel provides a native error list that * is kept up to date with patrol notifications and error management. + * + * CXL_MBOX_OP_[GET_,INJECT_,CLEAR_]POISON: These commands require kernel + * driver orchestration for safety. */ static u16 cxl_disabled_raw_commands[] = { CXL_MBOX_OP_ACTIVATE_FW, @@ -95,6 +94,9 @@ static u16 cxl_disabled_raw_commands[] = { CXL_MBOX_OP_SET_SHUTDOWN_STATE, CXL_MBOX_OP_SCAN_MEDIA, CXL_MBOX_OP_GET_SCAN_MEDIA, + CXL_MBOX_OP_GET_POISON, + CXL_MBOX_OP_INJECT_POISON, + CXL_MBOX_OP_CLEAR_POISON, }; /* @@ -119,6 +121,43 @@ static bool cxl_is_security_command(u16 opcode) return false; } +static bool cxl_is_poison_command(u16 opcode) +{ +#define CXL_MBOX_OP_POISON_CMDS 0x43 + + if ((opcode >> 8) == CXL_MBOX_OP_POISON_CMDS) + return true; + + return false; +} + +static void cxl_set_poison_cmd_enabled(struct cxl_poison_state *poison, + u16 opcode) +{ + switch (opcode) { + case CXL_MBOX_OP_GET_POISON: + set_bit(CXL_POISON_ENABLED_LIST, poison->enabled_cmds); + break; + case CXL_MBOX_OP_INJECT_POISON: + set_bit(CXL_POISON_ENABLED_INJECT, poison->enabled_cmds); + break; + case CXL_MBOX_OP_CLEAR_POISON: + set_bit(CXL_POISON_ENABLED_CLEAR, poison->enabled_cmds); + break; + case CXL_MBOX_OP_GET_SCAN_MEDIA_CAPS: + set_bit(CXL_POISON_ENABLED_SCAN_CAPS, poison->enabled_cmds); + break; + case CXL_MBOX_OP_SCAN_MEDIA: + set_bit(CXL_POISON_ENABLED_SCAN_MEDIA, poison->enabled_cmds); + break; + case CXL_MBOX_OP_GET_SCAN_MEDIA: + set_bit(CXL_POISON_ENABLED_SCAN_RESULTS, poison->enabled_cmds); + break; + default: + break; + } +} + static struct cxl_mem_command *cxl_mem_find_command(u16 opcode) { struct cxl_mem_command *c; @@ -634,13 +673,18 @@ static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel) u16 opcode = le16_to_cpu(cel_entry[i].opcode); struct cxl_mem_command *cmd = cxl_mem_find_command(opcode); - if (!cmd) { + if (!cmd && !cxl_is_poison_command(opcode)) { dev_dbg(cxlds->dev, "Opcode 0x%04x unsupported by driver\n", opcode); continue; } - set_bit(cmd->info.id, cxlds->enabled_cmds); + if (cmd) + set_bit(cmd->info.id, cxlds->enabled_cmds); + + 
if (cxl_is_poison_command(opcode)) + cxl_set_poison_cmd_enabled(&cxlds->poison, opcode); + dev_dbg(cxlds->dev, "Opcode 0x%04x enabled\n", opcode); } } @@ -994,6 +1038,7 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds) /* See CXL 2.0 Table 175 Identify Memory Device Output Payload */ struct cxl_mbox_identify id; struct cxl_mbox_cmd mbox_cmd; + u32 val; int rc; mbox_cmd = (struct cxl_mbox_cmd) { @@ -1017,6 +1062,11 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds) cxlds->lsa_size = le32_to_cpu(id.lsa_size); memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision)); + if (test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds)) { + val = get_unaligned_le24(id.poison_list_max_mer); + cxlds->poison.max_errors = min_t(u32, val, CXL_POISON_LIST_MAX); + } + return 0; } EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL); @@ -1107,6 +1157,91 @@ int cxl_set_timestamp(struct cxl_dev_state *cxlds) } EXPORT_SYMBOL_NS_GPL(cxl_set_timestamp, CXL); +int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len, + struct cxl_region *cxlr) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_mbox_poison_out *po; + struct cxl_mbox_poison_in pi; + struct cxl_mbox_cmd mbox_cmd; + int nr_records = 0; + int rc; + + rc = mutex_lock_interruptible(&cxlds->poison.lock); + if (rc) + return rc; + + po = cxlds->poison.list_out; + pi.offset = cpu_to_le64(offset); + pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT); + + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_GET_POISON, + .size_in = sizeof(pi), + .payload_in = &pi, + .size_out = cxlds->payload_size, + .payload_out = po, + .min_out = struct_size(po, record, 0), + }; + + do { + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); + if (rc) + break; + + for (int i = 0; i < le16_to_cpu(po->count); i++) + trace_cxl_poison(cxlmd, cxlr, &po->record[i], + po->flags, po->overflow_ts, + CXL_POISON_TRACE_LIST); + + /* Protect against an uncleared _FLAG_MORE */ + nr_records = nr_records + le16_to_cpu(po->count); + if (nr_records >= cxlds->poison.max_errors) { + dev_dbg(&cxlmd->dev, "Max Error Records reached: %d\n", + nr_records); + break; + } + } while (po->flags & CXL_POISON_FLAG_MORE); + + mutex_unlock(&cxlds->poison.lock); + return rc; +} +EXPORT_SYMBOL_NS_GPL(cxl_mem_get_poison, CXL); + +static void free_poison_buf(void *buf) +{ + kvfree(buf); +} + +/* Get Poison List output buffer is protected by cxlds->poison.lock */ +static int cxl_poison_alloc_buf(struct cxl_dev_state *cxlds) +{ + cxlds->poison.list_out = kvmalloc(cxlds->payload_size, GFP_KERNEL); + if (!cxlds->poison.list_out) + return -ENOMEM; + + return devm_add_action_or_reset(cxlds->dev, free_poison_buf, + cxlds->poison.list_out); +} + +int cxl_poison_state_init(struct cxl_dev_state *cxlds) +{ + int rc; + + if (!test_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds)) + return 0; + + rc = cxl_poison_alloc_buf(cxlds); + if (rc) { + clear_bit(CXL_POISON_ENABLED_LIST, cxlds->poison.enabled_cmds); + return rc; + } + + mutex_init(&cxlds->poison.lock); + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_poison_state_init, CXL); + struct cxl_dev_state *cxl_dev_state_create(struct device *dev) { struct cxl_dev_state *cxlds; diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c index 28a05f2fe32d..057a43267290 100644 --- a/drivers/cxl/core/memdev.c +++ b/drivers/cxl/core/memdev.c @@ -6,6 +6,7 @@ #include <linux/idr.h> #include <linux/pci.h> #include <cxlmem.h> +#include "trace.h" #include "core.h" static DECLARE_RWSEM(cxl_memdev_rwsem); @@ -106,6 
+107,232 @@ static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(numa_node); +static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + u64 offset, length; + int rc = 0; + + /* CXL 3.0 Spec 8.2.9.8.4.1 Separate pmem and ram poison requests */ + if (resource_size(&cxlds->pmem_res)) { + offset = cxlds->pmem_res.start; + length = resource_size(&cxlds->pmem_res); + rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); + if (rc) + return rc; + } + if (resource_size(&cxlds->ram_res)) { + offset = cxlds->ram_res.start; + length = resource_size(&cxlds->ram_res); + rc = cxl_mem_get_poison(cxlmd, offset, length, NULL); + /* + * Invalid Physical Address is not an error for + * volatile addresses. Device support is optional. + */ + if (rc == -EFAULT) + rc = 0; + } + return rc; +} + +int cxl_trigger_poison_list(struct cxl_memdev *cxlmd) +{ + struct cxl_port *port; + int rc; + + port = dev_get_drvdata(&cxlmd->dev); + if (!port || !is_cxl_endpoint(port)) + return -EINVAL; + + rc = down_read_interruptible(&cxl_dpa_rwsem); + if (rc) + return rc; + + if (port->commit_end == -1) { + /* No regions mapped to this memdev */ + rc = cxl_get_poison_by_memdev(cxlmd); + } else { + /* Regions mapped, collect poison by endpoint */ + rc = cxl_get_poison_by_endpoint(port); + } + up_read(&cxl_dpa_rwsem); + + return rc; +} +EXPORT_SYMBOL_NS_GPL(cxl_trigger_poison_list, CXL); + +struct cxl_dpa_to_region_context { + struct cxl_region *cxlr; + u64 dpa; +}; + +static int __cxl_dpa_to_region(struct device *dev, void *arg) +{ + struct cxl_dpa_to_region_context *ctx = arg; + struct cxl_endpoint_decoder *cxled; + u64 dpa = ctx->dpa; + + if (!is_endpoint_decoder(dev)) + return 0; + + cxled = to_cxl_endpoint_decoder(dev); + if (!cxled->dpa_res || !resource_size(cxled->dpa_res)) + return 0; + + if (dpa > cxled->dpa_res->end || dpa < cxled->dpa_res->start) + return 0; + + dev_dbg(dev, "dpa:0x%llx mapped in region:%s\n", dpa, + dev_name(&cxled->cxld.region->dev)); + + ctx->cxlr = cxled->cxld.region; + + return 1; +} + +static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa) +{ + struct cxl_dpa_to_region_context ctx; + struct cxl_port *port; + + ctx = (struct cxl_dpa_to_region_context) { + .dpa = dpa, + }; + port = dev_get_drvdata(&cxlmd->dev); + if (port && is_cxl_endpoint(port) && port->commit_end != -1) + device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region); + + return ctx.cxlr; +} + +static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return 0; + + if (!resource_size(&cxlds->dpa_res)) { + dev_dbg(cxlds->dev, "device has no dpa resource\n"); + return -EINVAL; + } + if (dpa < cxlds->dpa_res.start || dpa > cxlds->dpa_res.end) { + dev_dbg(cxlds->dev, "dpa:0x%llx not in resource:%pR\n", + dpa, &cxlds->dpa_res); + return -EINVAL; + } + if (!IS_ALIGNED(dpa, 64)) { + dev_dbg(cxlds->dev, "dpa:0x%llx is not 64-byte aligned\n", dpa); + return -EINVAL; + } + + return 0; +} + +int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_mbox_inject_poison inject; + struct cxl_poison_record record; + struct cxl_mbox_cmd mbox_cmd; + struct cxl_region *cxlr; + int rc; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return 0; + + rc = down_read_interruptible(&cxl_dpa_rwsem); + if (rc) + return rc; + + rc = cxl_validate_poison_dpa(cxlmd, dpa); + if (rc) + goto 
out; + + inject.address = cpu_to_le64(dpa); + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_INJECT_POISON, + .size_in = sizeof(inject), + .payload_in = &inject, + }; + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); + if (rc) + goto out; + + cxlr = cxl_dpa_to_region(cxlmd, dpa); + if (cxlr) + dev_warn_once(cxlds->dev, + "poison inject dpa:%#llx region: %s\n", dpa, + dev_name(&cxlr->dev)); + + record = (struct cxl_poison_record) { + .address = cpu_to_le64(dpa), + .length = cpu_to_le32(1), + }; + trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_INJECT); +out: + up_read(&cxl_dpa_rwsem); + + return rc; +} +EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, CXL); + +int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa) +{ + struct cxl_dev_state *cxlds = cxlmd->cxlds; + struct cxl_mbox_clear_poison clear; + struct cxl_poison_record record; + struct cxl_mbox_cmd mbox_cmd; + struct cxl_region *cxlr; + int rc; + + if (!IS_ENABLED(CONFIG_DEBUG_FS)) + return 0; + + rc = down_read_interruptible(&cxl_dpa_rwsem); + if (rc) + return rc; + + rc = cxl_validate_poison_dpa(cxlmd, dpa); + if (rc) + goto out; + + /* + * In CXL 3.0 Spec 8.2.9.8.4.3, the Clear Poison mailbox command + * is defined to accept 64 bytes of write-data, along with the + * address to clear. This driver uses zeroes as write-data. + */ + clear = (struct cxl_mbox_clear_poison) { + .address = cpu_to_le64(dpa) + }; + + mbox_cmd = (struct cxl_mbox_cmd) { + .opcode = CXL_MBOX_OP_CLEAR_POISON, + .size_in = sizeof(clear), + .payload_in = &clear, + }; + + rc = cxl_internal_send_cmd(cxlds, &mbox_cmd); + if (rc) + goto out; + + cxlr = cxl_dpa_to_region(cxlmd, dpa); + if (cxlr) + dev_warn_once(cxlds->dev, "poison clear dpa:%#llx region: %s\n", + dpa, dev_name(&cxlr->dev)); + + record = (struct cxl_poison_record) { + .address = cpu_to_le64(dpa), + .length = cpu_to_le32(1), + }; + trace_cxl_poison(cxlmd, cxlr, &record, 0, 0, CXL_POISON_TRACE_CLEAR); +out: + up_read(&cxl_dpa_rwsem); + + return rc; +} +EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, CXL); + static struct attribute *cxl_memdev_attributes[] = { &dev_attr_serial.attr, &dev_attr_firmware_version.attr, diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 7328a2552411..f332fe7af92b 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -441,79 +441,33 @@ EXPORT_SYMBOL_NS_GPL(cxl_hdm_decode_init, CXL); #define CXL_DOE_TABLE_ACCESS_LAST_ENTRY 0xffff #define CXL_DOE_PROTOCOL_TABLE_ACCESS 2 -static struct pci_doe_mb *find_cdat_doe(struct device *uport) -{ - struct cxl_memdev *cxlmd; - struct cxl_dev_state *cxlds; - unsigned long index; - void *entry; - - cxlmd = to_cxl_memdev(uport); - cxlds = cxlmd->cxlds; - - xa_for_each(&cxlds->doe_mbs, index, entry) { - struct pci_doe_mb *cur = entry; - - if (pci_doe_supports_prot(cur, PCI_DVSEC_VENDOR_ID_CXL, - CXL_DOE_PROTOCOL_TABLE_ACCESS)) - return cur; - } - - return NULL; -} - -#define CDAT_DOE_REQ(entry_handle) \ +#define CDAT_DOE_REQ(entry_handle) cpu_to_le32 \ (FIELD_PREP(CXL_DOE_TABLE_ACCESS_REQ_CODE, \ CXL_DOE_TABLE_ACCESS_REQ_CODE_READ) | \ FIELD_PREP(CXL_DOE_TABLE_ACCESS_TABLE_TYPE, \ CXL_DOE_TABLE_ACCESS_TABLE_TYPE_CDATA) | \ FIELD_PREP(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, (entry_handle))) -static void cxl_doe_task_complete(struct pci_doe_task *task) -{ - complete(task->private); -} - -struct cdat_doe_task { - u32 request_pl; - u32 response_pl[32]; - struct completion c; - struct pci_doe_task task; -}; - -#define DECLARE_CDAT_DOE_TASK(req, cdt) \ -struct cdat_doe_task cdt = { \ - .c = 
COMPLETION_INITIALIZER_ONSTACK(cdt.c), \ - .request_pl = req, \ - .task = { \ - .prot.vid = PCI_DVSEC_VENDOR_ID_CXL, \ - .prot.type = CXL_DOE_PROTOCOL_TABLE_ACCESS, \ - .request_pl = &cdt.request_pl, \ - .request_pl_sz = sizeof(cdt.request_pl), \ - .response_pl = cdt.response_pl, \ - .response_pl_sz = sizeof(cdt.response_pl), \ - .complete = cxl_doe_task_complete, \ - .private = &cdt.c, \ - } \ -} - static int cxl_cdat_get_length(struct device *dev, struct pci_doe_mb *cdat_doe, size_t *length) { - DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(0), t); + __le32 request = CDAT_DOE_REQ(0); + __le32 response[2]; int rc; - rc = pci_doe_submit_task(cdat_doe, &t.task); + rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL, + CXL_DOE_PROTOCOL_TABLE_ACCESS, + &request, sizeof(request), + &response, sizeof(response)); if (rc < 0) { - dev_err(dev, "DOE submit failed: %d", rc); + dev_err(dev, "DOE failed: %d", rc); return rc; } - wait_for_completion(&t.c); - if (t.task.rv < sizeof(u32)) + if (rc < sizeof(response)) return -EIO; - *length = t.response_pl[1]; + *length = le32_to_cpu(response[1]); dev_dbg(dev, "CDAT length %zu\n", *length); return 0; @@ -521,44 +475,56 @@ static int cxl_cdat_get_length(struct device *dev, static int cxl_cdat_read_table(struct device *dev, struct pci_doe_mb *cdat_doe, - struct cxl_cdat *cdat) + void *cdat_table, size_t *cdat_length) { - size_t length = cdat->length; - u32 *data = cdat->table; + size_t length = *cdat_length + sizeof(__le32); + __le32 *data = cdat_table; int entry_handle = 0; + __le32 saved_dw = 0; do { - DECLARE_CDAT_DOE_TASK(CDAT_DOE_REQ(entry_handle), t); + __le32 request = CDAT_DOE_REQ(entry_handle); + struct cdat_entry_header *entry; size_t entry_dw; - u32 *entry; int rc; - rc = pci_doe_submit_task(cdat_doe, &t.task); + rc = pci_doe(cdat_doe, PCI_DVSEC_VENDOR_ID_CXL, + CXL_DOE_PROTOCOL_TABLE_ACCESS, + &request, sizeof(request), + data, length); if (rc < 0) { - dev_err(dev, "DOE submit failed: %d", rc); + dev_err(dev, "DOE failed: %d", rc); return rc; } - wait_for_completion(&t.c); - /* 1 DW header + 1 DW data min */ - if (t.task.rv < (2 * sizeof(u32))) + + /* 1 DW Table Access Response Header + CDAT entry */ + entry = (struct cdat_entry_header *)(data + 1); + if ((entry_handle == 0 && + rc != sizeof(__le32) + sizeof(struct cdat_header)) || + (entry_handle > 0 && + (rc < sizeof(__le32) + sizeof(*entry) || + rc != sizeof(__le32) + le16_to_cpu(entry->length)))) return -EIO; /* Get the CXL table access header entry handle */ entry_handle = FIELD_GET(CXL_DOE_TABLE_ACCESS_ENTRY_HANDLE, - t.response_pl[0]); - entry = t.response_pl + 1; - entry_dw = t.task.rv / sizeof(u32); + le32_to_cpu(data[0])); + entry_dw = rc / sizeof(__le32); /* Skip Header */ entry_dw -= 1; - entry_dw = min(length / sizeof(u32), entry_dw); - /* Prevent length < 1 DW from causing a buffer overflow */ - if (entry_dw) { - memcpy(data, entry, entry_dw * sizeof(u32)); - length -= entry_dw * sizeof(u32); - data += entry_dw; - } + /* + * Table Access Response Header overwrote the last DW of + * previous entry, so restore that DW + */ + *data = saved_dw; + length -= entry_dw * sizeof(__le32); + data += entry_dw; + saved_dw = *data; } while (entry_handle != CXL_DOE_TABLE_ACCESS_LAST_ENTRY); + /* Length in CDAT header may exceed concatenation of CDAT entries */ + *cdat_length -= length - sizeof(__le32); + return 0; } @@ -570,13 +536,19 @@ static int cxl_cdat_read_table(struct device *dev, */ void read_cdat_data(struct cxl_port *port) { - struct pci_doe_mb *cdat_doe; + struct cxl_memdev *cxlmd = 
to_cxl_memdev(port->uport);