On Wed, Oct 22, 2025 at 12:33:49PM +0100, Jonathan Cameron wrote: > +static int hisi_soc_hha_wbinv(struct cache_coherency_ops_inst *cci, > + struct cc_inval_params *invp) > +{ > + struct hisi_soc_hha *soc_hha = > + container_of(cci, struct hisi_soc_hha, cci); > + phys_addr_t top, addr = invp->addr; > + size_t size = invp->size; > + u32 reg; > + > + if (!size) > + return -EINVAL; > + > + addr = ALIGN_DOWN(addr, HISI_HHA_MAINT_ALIGN); > + top = ALIGN(addr + size, HISI_HHA_MAINT_ALIGN); > + size = top - addr; > + > + guard(mutex)(&soc_hha->lock); > + > + if (!hisi_hha_cache_maintain_wait_finished(soc_hha)) > + return -EBUSY; > + > + /* > + * Hardware will search for addresses ranging [addr, addr + size - 1], > + * last byte included, and perform maintain in 128 byte granule > + * on those cachelines which contain the addresses. > + */ Hmm, does this mean that the IP has some built-in handling for there being more than one "agent" in a system? IOW, if the address is not in its range, then the search will just fail into a NOP? If that's not the case, is this particular "agent" by design not suitable for a system like that? Or will a dual hydra home agent system come with a new ACPI ID that we can use to deal with that kind of situation? (Although I don't know enough about ACPI to know where you'd even get the information about what instance handles what range from...) 
> + size -= 1; > + > + writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L); > + writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H); > + writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L); > + writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H); > + > + reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, 1); /* Clean Invalid */ > + reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN; > + writel(reg, soc_hha->base + HISI_HHA_CTRL); > + > + return 0; > +} > + > +static int hisi_soc_hha_done(struct cache_coherency_ops_inst *cci) > +{ > + struct hisi_soc_hha *soc_hha = > + container_of(cci, struct hisi_soc_hha, cci); > + > + guard(mutex)(&soc_hha->lock); > + if (!hisi_hha_cache_maintain_wait_finished(soc_hha)) > + return -ETIMEDOUT; > + > + return 0; > +} > + > +static const struct cache_coherency_ops hha_ops = { > + .wbinv = hisi_soc_hha_wbinv, > + .done = hisi_soc_hha_done, > +}; > + > +static int hisi_soc_hha_probe(struct platform_device *pdev) > +{ > + struct hisi_soc_hha *soc_hha; > + struct resource *mem; > + int ret; > + > + soc_hha = cache_coherency_ops_instance_alloc(&hha_ops, > + struct hisi_soc_hha, cci); > + if (!soc_hha) > + return -ENOMEM; > + > + platform_set_drvdata(pdev, soc_hha); > + > + mutex_init(&soc_hha->lock); > + > + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); > + if (!mem) { > + ret = -ENOMEM; > + goto err_free_cci; > + } > + > + /* > + * HHA cache driver share the same register region with HHA uncore PMU > + * driver in hardware's perspective, none of them should reserve the > + * resource to itself only. Here exclusive access verification is > + * avoided by calling devm_ioremap instead of devm_ioremap_resource to The comment here doesn't exactly match the code, dunno if you went away from devm for some reason and just forgot to make the change or the other way around? Not a big deal obviously, but maybe you forgot to do something you intended to do. It's mentioned in the commit message too. 
Other than the question I have about the multi-"agent" stuff, this looks fine to me. I assume it's been thought about and is fine for whatever reason, but I'd like to know what that is. Cheers, Conor. > + * allow both drivers to exist at the same time. > + */ > + soc_hha->base = ioremap(mem->start, resource_size(mem)); > + if (!soc_hha->base) { > + ret = dev_err_probe(&pdev->dev, -ENOMEM, > + "failed to remap io memory"); > + goto err_free_cci; > + } > + > + ret = cache_coherency_ops_instance_register(&soc_hha->cci); > + if (ret) > + goto err_iounmap; > + > + return 0; > + > +err_iounmap: > + iounmap(soc_hha->base); > +err_free_cci: > + cache_coherency_ops_instance_put(&soc_hha->cci); > + return ret; > +} > + > +static void hisi_soc_hha_remove(struct platform_device *pdev) > +{ > + struct hisi_soc_hha *soc_hha = platform_get_drvdata(pdev); > + > + cache_coherency_ops_instance_unregister(&soc_hha->cci); > + iounmap(soc_hha->base); > + cache_coherency_ops_instance_put(&soc_hha->cci); > +}