/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-25     GuEe-GUI     the first version
 */

#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>

#define DBG_TAG "mfd.edu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <cpuport.h>

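/*
 * Register map of the QEMU "edu" educational PCI device (see QEMU's
 * docs/specs/edu documentation). The device provides:
 *   - an identification register and a card-liveness check register,
 *   - a factorial unit: write a value to 0x08 and read back its factorial,
 *     optionally raising an interrupt when the computation finishes,
 *   - a simple DMA engine that copies between guest RAM and a 4 KiB
 *     buffer inside the device at offset 0x40000.
 */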
#define PCI_EDU_REGS_BAR        0
#define EDU_REG_VERSION         0x00
#define EDU_REG_CARD_LIVENESS   0x04
#define EDU_REG_VALUE           0x08
#define EDU_REG_STATUS          0x20
#define EDU_REG_STATUS_IRQ      0x80
#define EDU_REG_IRQ_STATUS      0x24
#define EDU_REG_ISR_FACT        0x00000001
#define EDU_REG_ISR_DMA         0x00000100
#define EDU_REG_IRQ_RAISE       0x60
#define EDU_REG_IRQ_ACK         0x64
#define EDU_REG_DMA_SRC         0x80
#define EDU_REG_DMA_DST         0x88
#define EDU_REG_DMA_SIZE        0x90
#define EDU_REG_DMA_CMD         0x98
#define EDU_DMA_CMD_RUN         0x1
#define EDU_DMA_CMD_TO_PCI      0x0
#define EDU_DMA_CMD_FROM_PCI    0x2
#define EDU_DMA_CMD_IRQ         0x4

#define EDU_FACTORIAL_ACK       0x00000001

#define EDU_DMA_ACK             0x00000100
#define EDU_DMA_FREE            (~0UL)
#define EDU_DMA_BASE            0x40000
#define EDU_DMA_SIZE            ((rt_size_t)(4096 - 1))
#define EDU_DMA_POLL_SIZE       128

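/*
 * Per-device state: each "edu" instance is exposed both as a character
 * device (factorial unit) and as a DMA controller that performs
 * memory-to-memory copies through the card's internal buffer. The mutex
 * serializes register access and the completion is signalled from the ISR.
 */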
struct edu_device
{
    struct rt_device parent;
    struct rt_dma_controller dma_ctrl;

    void *regs;
    rt_uint32_t ack;
    rt_bool_t dma_work;

    struct rt_mutex lock;
    struct rt_completion done;
};

#define raw_to_edu_device(raw) rt_container_of(raw, struct edu_device, parent)
#define raw_to_edu_dma(raw)    rt_container_of(raw, struct edu_device, dma_ctrl)

rt_inline rt_uint32_t edu_readl(struct edu_device *edu, int offset)
{
    return HWREG32(edu->regs + offset);
}

rt_inline void edu_writel(struct edu_device *edu, int offset, rt_uint32_t value)
{
    HWREG32(edu->regs + offset) = value;
}

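/*
 * The EDU DMA engine only moves data between RAM and the device's internal
 * buffer, so a memory-to-memory copy is bounced through that buffer in two
 * hops per chunk: RAM -> EDU buffer (TO_PCI), then EDU buffer -> RAM
 * (FROM_PCI). Chunks are limited to EDU_DMA_SIZE; short transfers are
 * polled, longer ones wait for the DMA-complete interrupt.
 */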
static rt_err_t edu_dma_start(struct rt_dma_chan *chan)
{
    rt_size_t len;
    rt_ubase_t dma_addr_src, dma_addr_dst;
    struct edu_device *edu = raw_to_edu_dma(chan->ctrl);

    rt_mutex_take(&edu->lock, RT_WAITING_FOREVER);

    edu->ack = EDU_DMA_ACK;
    edu->dma_work = RT_TRUE;

    len = chan->transfer.buffer_len;
    dma_addr_src = chan->transfer.src_addr;
    dma_addr_dst = chan->transfer.dst_addr;

    while ((rt_ssize_t)len > 0 && edu->dma_work)
    {
        rt_uint32_t cmd = EDU_DMA_CMD_RUN;
        rt_uint32_t blen = rt_min_t(rt_size_t, EDU_DMA_SIZE, len);

        /* Only chunks worth sleeping for use the completion interrupt */
        if (blen > EDU_DMA_POLL_SIZE)
        {
            cmd |= EDU_DMA_CMD_IRQ;
        }

        /* First hop: copy this chunk from RAM into the card's internal buffer */
        edu_writel(edu, EDU_REG_DMA_SRC, dma_addr_src);
        edu_writel(edu, EDU_REG_DMA_DST, EDU_DMA_BASE);
        edu_writel(edu, EDU_REG_DMA_SIZE, blen);
        edu_writel(edu, EDU_REG_DMA_CMD, cmd | EDU_DMA_CMD_TO_PCI);

        if (cmd & EDU_DMA_CMD_IRQ)
        {
            rt_completion_wait(&edu->done, RT_WAITING_FOREVER);
        }
        else
        {
            while (edu_readl(edu, EDU_REG_DMA_CMD) & EDU_DMA_CMD_RUN)
            {
                rt_hw_cpu_relax();
            }
        }

        /* Second hop: copy the chunk from the card's buffer to the destination in RAM */
        edu_writel(edu, EDU_REG_DMA_SRC, EDU_DMA_BASE);
        edu_writel(edu, EDU_REG_DMA_DST, dma_addr_dst);
        edu_writel(edu, EDU_REG_DMA_SIZE, blen);
        edu_writel(edu, EDU_REG_DMA_CMD, cmd | EDU_DMA_CMD_FROM_PCI);

        if (cmd & EDU_DMA_CMD_IRQ)
        {
            rt_completion_wait(&edu->done, RT_WAITING_FOREVER);
        }
        else
        {
            while (edu_readl(edu, EDU_REG_DMA_CMD) & EDU_DMA_CMD_RUN)
            {
                rt_hw_cpu_relax();
            }
        }

        len -= blen;
        dma_addr_src += blen;
        dma_addr_dst += blen;
    }

    rt_mutex_release(&edu->lock);

    rt_dma_chan_done(chan, chan->transfer.buffer_len - len);

    return RT_EOK;
}

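/* Stop only clears the work flag; edu_dma_start() leaves its loop once the chunk in flight completes. */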
static rt_err_t edu_dma_stop(struct rt_dma_chan *chan)
{
    struct edu_device *edu = raw_to_edu_dma(chan->ctrl);

    edu->dma_work = RT_FALSE;

    return RT_EOK;
}

/*
 * The EDU engine needs no slave configuration or memcpy preparation; the
 * transfer parameters are taken from chan->transfer in edu_dma_start().
 */
static rt_err_t edu_dma_config(struct rt_dma_chan *chan,
        struct rt_dma_slave_config *conf)
{
    return RT_EOK;
}

static rt_err_t edu_dma_prep_memcpy(struct rt_dma_chan *chan,
        rt_ubase_t dma_addr_src, rt_ubase_t dma_addr_dst, rt_size_t len)
{
    return RT_EOK;
}

static const struct rt_dma_controller_ops edu_dma_ops =
{
    .start = edu_dma_start,
    .stop = edu_dma_stop,
    .config = edu_dma_config,
    .prep_memcpy = edu_dma_prep_memcpy,
};

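/*
 * Reading the character device returns the last value computed by the
 * factorial unit (EDU_REG_VALUE), copied into the caller's buffer.
 */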
static rt_ssize_t edu_read(rt_device_t dev, rt_off_t pos, void *buffer, rt_size_t size)
{
    rt_uint32_t number;
    struct edu_device *edu = raw_to_edu_device(dev);

    rt_mutex_take(&edu->lock, RT_WAITING_FOREVER);

    number = edu_readl(edu, EDU_REG_VALUE);

    rt_mutex_release(&edu->lock);

    rt_memcpy(buffer, &number, rt_min(sizeof(number), size));

    return rt_min(sizeof(number), size);
}

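/*
 * Writing a value starts a factorial computation: the status register is
 * programmed to raise an interrupt on completion, and the caller blocks
 * on the completion signalled from edu_isr().
 */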
static rt_ssize_t edu_write(rt_device_t dev, rt_off_t pos, const void *buffer, rt_size_t size)
{
    rt_uint32_t number = 0;
    struct edu_device *edu = raw_to_edu_device(dev);

    rt_memcpy(&number, buffer, rt_min(sizeof(number), size));

    rt_mutex_take(&edu->lock, RT_WAITING_FOREVER);

    edu->ack = EDU_FACTORIAL_ACK;
    edu_writel(edu, EDU_REG_STATUS, EDU_REG_STATUS_IRQ);
    edu_writel(edu, EDU_REG_VALUE, number);

    rt_completion_wait(&edu->done, RT_WAITING_FOREVER);

    rt_mutex_release(&edu->lock);

    return rt_min(sizeof(number), size);
}

#ifdef RT_USING_DEVICE_OPS
static const struct rt_device_ops edu_ops =
{
    .read = edu_read,
    .write = edu_write,
};
#endif

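/*
 * Shared INTx handler: if the factorial or DMA interrupt bit is pending,
 * acknowledge it with the value expected by the current operation and
 * wake the waiting thread.
 */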
static void edu_isr(int irqno, void *param)
{
    struct edu_device *edu = param;

    if (edu_readl(edu, EDU_REG_IRQ_STATUS) & (EDU_REG_ISR_FACT | EDU_REG_ISR_DMA))
    {
        edu_writel(edu, EDU_REG_IRQ_ACK, edu->ack);
        rt_completion_done(&edu->done);
    }
}

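/*
 * Probe: map the BAR0 registers, register the DMA controller (MEM_TO_MEM,
 * 32-bit address mask) and the "edu" character device, then install the
 * INTx handler and unmask the interrupt at the PCI device.
 */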
static rt_err_t edu_probe(struct rt_pci_device *pdev)
{
    rt_err_t err;
    struct edu_device *edu = rt_calloc(1, sizeof(*edu));

    if (!edu)
    {
        return -RT_ENOMEM;
    }

    edu->regs = rt_pci_iomap(pdev, PCI_EDU_REGS_BAR);

    if (!edu->regs)
    {
        err = -RT_EIO;
        goto _fail;
    }

    edu->dma_ctrl.dev = &pdev->parent;
    edu->dma_ctrl.ops = &edu_dma_ops;
    rt_dma_controller_add_direction(&edu->dma_ctrl, RT_DMA_MEM_TO_MEM);
    /* The 32-bit mask matches the QEMU option: -device edu,dma_mask=0xffffffff */
    rt_dma_controller_set_addr_mask(&edu->dma_ctrl, RT_DMA_ADDR_MASK(32));

    if ((err = rt_dma_controller_register(&edu->dma_ctrl)))
    {
        goto _fail;
    }

    edu->parent.type = RT_Device_Class_Char;
#ifdef RT_USING_DEVICE_OPS
    edu->parent.ops = &edu_ops;
#else
    edu->parent.read = edu_read;
    edu->parent.write = edu_write;
#endif

    if ((err = rt_device_register(&edu->parent, "edu", RT_DEVICE_FLAG_RDWR)))
    {
        goto _free_dma;
    }

    pdev->parent.user_data = edu;

    rt_mutex_init(&edu->lock, "edu", RT_IPC_FLAG_PRIO);
    rt_completion_init(&edu->done);

    rt_hw_interrupt_install(pdev->irq, edu_isr, edu, "edu");
    rt_pci_irq_unmask(pdev);

    /* Identification register layout is 0xRRrr00ed: RR = major, rr = minor version */
    LOG_D("EDU PCI device v%d.%d", edu_readl(edu, EDU_REG_VERSION) >> 24,
            (edu_readl(edu, EDU_REG_VERSION) >> 16) & 0xff);

    return RT_EOK;

_free_dma:
    rt_dma_controller_unregister(&edu->dma_ctrl);

_fail:
    if (edu->regs)
    {
        rt_iounmap(edu->regs);
    }

    rt_free(edu);

    return err;
}

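/*
 * Remove: quiesce the device's interrupt, then tear down the DMA controller,
 * the character device and the mapped registers in the reverse order of probe.
 */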
static rt_err_t edu_remove(struct rt_pci_device *pdev)
{
    struct edu_device *edu = pdev->parent.user_data;

    /* INTx may be shared with other devices: keep the IRQ line unmasked
     * and only disable the interrupt at this PCI device. */
    rt_hw_interrupt_umask(pdev->irq);
    rt_pci_irq_mask(pdev);

    rt_dma_controller_unregister(&edu->dma_ctrl);
    rt_device_unregister(&edu->parent);

    rt_mutex_detach(&edu->lock);

    rt_iounmap(edu->regs);
    rt_free(edu);

    return RT_EOK;
}

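/* The QEMU "edu" device identifies itself as vendor 0x1234 (QEMU), device 0x11e8. */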
static const struct rt_pci_device_id edu_ids[] =
{
    { RT_PCI_DEVICE_ID(PCI_VENDOR_ID_QEMU, 0x11e8), },
    { /* sentinel */ }
};

static struct rt_pci_driver edu_driver =
{
    .name = "edu",

    .ids = edu_ids,
    .probe = edu_probe,
    .remove = edu_remove,
};
RT_PCI_DRIVER_EXPORT(edu_driver);
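
/*
 * Example usage (a sketch, not part of the driver): with the guest started
 * as "qemu-system-* ... -device edu", an application can exercise the
 * factorial unit through the standard device interface. Error handling is
 * minimal and the device name "edu" matches the registration above.
 *
 *     rt_device_t dev = rt_device_find("edu");
 *     rt_uint32_t value = 10, result = 0;
 *
 *     if (dev && rt_device_open(dev, RT_DEVICE_OFLAG_RDWR) == RT_EOK)
 *     {
 *         rt_device_write(dev, 0, &value, sizeof(value));  // blocks until the IRQ
 *         rt_device_read(dev, 0, &result, sizeof(result)); // result = 10!
 *         rt_device_close(dev);
 *     }
 */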