@@ -159,3 +159,167 @@ void at32_deselect_pin(unsigned int pin)
 	pio = gpio_to_pio(pin);
 	if (unlikely(!pio)) {
+		printk(KERN_WARNING "pio: invalid pin %u\n", pin);
+		dump_stack();
+		return;
+	}
+
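+	/* Release the pin's pinmux reservation so it can be reallocated */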
+	clear_bit(pin_index, &pio->pinmux_mask);
+}
+
+/* Reserve a pin, preventing anyone else from changing its configuration. */
+void __init at32_reserve_pin(unsigned int port, u32 pin_mask)
+{
+	struct pio_device *pio;
+
+	/* assign and verify pio */
+	pio = gpio_to_pio(port);
+	if (unlikely(!pio)) {
+		printk(KERN_WARNING "pio: invalid port %u\n", port);
+		goto fail;
+	}
+
+	/* Test if any of the requested pins is already muxed */
+	spin_lock(&pio_lock);
+	if (unlikely(pio->pinmux_mask & pin_mask)) {
+		printk(KERN_WARNING "%s: pin(s) busy (req. 0x%x, busy 0x%x)\n",
+		       pio->name, pin_mask, pio->pinmux_mask & pin_mask);
+		spin_unlock(&pio_lock);
+		goto fail;
+	}
+
+	/* Reserve pins */
+	pio->pinmux_mask |= pin_mask;
+	spin_unlock(&pio_lock);
+	return;
+
+fail:
+	dump_stack();
+}
+
+/*--------------------------------------------------------------------------*/
+
+/* GPIO API */
+
+static int direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	struct pio_device *pio = container_of(chip, struct pio_device, chip);
+	u32 mask = 1 << offset;
+
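+	/* The pin must be under PIO control (PSR), not muxed to a peripheral */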
+	if (!(pio_readl(pio, PSR) & mask))
+		return -EINVAL;
+
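+	/* Disable the output driver (ODR) so the pin acts as an input */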
+	pio_writel(pio, ODR, mask);
+	return 0;
+}
+
+static int gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct pio_device *pio = container_of(chip, struct pio_device, chip);
+
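+	/* PDSR reflects the level actually sensed on the pin */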
+	return (pio_readl(pio, PDSR) >> offset) & 1;
+}
+
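+/* Forward declaration: direction_output() uses gpio_set() before its definition */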
+static void gpio_set(struct gpio_chip *chip, unsigned offset, int value);
+
+static int direction_output(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct pio_device *pio = container_of(chip, struct pio_device, chip);
+	u32 mask = 1 << offset;
+
+	if (!(pio_readl(pio, PSR) & mask))
+		return -EINVAL;
+
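+	/* Set the requested level first, then enable the output driver (OER) */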
+	gpio_set(chip, offset, value);
+	pio_writel(pio, OER, mask);
+	return 0;
+}
+
+static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct pio_device *pio = container_of(chip, struct pio_device, chip);
+	u32 mask = 1 << offset;
+
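+	/* SODR drives the pin high, CODR drives it low */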
+	if (value)
+		pio_writel(pio, SODR, mask);
+	else
+		pio_writel(pio, CODR, mask);
+}
+
+/*--------------------------------------------------------------------------*/
+
+/* GPIO IRQ support */
+
+static void gpio_irq_mask(struct irq_data *d)
+{
+	unsigned gpio = irq_to_gpio(d->irq);
+	struct pio_device *pio = &pio_dev[gpio >> 5];
+
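+	/* Each port handles 32 pins; writing IDR disables this pin's interrupt */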
+	pio_writel(pio, IDR, 1 << (gpio & 0x1f));
+}
+
+static void gpio_irq_unmask(struct irq_data *d)
+{
+	unsigned gpio = irq_to_gpio(d->irq);
+	struct pio_device *pio = &pio_dev[gpio >> 5];
+
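+	/* Writing IER re-enables this pin's interrupt */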
+	pio_writel(pio, IER, 1 << (gpio & 0x1f));
+}
+
+static int gpio_irq_type(struct irq_data *d, unsigned type)
+{
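+	/* Only change-of-state (both-edge) triggering is accepted */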
+	if (type != IRQ_TYPE_EDGE_BOTH && type != IRQ_TYPE_NONE)
+		return -EINVAL;
+
+	return 0;
+}
+
+static struct irq_chip gpio_irqchip = {
+	.name		= "gpio",
+	.irq_mask	= gpio_irq_mask,
+	.irq_unmask	= gpio_irq_unmask,
+	.irq_set_type	= gpio_irq_type,
+};
+
+static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+	struct pio_device *pio = irq_desc_get_chip_data(desc);
+	unsigned gpio_irq;
+
+	gpio_irq = (unsigned) irq_get_handler_data(irq);
+	for (;;) {
+		u32 isr;
+
+		/* ack pending GPIO interrupts */
+		isr = pio_readl(pio, ISR) & pio_readl(pio, IMR);
+		if (!isr)
+			break;
+		do {
+			int i;
+
+			i = ffs(isr) - 1;
+			isr &= ~(1 << i);
+
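+			/* Map the port-local bit number to its Linux IRQ number */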
+			i += gpio_irq;
+			generic_handle_irq(i);
+		} while (isr);
+	}
+}
+
+static void __init
+gpio_irq_setup(struct pio_device *pio, int irq, int gpio_irq)
+{
+	unsigned i;
+
+	irq_set_chip_data(irq, pio);
+	irq_set_handler_data(irq, (void *)gpio_irq);
+
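+	/* Give each of the port's 32 pins its own IRQ handled by gpio_irqchip */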
+	for (i = 0; i < 32; i++, gpio_irq++) {
+		irq_set_chip_data(gpio_irq, pio);
+		irq_set_chip_and_handler(gpio_irq, &gpio_irqchip,
+					 handle_simple_irq);
+	}
+
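+	/* Demultiplex the port's module IRQ to the per-pin IRQs */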
+	irq_set_chained_handler(irq, gpio_irq_handler);
+}
+