@@ -429,3 +429,148 @@ static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
 	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
 	*base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
+	mask = (l >> 8) & 0x0f;
+	*size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
+}
+
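+/* A chip-select decodes a memory window only when CSVALID is set in its CONFIG7 register */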
+static int gpmc_cs_mem_enabled(int cs)
+{
+	u32 l;
+
+	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
+	return l & GPMC_CONFIG7_CSVALID;
+}
+
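+/* gpmc_cs_map tracks reserved chip-selects, one bit per CS */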
+int gpmc_cs_set_reserved(int cs, int reserved)
+{
+	if (cs >= GPMC_CS_NUM)
+		return -ENODEV;
+
+	gpmc_cs_map &= ~(1 << cs);
+	gpmc_cs_map |= (reserved ? 1 : 0) << cs;
+
+	return 0;
+}
+
+int gpmc_cs_reserved(int cs)
+{
+	if (cs >= GPMC_CS_NUM)
+		return -ENODEV;
+
+	return gpmc_cs_map & (1 << cs);
+}
+
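+/* Round the requested size up to a power of two, no smaller than one GPMC chunk */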
+static unsigned long gpmc_mem_align(unsigned long size)
+{
+	int order;
+
+	size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
+	order = GPMC_CHUNK_SHIFT - 1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	size = 1 << order;
+	return size;
+}
+
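+/* Claim an aligned window for this chip-select in the gpmc_mem_root resource tree */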
+static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
+{
+	struct resource *res = &gpmc_cs_mem[cs];
+	int r;
+
+	size = gpmc_mem_align(size);
+	spin_lock(&gpmc_mem_lock);
+	res->start = base;
+	res->end = base + size - 1;
+	r = request_resource(&gpmc_mem_root, res);
+	spin_unlock(&gpmc_mem_lock);
+
+	return r;
+}
+
+static int gpmc_cs_delete_mem(int cs)
+{
+	struct resource *res = &gpmc_cs_mem[cs];
+	int r;
+
+	spin_lock(&gpmc_mem_lock);
+	r = release_resource(&gpmc_cs_mem[cs]);
+	res->start = 0;
+	res->end = 0;
+	spin_unlock(&gpmc_mem_lock);
+
+	return r;
+}
+
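+/*
+ * gpmc_cs_request() reserves a chip-select and sets up an aligned memory
+ * window for it.  A caller (a flash or ethernet chip driver, say) might
+ * do something like:
+ *
+ *	unsigned long cs_base;
+ *
+ *	if (gpmc_cs_request(cs, SZ_16M, &cs_base) < 0)
+ *		return -EINVAL;
+ *
+ * and release the window again with gpmc_cs_free(cs).  If the chip-select
+ * is already enabled (e.g. by the bootloader) its existing window is
+ * reused, otherwise a new one is allocated.
+ */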
+int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
+{
+	struct resource *res = &gpmc_cs_mem[cs];
+	int r = -1;
+
+	if (cs >= GPMC_CS_NUM)
+		return -ENODEV;
+
+	size = gpmc_mem_align(size);
+	if (size > (1 << GPMC_SECTION_SHIFT))
+		return -ENOMEM;
+
+	spin_lock(&gpmc_mem_lock);
+	if (gpmc_cs_reserved(cs)) {
+		r = -EBUSY;
+		goto out;
+	}
+	if (gpmc_cs_mem_enabled(cs))
+		r = adjust_resource(res, res->start & ~(size - 1), size);
+	if (r < 0)
+		r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
+				      size, NULL, NULL);
+	if (r < 0)
+		goto out;
+
+	gpmc_cs_enable_mem(cs, res->start, resource_size(res));
+	*base = res->start;
+	gpmc_cs_set_reserved(cs, 1);
+out:
+	spin_unlock(&gpmc_mem_lock);
+	return r;
+}
+EXPORT_SYMBOL(gpmc_cs_request);
+
+void gpmc_cs_free(int cs)
+{
+	spin_lock(&gpmc_mem_lock);
+	if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
+		printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
+		BUG();
+		spin_unlock(&gpmc_mem_lock);
+		return;
+	}
+	gpmc_cs_disable_mem(cs);
+	release_resource(&gpmc_cs_mem[cs]);
+	gpmc_cs_set_reserved(cs, 0);
+	spin_unlock(&gpmc_mem_lock);
+}
+EXPORT_SYMBOL(gpmc_cs_free);
+
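+/*
+ * Example (hypothetical caller): a driver that wants to unmask GPMC
+ * interrupt sources could do
+ *
+ *	gpmc_cs_configure(cs, GPMC_ENABLE_IRQ, irq_mask);
+ *
+ * where irq_mask is the GPMC_IRQENABLE value to write.
+ */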
+/**
+ * gpmc_cs_configure - write request to configure gpmc
+ * @cs: chip select number
+ * @cmd: command type
+ * @wval: value to write
+ * @return status of the operation
+ */
+int gpmc_cs_configure(int cs, int cmd, int wval)
+{
+	int err = 0;
+	u32 regval = 0;
+
+	switch (cmd) {
+	case GPMC_ENABLE_IRQ:
+		gpmc_write_reg(GPMC_IRQENABLE, wval);
+		break;
+
+	case GPMC_SET_IRQ_STATUS:
+		gpmc_write_reg(GPMC_IRQSTATUS, wval);
+		break;
+