--- /dev/null
+GPMC (General Purpose Memory Controller):
+=========================================
+
+GPMC is a unified memory controller dedicated to interfacing external
+memory devices like
+ * Asynchronous SRAM-like memories and application specific integrated
+   circuit devices
+ * Asynchronous, synchronous, and page mode burst NOR flash devices
+ * NAND flash
+ * Pseudo-SRAM devices
+
+GPMC is found on Texas Instruments SoCs (OMAP based).
+IP details: http://www.ti.com/lit/pdf/spruh73 section 7.1
+
+
+GPMC generic timing calculation:
+================================
+
+GPMC has a set of timings that have to be programmed for the connected
+peripheral to function properly, while the peripheral datasheet
+specifies its own set of timings. For a peripheral to work with GPMC,
+its timings have to be translated into the form GPMC understands. How
+this translation is done depends on the connected peripheral. In
+addition, certain GPMC timings depend on the GPMC clock frequency. A
+generic timing routine was developed to handle these requirements.
+
+The generic routine calculates GPMC timings from peripheral timings.
+The fields of struct gpmc_device_timings have to be filled with the
+timings from the datasheet of the peripheral connected to GPMC. A few
+of the peripheral timings can be specified either as a time or as a
+number of cycles; provision to handle this has been made (refer to the
+struct gpmc_device_timings definition). If a timing specified in the
+peripheral datasheet is not present in the timing structure, try to
+correlate it with one of the available fields. If that does not work,
+add a new field for the peripheral, teach the generic timing routine to
+handle it, and make sure none of the existing users break. Conversely,
+if the peripheral datasheet does not mention certain fields of
+struct gpmc_device_timings, zero those entries.
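+
+As an illustration, below is a minimal sketch of how board init code
+might use the routine for an address/data multiplexed asynchronous
+device. The timing values are placeholders rather than values from any
+real datasheet, and programming the result through gpmc_cs_set_timings()
+is just one possible way of applying it:
+
+	struct gpmc_device_timings dev_timings;
+	struct gpmc_timings gpmc_timings;
+
+	memset(&dev_timings, 0, sizeof(dev_timings)); /* zero unknown fields */
+	dev_timings.mux = true;           /* address & data multiplexed  */
+	dev_timings.t_ceasu = 6 * 1000;   /* all times are in picoseconds */
+	dev_timings.t_avdp_r = 12 * 1000;
+	dev_timings.t_avdp_w = 12 * 1000;
+	dev_timings.t_aavdh = 6 * 1000;
+	dev_timings.t_oeasu = 15 * 1000;
+	dev_timings.t_oe = 45 * 1000;
+	dev_timings.t_cez_r = 20 * 1000;
+	dev_timings.t_cez_w = 20 * 1000;
+	dev_timings.t_weasu = 15 * 1000;
+	dev_timings.t_wpl = 40 * 1000;
+	dev_timings.t_wph = 30 * 1000;
+	dev_timings.t_rd_cycle = 100 * 1000;
+	dev_timings.t_wr_cycle = 100 * 1000;
+
+	gpmc_calc_timings(&gpmc_timings, &dev_timings);
+	gpmc_cs_set_timings(cs, &gpmc_timings); /* cs requested earlier */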
+
+The generic timing routine has been verified to work properly with
+multiple OneNAND devices and the tusb6010 peripheral.
+
+A word of caution: the generic timing routine has been developed from
+an understanding of GPMC timings, peripheral timings and the existing
+custom timing routines. It is a kind of reverse engineering, done
+largely without the datasheets and hardware (to be exact, without any
+of the peripherals supported in mainline that have a custom timing
+routine), and verified by simulation.
+
+GPMC timing dependency on peripheral timings:
+[<gpmc_timing>: <peripheral timing1>, <peripheral timing2> ...]
+
+1. common
+cs_on: t_ceasu
+adv_on: t_avdasu, t_ce_avd
+
+2. sync common
+sync_clk: clk
+page_burst_access: t_bacc
+clk_activation: t_ces, t_avds
+
+3. read async muxed
+adv_rd_off: t_avdp_r
+oe_on: t_oeasu, t_aavdh
+access: t_iaa, t_oe, t_ce, t_aa
+rd_cycle: t_rd_cycle, t_cez_r, t_oez
+
+4. read async non-muxed
+adv_rd_off: t_avdp_r
+oe_on: t_oeasu
+access: t_iaa, t_oe, t_ce, t_aa
+rd_cycle: t_rd_cycle, t_cez_r, t_oez
+
+5. read sync muxed
+adv_rd_off: t_avdp_r, t_avdh
+oe_on: t_oeasu, t_ach, cyc_aavdh_oe
+access: t_iaa, cyc_iaa, cyc_oe
+rd_cycle: t_cez_r, t_oez, t_ce_rdyz
+
+6. read sync non-muxed
+adv_rd_off: t_avdp_r
+oe_on: t_oeasu
+access: t_iaa, cyc_iaa, cyc_oe
+rd_cycle: t_cez_r, t_oez, t_ce_rdyz
+
+7. write async muxed
+adv_wr_off: t_avdp_w
+we_on, wr_data_mux_bus: t_weasu, t_aavdh, cyc_aavdh_we
+we_off: t_wpl
+cs_wr_off: t_wph
+wr_cycle: t_cez_w, t_wr_cycle
+
+8. write async non-muxed
+adv_wr_off: t_avdp_w
+we_on, wr_data_mux_bus: t_weasu
+we_off: t_wpl
+cs_wr_off: t_wph
+wr_cycle: t_cez_w, t_wr_cycle
+
+9. write sync muxed
+adv_wr_off: t_avdp_w, t_avdh
+we_on, wr_data_mux_bus: t_weasu, t_rdyo, t_aavdh, cyc_aavdh_we
+we_off: t_wpl, cyc_wpl
+cs_wr_off: t_wph
+wr_cycle: t_cez_w, t_ce_rdyz
+
+10. write sync non-muxed
+adv_wr_off: t_avdp_w
+we_on, wr_data_mux_bus: t_weasu, t_rdyo
+we_off: t_wpl, cyc_wpl
+cs_wr_off: t_wph
+wr_cycle: t_cez_w, t_ce_rdyz
+
+
+Note: Many of the GPMC timings depend on other GPMC timings (a few
+depend purely on other GPMC timings, which is why some GPMC timings
+are missing above). This results in peripheral timings indirectly
+influencing GPMC timings other than those mentioned above; refer to
+the timing routine for details. To know what these peripheral timings
+correspond to, please see the explanations in the
+struct gpmc_device_timings definition. For GPMC timings, refer to the
+IP details (link above).
return ticks * gpmc_get_fclk_period() / 1000;
}
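+
+/* Convert a number of GPMC fclk ticks to picoseconds */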
+static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
+{
+ return ticks * gpmc_get_fclk_period();
+}
+
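+/*
+ * Round a time in picoseconds to a whole number of GPMC fclk ticks and
+ * return the rounded value, again in picoseconds.
+ */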
+static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
+{
+ unsigned long ticks = gpmc_ps_to_ticks(time_ps);
+
+ return ticks * gpmc_get_fclk_period();
+}
+
static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
{
u32 l;
return 0;
}
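+
+/*
+ * Round a time in picoseconds to a whole number of GPMC_CLK (sync
+ * clock) periods and return the rounded value in picoseconds.
+ */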
+static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
+{
+ u32 temp;
+ int div;
+
+ div = gpmc_calc_divider(sync_clk);
+ temp = gpmc_ps_to_ticks(time_ps);
+ temp = (temp + div - 1) / div;
+ return gpmc_ticks_to_ps(temp * div);
+}
+
+/* XXX: can the cycles be avoided ? */
+static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ bool mux = dev_t->mux;
+ u32 temp;
+
+ /* adv_rd_off */
+ temp = dev_t->t_avdp_r;
+ /* XXX: mux check required ? */
+ if (mux) {
+		/* XXX: t_avdp is not really needed for sync; it is only
+		 * added for tusb. This indirectly necessitates t_avdp_r
+		 * and t_avdp_w instead of a single t_avdp.
+		 */
+ temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ }
+ gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
+
+ /* oe_on */
+ temp = dev_t->t_oeasu; /* XXX: remove this ? */
+ if (mux) {
+ temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
+ temp = max_t(u32, temp, gpmc_t->adv_rd_off +
+ gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
+ }
+ gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
+
+ /* access */
+	/* XXX: any scope for improvement by combining oe_on and
+	 * clk_activation ? Need to check whether
+	 * access = clk_activation + round to sync clk ?
+	 */
+ temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
+ temp += gpmc_t->clk_activation;
+ if (dev_t->cyc_oe)
+ temp = max_t(u32, temp, gpmc_t->oe_on +
+ gpmc_ticks_to_ps(dev_t->cyc_oe));
+ gpmc_t->access = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
+ gpmc_t->cs_rd_off = gpmc_t->oe_off;
+
+ /* rd_cycle */
+ temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
+ temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
+ gpmc_t->access;
+ /* XXX: barter t_ce_rdyz with t_cez_r ? */
+ if (dev_t->t_ce_rdyz)
+ temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
+ gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ bool mux = dev_t->mux;
+ u32 temp;
+
+ /* adv_wr_off */
+ temp = dev_t->t_avdp_w;
+ if (mux) {
+ temp = max_t(u32, temp,
+ gpmc_t->clk_activation + dev_t->t_avdh);
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ }
+ gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
+
+ /* wr_data_mux_bus */
+ temp = max_t(u32, dev_t->t_weasu,
+ gpmc_t->clk_activation + dev_t->t_rdyo);
+ /* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
+ * and in that case remember to handle we_on properly
+ */
+ if (mux) {
+ temp = max_t(u32, temp,
+ gpmc_t->adv_wr_off + dev_t->t_aavdh);
+ temp = max_t(u32, temp, gpmc_t->adv_wr_off +
+ gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
+ }
+ gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
+
+ /* we_on */
+ if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
+ gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
+ else
+ gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
+
+ /* wr_access */
+	/* XXX: gpmc_capability check required ? Even if not, no harm done */
+ gpmc_t->wr_access = gpmc_t->access;
+
+ /* we_off */
+ temp = gpmc_t->we_on + dev_t->t_wpl;
+ temp = max_t(u32, temp,
+ gpmc_t->wr_access + gpmc_ticks_to_ps(1));
+ temp = max_t(u32, temp,
+ gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
+ gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
+ dev_t->t_wph);
+
+ /* wr_cycle */
+ temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
+ temp += gpmc_t->wr_access;
+ /* XXX: barter t_ce_rdyz with t_cez_w ? */
+ if (dev_t->t_ce_rdyz)
+ temp = max_t(u32, temp,
+ gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
+ gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ bool mux = dev_t->mux;
+ u32 temp;
+
+ /* adv_rd_off */
+ temp = dev_t->t_avdp_r;
+ if (mux)
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
+
+ /* oe_on */
+ temp = dev_t->t_oeasu;
+ if (mux)
+ temp = max_t(u32, temp,
+ gpmc_t->adv_rd_off + dev_t->t_aavdh);
+ gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
+
+ /* access */
+ temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
+ gpmc_t->oe_on + dev_t->t_oe);
+ temp = max_t(u32, temp,
+ gpmc_t->cs_on + dev_t->t_ce);
+ temp = max_t(u32, temp,
+ gpmc_t->adv_on + dev_t->t_aa);
+ gpmc_t->access = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
+ gpmc_t->cs_rd_off = gpmc_t->oe_off;
+
+ /* rd_cycle */
+ temp = max_t(u32, dev_t->t_rd_cycle,
+ gpmc_t->cs_rd_off + dev_t->t_cez_r);
+ temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
+ gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
+static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ bool mux = dev_t->mux;
+ u32 temp;
+
+ /* adv_wr_off */
+ temp = dev_t->t_avdp_w;
+ if (mux)
+ temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
+ gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
+
+ /* wr_data_mux_bus */
+ temp = dev_t->t_weasu;
+ if (mux) {
+ temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
+ temp = max_t(u32, temp, gpmc_t->adv_wr_off +
+ gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
+ }
+ gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
+
+ /* we_on */
+ if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
+ gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
+ else
+ gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
+
+ /* we_off */
+ temp = gpmc_t->we_on + dev_t->t_wpl;
+ gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
+
+ gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
+ dev_t->t_wph);
+
+ /* wr_cycle */
+ temp = max_t(u32, dev_t->t_wr_cycle,
+ gpmc_t->cs_wr_off + dev_t->t_cez_w);
+ gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
+
+ return 0;
+}
+
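+/*
+ * Calculate the timings common to synchronous accesses: sync_clk,
+ * page_burst_access, clk_activation and, when the sync clock runs at
+ * fclk rate (divider of 1), the extra signal delays.
+ */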
+static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ u32 temp;
+
+ gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
+ gpmc_get_fclk_period();
+
+ gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
+ dev_t->t_bacc,
+ gpmc_t->sync_clk);
+
+ temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
+ gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);
+
+ if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
+ return 0;
+
+ if (dev_t->ce_xdelay)
+ gpmc_t->bool_timings.cs_extra_delay = true;
+ if (dev_t->avd_xdelay)
+ gpmc_t->bool_timings.adv_extra_delay = true;
+ if (dev_t->oe_xdelay)
+ gpmc_t->bool_timings.oe_extra_delay = true;
+ if (dev_t->we_xdelay)
+ gpmc_t->bool_timings.we_extra_delay = true;
+
+ return 0;
+}
+
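+/*
+ * Calculate the timings common to all accesses (cs_on, adv_on) and,
+ * for synchronous devices, the sync-specific common timings.
+ */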
+static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ u32 temp;
+
+ /* cs_on */
+ gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);
+
+ /* adv_on */
+ temp = dev_t->t_avdasu;
+ if (dev_t->t_ce_avd)
+ temp = max_t(u32, temp,
+ gpmc_t->cs_on + dev_t->t_ce_avd);
+ gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);
+
+ if (dev_t->sync_write || dev_t->sync_read)
+ gpmc_calc_sync_common_timings(gpmc_t, dev_t);
+
+ return 0;
+}
+
+/* TODO: remove this function once all peripherals are confirmed to
+ * work with generic timing. Simultaneously gpmc_cs_set_timings()
+ * has to be modified to handle timings in ps instead of ns
+ */
+static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
+{
+ t->cs_on /= 1000;
+ t->cs_rd_off /= 1000;
+ t->cs_wr_off /= 1000;
+ t->adv_on /= 1000;
+ t->adv_rd_off /= 1000;
+ t->adv_wr_off /= 1000;
+ t->we_on /= 1000;
+ t->we_off /= 1000;
+ t->oe_on /= 1000;
+ t->oe_off /= 1000;
+ t->page_burst_access /= 1000;
+ t->access /= 1000;
+ t->rd_cycle /= 1000;
+ t->wr_cycle /= 1000;
+ t->bus_turnaround /= 1000;
+ t->cycle2cycle_delay /= 1000;
+ t->wait_monitoring /= 1000;
+ t->clk_activation /= 1000;
+ t->wr_access /= 1000;
+ t->wr_data_mux_bus /= 1000;
+}
+
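+/**
+ * gpmc_calc_timings - calculate gpmc timings from peripheral timings
+ * @gpmc_t: generic timing structure to be filled
+ * @dev_t: timings of the connected peripheral, mostly in picoseconds
+ *
+ * Derives gpmc timings from the peripheral timings; for now the result
+ * is converted to nanoseconds (see gpmc_convert_ps_to_ns()).
+ */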
+int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t)
+{
+ memset(gpmc_t, 0, sizeof(*gpmc_t));
+
+ gpmc_calc_common_timings(gpmc_t, dev_t);
+
+ if (dev_t->sync_read)
+ gpmc_calc_sync_read_timings(gpmc_t, dev_t);
+ else
+ gpmc_calc_async_read_timings(gpmc_t, dev_t);
+
+ if (dev_t->sync_write)
+ gpmc_calc_sync_write_timings(gpmc_t, dev_t);
+ else
+ gpmc_calc_async_write_timings(gpmc_t, dev_t);
+
+ /* TODO: remove, see function definition */
+ gpmc_convert_ps_to_ns(gpmc_t);
+
+ return 0;
+}
+
static __devinit int gpmc_probe(struct platform_device *pdev)
{
int rc;
u32 sync_clk;
/* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
- u16 cs_on; /* Assertion time */
- u16 cs_rd_off; /* Read deassertion time */
- u16 cs_wr_off; /* Write deassertion time */
+ u32 cs_on; /* Assertion time */
+ u32 cs_rd_off; /* Read deassertion time */
+ u32 cs_wr_off; /* Write deassertion time */
/* ADV signal timings corresponding to GPMC_CONFIG3 */
- u16 adv_on; /* Assertion time */
- u16 adv_rd_off; /* Read deassertion time */
- u16 adv_wr_off; /* Write deassertion time */
+ u32 adv_on; /* Assertion time */
+ u32 adv_rd_off; /* Read deassertion time */
+ u32 adv_wr_off; /* Write deassertion time */
/* WE signals timings corresponding to GPMC_CONFIG4 */
- u16 we_on; /* WE assertion time */
- u16 we_off; /* WE deassertion time */
+ u32 we_on; /* WE assertion time */
+ u32 we_off; /* WE deassertion time */
/* OE signals timings corresponding to GPMC_CONFIG4 */
- u16 oe_on; /* OE assertion time */
- u16 oe_off; /* OE deassertion time */
+ u32 oe_on; /* OE assertion time */
+ u32 oe_off; /* OE deassertion time */
/* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
- u16 page_burst_access; /* Multiple access word delay */
- u16 access; /* Start-cycle to first data valid delay */
- u16 rd_cycle; /* Total read cycle time */
- u16 wr_cycle; /* Total write cycle time */
+ u32 page_burst_access; /* Multiple access word delay */
+ u32 access; /* Start-cycle to first data valid delay */
+ u32 rd_cycle; /* Total read cycle time */
+ u32 wr_cycle; /* Total write cycle time */
- u16 bus_turnaround;
- u16 cycle2cycle_delay;
+ u32 bus_turnaround;
+ u32 cycle2cycle_delay;
- u16 wait_monitoring;
- u16 clk_activation;
+ u32 wait_monitoring;
+ u32 clk_activation;
/* The following are only on OMAP3430 */
- u16 wr_access; /* WRACCESSTIME */
- u16 wr_data_mux_bus; /* WRDATAONADMUXBUS */
+ u32 wr_access; /* WRACCESSTIME */
+ u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */
struct gpmc_bool_timings bool_timings;
};
+/* Device timings in picoseconds */
+struct gpmc_device_timings {
+ u32 t_ceasu; /* address setup to CS valid */
+ u32 t_avdasu; /* address setup to ADV valid */
+	/* XXX: try to combine t_avdp_r & t_avdp_w. The issue is that
+	 * tusb uses these timings even for sync, whilst ideally
+	 * adv_rd/(wr)_off should have considered t_avdh instead. This
+	 * indirectly necessitates r/w variations of t_avdp, as it is
+	 * possible to have one direction sync and the other async.
+	 */
+ u32 t_avdp_r; /* ADV low time (what about t_cer ?) */
+ u32 t_avdp_w;
+ u32 t_aavdh; /* address hold time */
+ u32 t_oeasu; /* address setup to OE valid */
+ u32 t_aa; /* access time from ADV assertion */
+ u32 t_iaa; /* initial access time */
+ u32 t_oe; /* access time from OE assertion */
+	u32 t_ce;	/* access time from CS assertion */
+ u32 t_rd_cycle; /* read cycle time */
+ u32 t_cez_r; /* read CS deassertion to high Z */
+ u32 t_cez_w; /* write CS deassertion to high Z */
+ u32 t_oez; /* OE deassertion to high Z */
+ u32 t_weasu; /* address setup to WE valid */
+ u32 t_wpl; /* write assertion time */
+ u32 t_wph; /* write deassertion time */
+ u32 t_wr_cycle; /* write cycle time */
+
+ u32 clk;
+ u32 t_bacc; /* burst access valid clock to output delay */
+ u32 t_ces; /* CS setup time to clk */
+ u32 t_avds; /* ADV setup time to clk */
+ u32 t_avdh; /* ADV hold time from clk */
+ u32 t_ach; /* address hold time from clk */
+ u32 t_rdyo; /* clk to ready valid */
+
+ u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */
+ u32 t_ce_avd; /* CS on to ADV on delay */
+
+	/* XXX: check the possibility of combining
+	 * cyc_aavdh_oe & cyc_aavdh_we
+	 */
+ u8 cyc_aavdh_oe;/* read address hold time in cycles */
+ u8 cyc_aavdh_we;/* write address hold time in cycles */
+ u8 cyc_oe; /* access time from OE assertion in cycles */
+ u8 cyc_wpl; /* write deassertion time in cycles */
+ u32 cyc_iaa; /* initial access time in cycles */
+
+ bool mux; /* address & data muxed */
+ bool sync_write;/* synchronous write */
+ bool sync_read; /* synchronous read */
+
+ /* extra delays */
+ bool ce_xdelay;
+ bool avd_xdelay;
+ bool oe_xdelay;
+ bool we_xdelay;
+};
+
+extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
+ struct gpmc_device_timings *dev_t);
+
extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
extern int gpmc_get_client_irq(unsigned irq_config);