*/
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
- cvmx_wait(100000000ull);
+ __delay(100000000ull);
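/*
 * Roughly 1e8 delay-loop iterations: on the order of 0.1-0.2 s at typical
 * OCTEON core clocks (assuming about one core cycle per iteration), which
 * presumably gives the interface time to quiesce after the RX port enable
 * bits were cleared above.
 */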
for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
retry_cnt = 100000;
spxx_clk_ctl.u64 = 0;
spxx_clk_ctl.s.runbist = 1;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(10 * MS);
+ __delay(10 * MS);
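/*
 * MS is defined outside this excerpt; from the way it is used (10 * MS,
 * 100 * MS, 1000 * MS for ~10 ms, ~100 ms and ~1 s pauses against a wait
 * that used to count core cycles), it is roughly one millisecond's worth
 * of busy-waiting.  Passing the same counts to __delay() keeps those
 * durations, on the assumption that one delay-loop iteration costs about
 * one core cycle.
 */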
spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
if (spxx_bist_stat.s.stat0)
cvmx_dprintf
spxx_clk_ctl.s.rcvtrn = 0;
spxx_clk_ctl.s.srxdlck = 0;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(100 * MS);
+ __delay(100 * MS);
/* Reset SRX0 DLL */
spxx_clk_ctl.s.srxdlck = 1;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
/* Waiting for Inf0 Spi4 RX DLL to lock */
- cvmx_wait(100 * MS);
+ __delay(100 * MS);
/* Enable dynamic alignment */
spxx_trn4_ctl.s.trntest = 0;
spxx_clk_ctl.s.rcvtrn = 1;
spxx_clk_ctl.s.srxdlck = 1;
cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(1000 * MS);
+ __delay(1000 * MS);
/* SRX0 clear the boot bit */
spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
/* Wait for the training sequence to complete */
cvmx_dprintf("SPI%d: Waiting for training\n", interface);
- cvmx_wait(1000 * MS);
+ __delay(1000 * MS);
/* Wait a really long time here */
timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
/*
#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__
+#include <linux/delay.h>
+
#include <asm/octeon/cvmx-address.h>
#include <asm/octeon/cvmx-fpa-defs.h>
}
/* Enforce a 10 cycle delay between config and enable */
- cvmx_wait(10);
+ __delay(10);
}
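/*
 * __delay(10) spins for ten loop iterations.  Assuming each iteration of
 * the busy loop costs at least one core cycle, the required 10-cycle gap
 * between configuring the FPA and enabling it is still honoured; if
 * anything the wait gets slightly longer, which is harmless here.
 */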
/* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/delay.h>
enum cvmx_mips_space {
CVMX_MIPS_SPACE_XKSEG = 3LL,
return cycle;
}
-/**
- * Wait for the specified number of cycle
- *
- */
-static inline void cvmx_wait(uint64_t cycles)
-{
- uint64_t done = cvmx_get_cycle() + cycles;
-
- while (cvmx_get_cycle() < done)
- ; /* Spin */
-}
-
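/*
 * The helper removed above busy-waited on the free-running cycle counter,
 * so it paused for an exact number of core cycles; __delay() instead spins
 * for a given number of delay-loop iterations, which the conversion treats
 * as close enough for these coarse waits.  If a caller ever needed the old
 * cycle-exact behaviour, a local helper could still be built from
 * cvmx_get_cycle() alone; a minimal sketch, with a hypothetical name:
 */
static inline void cvmx_wait_cycles(uint64_t cycles)
{
	uint64_t done = cvmx_get_cycle() + cycles;

	while (cvmx_get_cycle() < done)
		;	/* spin until the cycle counter passes the target */
}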
/**
* Reads a chip global cycle counter. This counts CPU cycles since
* chip reset. The counter is 64 bit.
result = -1; \
break; \
} else \
- cvmx_wait(100); \
+ __delay(100); \
} \
} while (0); \
result; \
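/*
 * For context: the enclosing macro (this looks like cvmx.h's
 * CVMX_WAIT_FOR_FIELD64, judging by the body) polls a 64-bit CSR field,
 * backing off with __delay(100) between reads, and evaluates to 0 on
 * success or -1 on timeout.  A hedged usage sketch, with a purely
 * illustrative CSR, field and timeout:
 *
 *	if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
 *				   union cvmx_gmxx_prtx_cfg, en, ==, 1, 10000))
 *		cvmx_dprintf("Timeout waiting for GMX port enable\n");
 */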
cvmx_dprintf("PCIe: Port %d link timeout\n", pcie_port);
return -1;
}
- cvmx_wait(10000);
+ __delay(10000);
pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
} while (pciercx_cfg032.s.dlla == 0);
* don't poll PESCX_CTL_STATUS2[PCIERST], but simply wait a
* fixed number of cycles.
*/
- cvmx_wait(400000);
+ __delay(400000);
/*
* PESCX_BIST_STATUS2[PCLK_RUN] was missing on pass 1 of
i = in_p_offset;
while (i--) {
cvmx_write64_uint32(write_address, 0);
- cvmx_wait(10000);
+ __delay(10000);
}
/*
dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
old_in_fif_p_count = dbg_data.s.data & 0xff;
cvmx_write64_uint32(write_address, 0);
- cvmx_wait(10000);
+ __delay(10000);
dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
in_fif_p_count = dbg_data.s.data & 0xff;
} while (in_fif_p_count != ((old_in_fif_p_count+1) & 0xff));
cvmx_dprintf("PCIe: Port %d aligning TLP counters as workaround to maintain ordering\n", pcie_port);
while (in_fif_p_count != 0) {
cvmx_write64_uint32(write_address, 0);
- cvmx_wait(10000);
+ __delay(10000);
in_fif_p_count = (in_fif_p_count + 1) & 0xff;
}
/*
do {
if (cvmx_get_cycle() - start_cycle > octeon_get_clock_rate())
return -1;
- cvmx_wait(10000);
+ __delay(10000);
pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port, CVMX_PCIERCX_CFG032(pcie_port));
} while ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1));
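/*
 * The same "poll PCIERCX_CFG032 until the link is usable" pattern appears
 * twice in this excerpt.  A minimal sketch of how it could be factored out,
 * built only from calls already visible above (the helper name is
 * hypothetical, not part of the patch):
 */
static int octeon_pcie_wait_for_link(int pcie_port)
{
	union cvmx_pciercx_cfg032 pciercx_cfg032;
	uint64_t start_cycle = cvmx_get_cycle();

	do {
		/* Give up after roughly one second of core-clock cycles. */
		if (cvmx_get_cycle() - start_cycle > octeon_get_clock_rate())
			return -1;
		__delay(10000);
		pciercx_cfg032.u32 = cvmx_pcie_cfgx_read(pcie_port,
				CVMX_PCIERCX_CFG032(pcie_port));
	} while (pciercx_cfg032.s.dlla == 0 || pciercx_cfg032.s.lt == 1);

	return 0;	/* data link layer active, link training finished */
}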