*/
pgtbl = (VDMA_PGTBL_ENTRY *)__get_free_pages(GFP_KERNEL | GFP_DMA,
get_order(VDMA_PGTBL_SIZE));
- if (!pgtbl)
- BUG();
+ BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
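Every hunk in this patch performs the same mechanical rewrite: an open-coded `if (cond) BUG();` becomes `BUG_ON(cond);`. As a rough illustration only, the userspace sketch below mimics that shape; the real BUG()/BUG_ON() are kernel macros (from <asm/bug.h> / <asm-generic/bug.h>) and the definitions here are stand-ins, not the kernel's.

/*
 * Minimal userspace sketch of the if()/BUG() -> BUG_ON() rewrite shown in
 * the hunks.  BUG() and BUG_ON() below are illustrative stand-ins only.
 */
#include <stdio.h>
#include <stdlib.h>

#define BUG() do { fprintf(stderr, "BUG at %s:%d\n", __FILE__, __LINE__); abort(); } while (0)
#define BUG_ON(condition) do { if (condition) BUG(); } while (0)

int main(void)
{
	void *pgtbl = malloc(4096);

	/*
	 * Old style:           New style (as in the hunks):
	 *   if (!pgtbl)          BUG_ON(!pgtbl);
	 *           BUG();
	 */
	BUG_ON(!pgtbl);

	free(pgtbl);
	return 0;
}

The conversion does not change behaviour; it just expresses the assertion in one line, which is why the remaining hunks below apply the identical pattern to the other call sites.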
u32 *w;
unsigned char *b;
- if (!cpu_has_veic && !cpu_has_vint)
- BUG();
+ BUG_ON(!cpu_has_veic && !cpu_has_vint);
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
void __kunmap(struct page *page)
{
- if (in_interrupt())
- BUG();
+ BUG_ON(in_interrupt());
if (!PageHighMem(page))
return;
kunmap_high(page);
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
- if (!pte_none(*(kmap_pte-idx)))
- BUG();
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
local_flush_tlb_one((unsigned long)vaddr);
return;
}
- if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
- BUG();
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
/*
* force other mappings to Oops if they'll try to access
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
set_pmd(pmd, __pmd((unsigned long)pte));
- if (pte != pte_offset_kernel(pmd, 0))
- BUG();
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
vaddr += PMD_SIZE;
}
end = address + size;
if (end > PMD_SIZE)
end = PMD_SIZE;
- if (address >= end)
- BUG();
+ BUG_ON(address >= end);
pfn = phys_addr >> PAGE_SHIFT;
do {
if (!pte_none(*pte)) {
if (end > PGDIR_SIZE)
end = PGDIR_SIZE;
phys_addr -= address;
- if (address >= end)
- BUG();
+ BUG_ON(address >= end);
do {
pte_t * pte = pte_alloc_kernel(pmd, address);
if (!pte)
phys_addr -= address;
dir = pgd_offset(&init_mm, address);
flush_cache_all();
- if (address >= end)
- BUG();
+ BUG_ON(address >= end);
do {
pud_t *pud;
pmd_t *pmd;