diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 0821747..278f325 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -178,6 +178,11 @@ extern struct proc_dir_entry * proc_mckinley_root;
 #define ROPE6_CTL      0x230
 #define ROPE7_CTL      0x238
 
+#define IOC_ROPE0_CFG  0x500   /* pluto only */
+#define   IOC_ROPE_AO    0x10  /* Allow "Relaxed Ordering" */
+
+
+
 #define HF_ENABLE      0x40
 
 
@@ -1642,9 +1647,9 @@ sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
 **
 **************************************************************************/
 
-static void __iomem *ioc_remap(struct sba_device *sba_dev, int offset)
+static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
 {
-       return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
+       return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
 }
 
 static void sba_hw_init(struct sba_device *sba_dev)
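
For context, here is a minimal sketch of the ioremap_nocache() pattern the hunk above switches to: map the device's register space uncached, access it only through the __iomem accessors, and unmap when done. The base address (example_hpa), register offset (EXAMPLE_REG) and mapping size (EXAMPLE_SIZE) are made-up placeholders; the real driver uses its own READ_REG/WRITE_REG wrappers and SBA_FUNC_SIZE.

#include <asm/io.h>

#define EXAMPLE_SIZE    0x1000  /* illustrative mapping size */
#define EXAMPLE_REG     0x40    /* illustrative register offset */

static void example_poke_device(unsigned long example_hpa)
{
        /* Map the register space uncached; MMIO must never be cached. */
        void __iomem *regs = ioremap_nocache(example_hpa, EXAMPLE_SIZE);

        if (!regs)
                return;

        writel(0x1, regs + EXAMPLE_REG);
        (void)readl(regs + EXAMPLE_REG);  /* read back to flush the posted write */

        iounmap(regs);
}
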
@@ -1759,19 +1764,33 @@ printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
 
        sba_dev->num_ioc = num_ioc;
        for (i = 0; i < num_ioc; i++) {
-               /*
-               ** Make sure the box crashes if we get any errors on a rope.
-               */
-               WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE0_CTL);
-               WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE1_CTL);
-               WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE2_CTL);
-               WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE3_CTL);
-               WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE4_CTL);
-               WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE5_CTL);
-               WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE6_CTL);
-               WRITE_REG(HF_ENABLE, sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
-
-               /* flush out the writes */
+               unsigned long ioc_hpa = sba_dev->ioc[i].ioc_hpa;
+               unsigned int j;
+
+               for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {
+
+                       /*
+                        * Clear ROPE(N)_CONFIG AO bit.
+                        * Disables "NT Ordering" (~= !"Relaxed Ordering")
+                        * Overrides bit 1 in DMA Hint Sets.
+                        * Improves netperf UDP_STREAM by ~10% for bcm5701.
+                        */
+                       if (IS_PLUTO(sba_dev->iodc)) {
+                               unsigned long rope_cfg, cfg_val;
+
+                               rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
+                               cfg_val = READ_REG(rope_cfg);
+                               cfg_val &= ~IOC_ROPE_AO;
+                               WRITE_REG(cfg_val, rope_cfg);
+                       }
+
+                       /*
+                       ** Make sure the box crashes on rope errors.
+                       */
+                       WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
+               }
+
+               /* flush out the last writes */
                READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
 
                DBG_INIT("      ioc[%d] ROPE_CFG 0x%Lx  ROPE_DBG 0x%Lx\n",
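
Read per rope rather than per byte offset, the new loop above amounts to the sketch below (illustrative only: READ_REG, WRITE_REG, ROPES_PER_IOC, IOC_ROPE0_CFG and IOC_ROPE_AO are the driver's existing definitions, while example_clear_rope_ao and its ioc_hpa parameter are placeholder names). Each rope's config register sits sizeof(u64) bytes past the previous one, which is why the patch strides j by sizeof(u64).

static void example_clear_rope_ao(unsigned long ioc_hpa)
{
        unsigned int n;

        for (n = 0; n < ROPES_PER_IOC; n++) {
                unsigned long rope_cfg = ioc_hpa + IOC_ROPE0_CFG + n * sizeof(u64);
                unsigned long cfg_val = READ_REG(rope_cfg);

                /* Clear AO so the rope falls back to strict ordering. */
                cfg_val &= ~IOC_ROPE_AO;
                WRITE_REG(cfg_val, rope_cfg);
        }
}
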
@@ -2040,7 +2059,7 @@ sba_driver_callback(struct parisc_device *dev)
        u32 func_class;
        int i;
        char *version;
-       void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
+       void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
        struct proc_dir_entry *info_entry, *bitmap_entry, *root;
 
        sba_dump_ranges(sba_addr);