DISCLAIMER: This patch file is provided as is without any guarantees of any kind. It is the difference between a current kernel version and the kernel I use to develop new device drivers, especially for VME. --- linux-2.2.12/Documentation/Configure.help Fri Aug 27 11:19:05 1999 +++ linux/Documentation/Configure.help Fri Aug 27 11:18:36 1999 @@ -11590,6 +11590,28 @@ by IrTTY. To activate support for Parallax dongles you will have to insert "irattach -d litelink" in the /etc/irda/drivers script. +VME bus support +CONFIG_VMEBUS + Say Y here if you want to access the VME bus through PCI<->VME + bridges like the ones found on Motorola MVME boards with PowerPC + processors. For now only the Tundra Universe bridge is supported + and it is still in an early stage of development. + + Note that the answer to this question won't directly affect the + kernel: saying N will just cause this configure script to skip all + the questions about VME bus bridge and devices support. If unsure, say N. + +Universe PCI/VME bridge driver +CONFIG_VME_UNIVERSE + This is an experimental driver which is currently being developed on + Motorola MVME 2600 platforms to control data acquisition systems in + VME crates. It has the potential to crash the whole system since it uses + so much address space on the PCI bus that it could easily cause conflicts + with other devices. For more details have a look into drivers/char/vme + directory. It is not yet supported as a loadable module. Do not include + this unless you know that you absolutely need it. Just say N unless + kernel hacking is one of your favorite activities ! + VME (Motorola and BVM) support CONFIG_VME Say Y here if you want to build a kernel for a 680x0 based VME --- linux-2.2.12/Makefile Fri Aug 27 11:19:05 1999 +++ linux/Makefile Tue Aug 31 10:51:03 1999 @@ -13,7 +13,8 @@ TOPDIR := $(shell if [ "$$PWD" != "" ]; then echo $$PWD; else pwd; fi) HPATH = $(TOPDIR)/include -FINDHPATH = $(HPATH)/asm $(HPATH)/linux $(HPATH)/scsi $(HPATH)/net +FINDHPATH = $(HPATH)/asm $(HPATH)/linux $(HPATH)/scsi $(HPATH)/net \ + $(HPATH)/vme HOSTCC =gcc HOSTCFLAGS =-Wall -Wstrict-prototypes -O2 -fomit-frame-pointer @@ -148,6 +149,10 @@ ifdef CONFIG_SBUS DRIVERS := $(DRIVERS) drivers/sbus/sbus.a +endif + +ifdef CONFIG_VMEBUS +DRIVERS := $(DRIVERS) drivers/vme/vme.a endif ifdef CONFIG_ZORRO --- linux-2.2.12/arch/i386/config.in Thu Aug 19 13:03:24 1999 +++ linux/arch/i386/config.in Thu Aug 19 13:04:56 1999 @@ -198,6 +198,16 @@ fi endmenu +if [ "$CONFIG_EXPERIMENTAL" = "y" ]; then + mainmenu_option next_comment + comment 'VME bus support' + bool 'VME bus support' CONFIG_VMEBUS + if [ "$CONFIG_VMEBUS" != "n" ]; then + source drivers/vme/Config.in + fi + endmenu +fi + mainmenu_option next_comment comment 'Kernel hacking' --- linux-2.2.12/drivers/Makefile Thu Aug 19 13:03:31 1999 +++ linux/drivers/Makefile Thu Aug 19 13:26:49 1999 @@ -10,7 +10,7 @@ SUB_DIRS := block char net misc sound MOD_SUB_DIRS := $(SUB_DIRS) ALL_SUB_DIRS := $(SUB_DIRS) pci sgi scsi sbus cdrom isdn pnp \ - macintosh video dio zorro fc4 usb + macintosh video dio zorro fc4 usb vme ifdef CONFIG_DIO SUB_DIRS += dio @@ -19,6 +19,11 @@ ifdef CONFIG_PCI SUB_DIRS += pci +endif + +ifdef CONFIG_VMEBUS +SUB_DIRS += vme +MOD_SUB_DIRS += vme endif ifdef CONFIG_SBUS --- linux-2.2.12/drivers/char/mem.c Thu Aug 19 13:03:33 1999 +++ linux/drivers/char/mem.c Thu Aug 19 13:05:04 1999 @@ -52,6 +52,9 @@ #if defined(CONFIG_PPC) || defined(CONFIG_MAC) extern void adbdev_init(void); #endif +#ifdef CONFIG_VMEBUS +void vmebus_init(void); +#endif 
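The hunk above only declares and calls vmebus_init(); the function itself lives in drivers/vme/vme_init.c, which this excerpt does not show. Purely as an illustration of the shape such a bus init hook takes in 2.2-era kernels, a hypothetical sketch (vme_bridge_probe() is a made-up name, not part of this patch):

    /* Hypothetical sketch only; the real body is in drivers/vme/vme_init.c,
     * not shown in this patch excerpt. vme_bridge_probe() is invented here
     * for illustration.
     */
    void __init vmebus_init(void)
    {
        /* locate the PCI<->VME bridge and set up the character devices */
        if (vme_bridge_probe() < 0)
            printk(KERN_WARNING "vme: no PCI<->VME bridge found\n");
    }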
 #ifdef CONFIG_USB_UHCI
 int uhci_init(void);
 #endif
@@ -681,6 +684,9 @@
 #endif
 #ifdef CONFIG_VIDEO_DEV
 	videodev_init();
+#endif
+#ifdef CONFIG_VMEBUS
+	vmebus_init();
 #endif
 	return 0;
 }
diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/Config.in linux/drivers/vme/Config.in
--- linux-2.2.12/drivers/vme/Config.in	Thu Jan  1 01:00:00 1970
+++ linux/drivers/vme/Config.in	Thu Jul 15 13:01:36 1999
@@ -0,0 +1,15 @@
+# drivers/vme/Config.in
+#
+# 25 Nov 1998, Gabriel Paubert,
+#
+# will have to change if we want to support other kinds of bridges
+
+tristate 'Universe PCI/VME bridge (experimental)' CONFIG_VME_UNIVERSE
+
+dep_tristate 'BC336 Time code processor (experimental)' CONFIG_VME_BC336 $CONFIG_VME_UNIVERSE
+
+dep_tristate 'CM-MEM20 VME memory card (experimental)' CONFIG_VME_CM_MEM20 $CONFIG_VME_UNIVERSE
+
+dep_tristate 'IRAM CORRELATOR (92 vintage)' CONFIG_VME_IRAM_CORREL92 $CONFIG_VME_UNIVERSE
+
+dep_tristate 'IRAM CORRELATOR (99 vintage)' CONFIG_VME_IRAM_CORREL99 $CONFIG_VME_UNIVERSE
diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/Makefile linux/drivers/vme/Makefile
--- linux-2.2.12/drivers/vme/Makefile	Thu Jan  1 01:00:00 1970
+++ linux/drivers/vme/Makefile	Thu Jul 15 13:02:01 1999
@@ -0,0 +1,86 @@
+#
+# Makefile for the vme device drivers.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now inherited from the
+# parent makes..
+#
+
+L_TARGET := vme.a
+L_OBJS   :=
+LX_OBJS  :=
+
+M_OBJS   :=
+MX_OBJS  :=
+MIX_OBJS :=
+
+O_TARGET :=
+O_OBJS   :=
+OX_OBJS  :=
+
+# CONFIG_VMEBUS can't yet be selected as a module.
+
+# Standard makefile convolutions to get the exported symbols in the kernel.
+ifeq ($(CONFIG_VMEBUS),y) + O_TARGET := vme.o + O_OBJS := vme_init.o + ifeq ($(ARCH),ppc) + O_OBJS += copy_user_io.o + ifeq ($(CONFIG_MODULES),y) + OX_OBJS += vme_syms.o + endif + endif + L_OBJS += vme.o +#else +# ifeq ($(CONFIG_VMEBUS),m) +# MIX_OBJS += vme_syms.o +# M_OBJS += vme_mod.o +# endif +endif + + +ifeq ($(CONFIG_VME_UNIVERSE),y) +LX_OBJS += universe.o +else + ifeq ($(CONFIG_VME_UNIVERSE),m) + MX_OBJS += universe.o + endif +endif + +ifeq ($(CONFIG_VME_BC336),y) +L_OBJS += bc336.o +else + ifeq ($(CONFIG_VME_BC336),m) + M_OBJS += bc336.o + endif +endif + +ifeq ($(CONFIG_VME_CM_MEM20),y) +L_OBJS += cm_mem20.o +else + ifeq ($(CONFIG_VME_CM_MEM20),m) + M_OBJS += cm_mem20.o + endif +endif + +ifeq ($(CONFIG_VME_IRAM_CORREL92),y) +L_OBJS += correl92.o +else + ifeq ($(CONFIG_VME_IRAM_CORREL92),m) + M_OBJS += correl92.o + endif +endif + +ifeq ($(CONFIG_VME_IRAM_CORREL99),y) +L_OBJS += correl99.o +else + ifeq ($(CONFIG_VME_IRAM_CORREL99),m) + M_OBJS += correl99.o + endif +endif + +include $(TOPDIR)/Rules.make + Common subdirectories: linux-2.2.12/drivers/vme/RCS and linux/drivers/vme/RCS diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/README.universe linux/drivers/vme/README.universe --- linux-2.2.12/drivers/vme/README.universe Thu Jan 1 01:00:00 1970 +++ linux/drivers/vme/README.universe Tue Apr 27 07:04:41 1999 @@ -0,0 +1,3 @@ +Please see the file universe.tex (not yet finished). + + diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/bc336.c linux/drivers/vme/bc336.c --- linux-2.2.12/drivers/vme/bc336.c Thu Jan 1 01:00:00 1970 +++ linux/drivers/vme/bc336.c Mon Nov 15 13:30:46 1999 @@ -0,0 +1,439 @@ +/* This is the code to control Datum bc336 Time code processor. + * G.Paubert paubert@iram.es 1998-1999 + */ + +/* + * Results of test while compiling the kernel with 2000 Hz heartbeat: + * + * count/min/max/sum/sum2=3cc377/7bc/394e/7b87065df/fb25a116c337 + * + * Interrupts: 3982199 + * Interval from first to last: 33159145000 60nS ticks = 1989.5487s + * Minimum: 1980 ticks = 118.8 uS Ouch! 
+ * Maximum: 14660 ticks = 879.6 uS
+ *
+ * Same results on an idle kernel:
+ *
+ * count/min/max/sum/sum2=2b740f/1be2/251e/5856558e7/b39531e34433
+ *
+ * Minimum: 7138 ticks = 428.3 uS
+ * Maximum: 9502 ticks = 570.1 uS
+ *
+ * Results of test while compiling the kernel with 1000 Hz heartbeat:
+ *
+ * count/min/max/sum/sum2=189783/19ff/680a/63fc8a62a/19689caba4bde
+ *
+ * Interrupts: 1611651
+ * Interval from first to last: 26839918000 60nS ticks = 1610.3951s
+ * Minimum: 6655 ticks = 399.3 uS
+ * Maximum: 26634 ticks = 1598.0 uS
+ *
+ * Same results on an idle kernel:
+ *
+ * count/min/max/sum/sum2=2862c0/3b00/471c/a433bad2e/29b9ebf4604f2
+ *
+ * Minimum: 15104 ticks = 906.2 uS
+ * Maximum: 18204 ticks = 1092.2 uS
+ *
+ * Results of test while compiling the kernel with 500 Hz heartbeat:
+ *
+ * count/min/max/sum/sum2=e96d0/6cc6/9791/76a24949f/3c4b30a03f2d3
+ *
+ * Interrupts: 956112
+ * Interval from first to last: *********** 60nS ticks = ****.****s
+ * Minimum: 27846 ticks = 1670.8 uS
+ * Maximum: 28801 ticks = 2328.1 uS
+ *
+ * Same results on an idle (only cvs update -d) kernel:
+ *
+ * count/min/max/sum/sum2=1032c6/7ad0/897f/83b83cb40/42f1a87fdaf02
+ *
+ * Minimum: 31440 ticks = 1886.4 uS
+ * Maximum: 35199 ticks = 2111.9 uS
+ *
+ * And on a truly idle kernel:
+ *
+ * count/min/max/sum/sum2=b85600/7c31/8806/5daf650e2e/2f9d17c6193b62
+ *
+ * Minimum: 31793 ticks = 1907.6 uS
+ * Maximum: 34822 ticks = 2089.3 uS
+ *
+ * 2 days with various activities including make modules:
+ *
+ * count/min/max/sum/sum2=4e6a6cc/666f/9ddc/27da70c8063/144131ed4753399
+ *
+ * Minimum: 27259 ticks = 1635.5 uS
+ * Maximum: 40412 ticks = 2424.7 uS
+ *
+ * making a diff of 2 linux source trees: max delay~252 us
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define TCSEL    0x0001
+#define TVINTEN  0x0003
+#define EVENT    0x0005
+#define MODE     0x0007
+#define PROPDEL0 0x0009
+#define PROPDEL1 0x000b
+#define HBCTRL   0x0021
+#define HBRATE0  0x0023
+#define HBRATE1  0x0025
+#define FILTER   0x002d
+#define FRMCNT   0x002f
+#define LEAP     0x0037
+
+#define HOLDSTAT 0x01f3
+
+#define REQTIME(i) (0x0401+2*i)
+#define STATUS   0x0419
+#define FERR0    0x041b
+#define FERR1    0x041d
+#define TVFLAG   0x041f
+#define EVTIME(i) (0x0421+2*i)
+
+#define WARMSTRT 0x07fd
+#define INT3ACK  0x07ff
+
+#define INTCR0   0x0801
+#define INTCR1   0x0803
+#define INTCR2   0x0805
+#define INTCR3   0x0807
+#define INTV0    0x0809
+#define INTV1    0x080b
+#define INTV2    0x080d
+#define INTV3    0x080f
+
+#define TIMEREQ  0x0c01
+
+#define PINTCLR0 0x0e01
+#define PINTCLR1 0x0e03
+#define PINTCLR2 0x0e05
+#define PINTCLR3 0x0e07
+
+
+static int bc336_open(struct inode *, struct file *);
+static int bc336_release(struct inode *, struct file *);
+static int bc336_ioctl(struct inode *, struct file *,
+                       unsigned int, unsigned long);
+
+static struct file_operations bc336_fops = {
+    NULL,         /* lseek */
+    NULL,         /* read */
+    NULL,         /* write */
+    NULL,         /* readdir */
+    NULL,         /* poll */
+    bc336_ioctl,
+    NULL,         /* mmap (might be implemented later) */
+    bc336_open,
+    NULL,         /* flush */
+    bc336_release,
+    NULL,         /* fsync */
+    NULL,         /* fasync */
+    NULL,         /* check_media_change */
+    NULL,         /* revalidate */
+    NULL          /* lock */
+};
+
+
+static void pps_handler(struct vme_interrupt *);
+static void heartbeat_handler(struct vme_interrupt *);
+static void event_handler(struct vme_interrupt *);
+
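The count/min/max/sum/sum2 dumps quoted at the top of this file are printed in hex at module unload (see cleanup_module() further down) from the counters kept in the struct private that follows. A small user-space sketch of turning them into latency figures, assuming the two printed %x%08x halves of each 64-bit sum have already been joined, and using the 60 ns per timebase tick implied by the comments above (1980 ticks = 118.8 uS):

    /* Standalone user-space helper, not part of the driver. */
    #include <stdio.h>
    #include <math.h>

    static void decode_stats(double count, double min, double max,
                             double sum, double sum2)
    {
        double mean = sum / count;                 /* in ticks */
        double var  = sum2 / count - mean * mean;  /* in ticks^2 */
        /* one timebase tick is 60 ns on these MVME boards */
        printf("min %.1f us, max %.1f us, mean %.1f us, rms %.2f us\n",
               min * 0.060, max * 0.060, mean * 0.060,
               sqrt(var) * 0.060);
    }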
+static struct private {
+    struct vme_device device;
+    struct vme_region regs;
+    struct vme_interrupt pps;
+    struct vme_interrupt heartbeat;
+    struct vme_interrupt event;
+    u_int pps_count, hb_count, event_count;
+    u_int del_min, del_max, tstamp;
+    u_int del_sum, del_sum_ovf, del_sum2, del_sum2_ovf;
+} bc336 = {
+    device: {fops: &bc336_fops,
+             name: "bc336",
+             minor: 1},
+    regs: {flags: VME_AM_A16(8|16|32) | VME_USE_PIO },
+    pps: {handler: pps_handler,
+          name: "1pps"},
+    heartbeat: {handler: heartbeat_handler,
+                name: "heartbeat"},
+    event: {handler: event_handler,
+            name: "event"},
+    del_min: ~0UL,
+};
+
+static int base=-1;
+/* list of vector/level pairs */
+static int irqs[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static u_int heartbeat=1000;
+static int code=0x52;	/* Flywheeling */
+MODULE_PARM(base, "i");
+MODULE_PARM(irqs, "2-8i");
+MODULE_PARM(code, "i");
+MODULE_PARM(heartbeat, "i");
+
+static void pps_handler(struct vme_interrupt *p) {
+    struct private *lp=(struct private *)(p->device);
+    /* Count the 1pps interrupt */
+    lp->pps_count++;
+    if (lp->pps_count == 2) {
+        volatile u_char * dp = lp->regs.kvaddr;
+        vme_write8(0, dp+PINTCLR2);
+        vme_write8(lp->heartbeat.level | 0x10, dp+INTCR2);
+    }
+}
+
+static void heartbeat_handler(struct vme_interrupt *p) {
+    struct private *lp=(struct private *)(p->device);
+    int tb;
+
+    asm volatile("mftb %0": "=r" (tb));
+    if (lp->hb_count) {
+        u_int delta = tb - lp->tstamp;
+        if (delta<lp->del_min) lp->del_min=delta;
+        if (delta>lp->del_max) lp->del_max=delta;
+        lp->del_sum += delta;
+        if (lp->del_sum < delta) lp->del_sum_ovf++;
+        lp->del_sum2 += delta*delta;
+        if (lp->del_sum2 < delta*delta) lp->del_sum2_ovf++;
+    }
+    lp->hb_count++;
+    lp->tstamp = tb;
+}
+
+static void event_handler(struct vme_interrupt *p) {
+    struct private *lp=(struct private *)(p->device);
+    volatile u_char *dp=lp->regs.kvaddr;
+    u_char dummy;
+
+    lp->event_count++;
+    dummy=vme_read8(dp+INT3ACK);
+    vme_write8(vme_read8(dp+TVFLAG)&~2, dp+TVFLAG);
+}
+
+static int bc336_open(struct inode * inode, struct file * file)
+{
+    MOD_INC_USE_COUNT;
+    return 0;
+}
+
+static int bc336_release(struct inode * inode, struct file * file)
+{
+    MOD_DEC_USE_COUNT;
+    return 0;
+}
+
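bc336_ioctl() below validates its argument with _IOC_SIZE()/_IOC_READ, which only works if BC336_READ is built with the standard _IO* macros in a header that is not part of this patch excerpt. A plausible sketch of that declaration; the magic character 'b', the command number, and the exact field types are assumptions inferred from the __put_user() calls below:

    /* Hedged sketch of the (not included) user-visible header. */
    typedef struct {
        int state;              /* raw STATUS register */
        int yday, hour, min, sec;
        int halfmicroseconds;   /* 0.5 us units from REQTIME(9..11) */
    } bc336_timeval;

    #define BC336_READ _IOR('b', 0, bc336_timeval)  /* magic assumed */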
+static int bc336_ioctl(struct inode * inode, struct file * file,
+                       unsigned int cmd, unsigned long arg)
+{
+    int error, i, ready;
+    volatile u_char *dp=bc336.regs.kvaddr;
+    if (_IOC_SIZE(cmd) &&
+        !access_ok((_IOC_TYPE(cmd)&_IOC_READ) ? VERIFY_WRITE:VERIFY_READ,
+                   arg, _IOC_SIZE(cmd))) return -EFAULT;
+    switch(cmd) {
+
+#define param ((bc336_timeval *) arg)
+    case BC336_READ:
+        error = __put_user(vme_read8(dp+STATUS), &param->state);
+        if (error) break;
+        error = -ETIME;
+        vme_write8(vme_read8(dp+TVFLAG) & ~1, dp+TVFLAG);
+        /* PPC never reorders writes, but a bridge might still
+         * perform store gathering. It is unlikely in this case
+         * since TVFLAG and TIMEREQ are quite far away.
+         */
+        vme_waw_barrier();
+        vme_write8(0, dp+TIMEREQ);
+        /* This is necessary otherwise the write8 might sit in a
+         * write buffer for a while and we want the preceding write
+         * to be pushed to the device as soon as possible.
+         */
+        vme_raw_barrier();
+        /* Should use the interrupt with a timeout ? That's just
+         * above 100 uS, so it is not that long. But we should
+         * lock against concurrent use under SMP (and UP if we use
+         * interrupts).
+         */
+        for (i = 0; i<100; i++) {
+            if ((ready=vme_read8(dp+TVFLAG)&0x01)) break;
+            udelay(5);
+        }
+        if (!ready) break;
+        error = __put_user(vme_read8(dp+REQTIME(0))*100 +
+                           vme_read8(dp+REQTIME(1))*10 +
+                           vme_read8(dp+REQTIME(2)),
+                           &param->yday);
+        error |= __put_user(vme_read8(dp+REQTIME(3))*10 +
+                            vme_read8(dp+REQTIME(4)),
+                            &param->hour);
+        error |= __put_user(vme_read8(dp+REQTIME(5))*10 +
+                            vme_read8(dp+REQTIME(6)),
+                            &param->min);
+        error |= __put_user(vme_read8(dp+REQTIME(7))*10 +
+                            vme_read8(dp+REQTIME(8)),
+                            &param->sec);
+        error |= __put_user((vme_read8(dp+REQTIME(9))<<16) +
+                            (vme_read8(dp+REQTIME(10))<<8) +
+                            vme_read8(dp+REQTIME(11)),
+                            &param->halfmicroseconds);
+        break;
+
+#undef param
+    default:
+        return -EINVAL;
+    }
+
+    return error;
+}
+
+#ifdef MODULE
+void cleanup_module(void) {
+    struct private *lp = &bc336;
+    volatile u_char *dp = lp->regs.kvaddr;
+    printk("Unloading bc336:\n"
+           " count/min/max/sum/sum2=%x/%x/%x/%x%08x/%x%08x\n",
+           lp->hb_count, lp->del_min, lp->del_max,
+           lp->del_sum_ovf, lp->del_sum,
+           lp->del_sum2_ovf, lp->del_sum2);
+    /* Disable interrupts */
+    if (dp) {
+        vme_write8(0, dp+INTCR0);
+        vme_write8(0, dp+INTCR2);
+        vme_write8(0, dp+INTCR3);
+    }
+    bc336.regs.kvaddr = 0;
+    vme_unregister_device(&bc336.device);
+}
+#endif
+
+#ifdef MODULE
+#define bc336_init init_module
+#endif
+
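How an application would consume the BC336_READ ioctl above, as a user-space sketch; the /dev node name is hypothetical (the driver only declares minor 1 in its vme_device above), and bc336_timeval comes from the not-included header:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    int read_bc336_time(void)
    {
        bc336_timeval tv;
        int fd = open("/dev/bc336", O_RDONLY);   /* hypothetical node */
        if (fd < 0 || ioctl(fd, BC336_READ, &tv) < 0)
            return -1;
        printf("day %d %02d:%02d:%02d +%d*0.5us (status %02x)\n",
               tv.yday, tv.hour, tv.min, tv.sec,
               tv.halfmicroseconds, tv.state);
        close(fd);
        return 0;
    }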
+int __init bc336_init(void)
+{
+    int error;
+    u_long tmp;
+    volatile u_char * dp;
+
+    /* Minimal check that there is a board at this address: at least it
+     * guarantees that there will be an error if the board has been
+     * unplugged or is at a different address. Recognizing that it actually
+     * is the board we expect is too complex since there is no signature.
+     */
+    error = vme_safe_access(VME_READ8, VME_AM_A16(8),
+                            base+STATUS, &tmp);
+    if (error) return error;
+
+    bc336.regs.base=base;
+    bc336.regs.limit=base + 0xfff;
+    bc336.pps.level=irqs[0];
+    bc336.pps.vector=irqs[1];
+    bc336.heartbeat.level=irqs[4];
+    bc336.heartbeat.vector=irqs[5];
+    bc336.event.level=irqs[6];
+    bc336.event.vector=irqs[7];
+
+    do {
+        error = vme_register_region(&bc336.device, &bc336.regs);
+        if (error) break;
+        dp = bc336.regs.kvaddr;
+        error = vme_request_interrupt(&bc336.device, &bc336.pps);
+        if (error) break;
+        printk("bc336 at kernel virtual address %p,\n"
+               "1 pps interrupt level %d vector %d\n",
+               dp, bc336.pps.level, bc336.pps.vector);
+
+        while(vme_read8(dp+WARMSTRT)) udelay(10); /* Should timeout */
+        vme_write8(code, dp+TCSEL);
+
+        /* In this case use xtime (10ms resolution is enough),
+         * for testing, this code is bloat and should be removed
+         * in a production version. It is not even correct!
+         */
+        if (code==0x52 || code==3) {
+            struct timeval tv = xtime;
+            u_int day, tmp, year;
+            /* Ensure that we read a coherent value */
+            while (tv.tv_usec != xtime.tv_usec) tv = xtime;
+            day = xtime.tv_sec/86400 + 365; /* Since 1969 Jan 1 */
+            tmp = xtime.tv_sec%86400;
+            year = day*4/1461; /* Since 1969 */
+            vme_write8( (year%4 == 3) ? 1 : 0, dp+LEAP);
+
+            day = (day-(year*1461)/4) + 1;
+
+            vme_write8(day/100, dp+REQTIME(0)); day%=100;
+            vme_write8(day/10, dp+REQTIME(1)); day%=10;
+            vme_write8(day, dp+REQTIME(2));
+
+            vme_write8(tmp/36000, dp+REQTIME(3)); tmp%=36000;
+            vme_write8(tmp/3600, dp+REQTIME(4)); tmp%=3600;
+            vme_write8(tmp/600, dp+REQTIME(5)); tmp%=600;
+            vme_write8(tmp/60, dp+REQTIME(6)); tmp%=60;
+            vme_write8(tmp/10, dp+REQTIME(7)); tmp%=10;
+            vme_write8(tmp, dp+REQTIME(8));
+
+            vme_write8((xtime.tv_usec*2)>>16, dp+REQTIME(9));
+            vme_write8((xtime.tv_usec*2)>>8, dp+REQTIME(10));
+            vme_write8((xtime.tv_usec*2), dp+REQTIME(11));
+        }
+        vme_write8(0, dp+HBCTRL);
+        if (heartbeat) {
+            vme_write8(heartbeat>>8, dp+HBRATE0);
+            vme_write8(heartbeat, dp+HBRATE1);
+            error = vme_request_interrupt(&bc336.device,
+                                          &bc336.heartbeat);
+            if (error) break;
+            vme_write8(1, dp+HBCTRL);
+        }
+        vme_write8(0, dp+TVINTEN);
+        vme_write8(0, dp+EVENT);
+        vme_write8(0, dp+PROPDEL0);
+        vme_write8(0, dp+PROPDEL1);
+        /* Assume no frequency error */
+        vme_write8(33920/256, dp+FERR0);
+        vme_write8(33920%256, dp+FERR1);
+
+        error = vme_request_interrupt(&bc336.device, &bc336.event);
+        if (error) break;
+
+        error=vme_register_device(&bc336.device);
+        if (error) break;
+
+        /* Now we start the driver no matter what */
+        vme_write8(1, dp+WARMSTRT);
+        vme_write8(bc336.pps.vector, dp+INTV0);
+        vme_write8(bc336.heartbeat.vector, dp+INTV2);
+        vme_write8(bc336.event.vector, dp+INTV3);
+
+        /* Clear any pending interrupt before enabling it */
+        vme_write8(0, dp+PINTCLR0);
+        vme_write8(bc336.pps.level | 0x10, dp+INTCR0);
+
+        /* The heartbeat will be enabled on the second 1pps pulse,
+         * since the bc336 takes some time to set it up which
+         * significantly disturbs statistics.
+         */
+        vme_write8(0, dp+PINTCLR2);
+        vme_write8(bc336.heartbeat.level, dp+INTCR2);
+
+        /* For now we do not enable that one */
+        vme_write8(0, dp+PINTCLR3);
+        vme_write8(bc336.event.level /*| 0x10*/, dp+INTCR3);
+        return 0;
+    } while (0);
+#ifdef MODULE
+    cleanup_module();
+#endif
+    return error;
+}
+
diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/cm_mem20.c linux/drivers/vme/cm_mem20.c
--- linux-2.2.12/drivers/vme/cm_mem20.c	Thu Jan  1 01:00:00 1970
+++ linux/drivers/vme/cm_mem20.c	Thu Nov 11 13:31:15 1999
@@ -0,0 +1,197 @@
+/* This is the code to access a RAM board on a VME bus.
+ *
+ * It is mainly designed to test DMA functions as there is little
+ * point in using a RAM board on a VME bus these days (except perhaps
+ * if the board provides a large amount of NVRAM).
+ *
+ * The board I've been using for tests is a CM-MEM20 from
+ * Computadoras Modulares in Sevilla, Spain.
+ * + * G.Paubert paubert@iram.es 1999 + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int cm_mem20_open(struct inode *, struct file *); +static int cm_mem20_release(struct inode *, struct file *); +static int cm_mem20_ioctl(struct inode *, struct file *, + unsigned int, unsigned long); + +static struct file_operations cm_mem20_fops = { + NULL, /* lseek */ + NULL, /* read */ + NULL, /* write */ + NULL, /* readdir */ + NULL, /* poll */ + cm_mem20_ioctl, + NULL, /* mmap (might be implemented later) */ + cm_mem20_open, + NULL, /* flush */ + cm_mem20_release, + NULL, /* fsync */ + NULL, /* fasync */ + NULL, /* check_media_change */ + NULL, /* revalidate */ + NULL /* lock */ +}; + +static void dma_handler(struct vme_dma *); + +static struct private { + struct vme_device device; + struct vme_region memory; + struct vme_dma dmaread, dmawrite; + struct vme_dmavec dmareadvec[2]; + struct vme_dmavec dmawritevec[2]; + struct wait_queue * wait; +} cm_mem20 = { + device: {fops: &cm_mem20_fops, + name: "cm_mem20", + minor: 2}, + memory: {flags: VME_AM_A24(16) | + VME_USE_PIO | VME_USE_DMA | VME_USE_RMW}, + dmaread: {handler: dma_handler, + timeout: HZ}, + dmawrite: {handler: dma_handler, + timeout: HZ}, + dmareadvec: {[0 ... 1] = {length: PAGE_SIZE, + flags: VME_DMA(READ, A24(16))}}, + dmawritevec: {[0 ... 1] = {length: PAGE_SIZE, + flags: VME_DMA(WRITE, A24(16))}}, + wait: NULL, +}; + +static int mem=-1; + +MODULE_PARM(mem, "i"); + +static int cm_mem20_open(struct inode * inode, struct file * file) +{ + MOD_INC_USE_COUNT; + return 0; +} + +static int cm_mem20_release(struct inode * inode, struct file * file) +{ + MOD_DEC_USE_COUNT; + return 0; +} + +static void dma_handler(struct vme_dma* dma) { + vme_release_dmalist(dma); + wake_up(&cm_mem20.wait); +} + + +static int cm_mem20_ioctl(struct inode * inode, struct file * file, + unsigned int cmd, unsigned long arg) +{ + int error; + //volatile u_char *dp=cm_mem20.memory.vaddr; + if (_IOC_SIZE(cmd) && + !access_ok((_IOC_TYPE(cmd)&_IOC_READ) ? VERIFY_WRITE:VERIFY_READ, + arg, _IOC_SIZE(cmd))) return -EFAULT; + switch(cmd) { + case CM_MEM20_READP: + error = vme_queue_dmalist(&cm_mem20.dmaread, + cm_mem20.dmareadvec, 2); + if (error) break; + wait_event(cm_mem20.wait, + !test_bit(VME_DMA_BUSY, &cm_mem20.dmaread.flags)); + error = cm_mem20.dmaread.error; + break; + default: + return -EINVAL; + } + return error; +} + +#ifdef MODULE +void cleanup_module(void) { + struct private *lp = &cm_mem20; + printk("Unloading cm_mem20:\n"); + + vme_unregister_device(&lp->device); + free_page(lp->dmareadvec[0].kvaddr); + free_page(lp->dmareadvec[1].kvaddr); + lp->dmareadvec[0].kvaddr = lp->dmareadvec[1].kvaddr = + lp->dmawritevec[0].kvaddr = lp->dmawritevec[1].kvaddr = 0; + +} +#endif + +#ifdef MODULE +#define cm_mem20_init init_module +#endif + +int __init cm_mem20_init(void) +{ + int error; + struct private * lp = &cm_mem20; + u_long tmp; + + /* These checks are crude, but the best we could reasonably do is + * to also check the last bytes of the RAM. 
+ */
+    error = vme_safe_access(VME_READ32, VME_AM_A24(16), mem, &tmp);
+    if (error) return error;
+
+    tmp = 0x55aaaa55;
+    error = vme_safe_access(VME_WRITE32, VME_AM_A24(16), mem, &tmp);
+    if (error) return error;
+
+    error = vme_safe_access(VME_READ32, VME_AM_A24(16), mem, &tmp);
+    if (error) return error;
+    if (tmp != 0x55aaaa55) return -ENXIO;
+
+    lp->memory.base=mem;
+    lp->memory.limit=mem + 0xfffff;
+    lp->dmareadvec[0].vme_addr =
+        lp->dmawritevec[0].vme_addr = mem;
+    lp->dmareadvec[1].vme_addr =
+        lp->dmawritevec[1].vme_addr = mem+PAGE_SIZE;
+
+    do {
+        error = vme_register_region(&lp->device, &lp->memory);
+        if (error) break;
+        printk("cm_mem20 at kernel virtual address %p.\n",
+               lp->memory.kvaddr);
+
+        /* Here we should perform a sanity check and verify that
+         * the board is actually at the claimed base address. But
+         * it's hard on that hardware.
+         */
+        error = vme_alloc_dmalist(&lp->device, &lp->dmaread, 4);
+        if (error) break;
+        error = vme_alloc_dmalist(&lp->device, &lp->dmawrite, 4);
+        if (error) break;
+        error=vme_register_device(&lp->device);
+        if (error) break;
+        error = -ENOMEM;
+        lp->dmareadvec[0].kvaddr = lp->dmawritevec[0].kvaddr =
+            get_free_page(GFP_USER);
+        lp->dmareadvec[1].kvaddr = lp->dmawritevec[1].kvaddr =
+            get_free_page(GFP_USER);
+        if (!lp->dmareadvec[0].kvaddr || !lp->dmareadvec[1].kvaddr)
+            break;
+        return 0;
+    } while (0);
+#ifdef MODULE
+    cleanup_module();
+#endif
+    return error;
+}
+
diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/copy_user_io.S linux/drivers/vme/copy_user_io.S
--- linux-2.2.12/drivers/vme/copy_user_io.S	Thu Jan  1 01:00:00 1970
+++ linux/drivers/vme/copy_user_io.S	Tue Nov 16 12:57:30 1999
@@ -0,0 +1,233 @@
+/*
+ * Memory copying functions with minimal and always aligned memory accesses.
+ *
+ * Copyright (C) 1999 Gabriel Paubert.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+	.text
+	.align 2
+	.globl copy_user_io
+/* Input parameters: r3 destination, r4 source, r5 count,
+ * return value is number of noncopied bytes.
+ * Note that since we never perform an unaligned memory access, the tricks
+ * used in __copy_tofrom_user to align to copy up to the last non faulting byte
+ * are not necessary because protection domains under PPC are page aligned
+ * in the worst case and thus an instruction either faults for all the bytes
+ * or for none. Of course a few bytes may be in the pipeline, but either the
+ * fault is on write and the preload of bytes can't be undone or it is on read
+ * and there are a few bytes left which could be stored to I/O but anyway the
+ * caller is buggy and gets what it deserves (Linux version 2.0.x would
+ * not even have tried to call this code because access_ok would
+ * have failed). By using only aligned instructions, this code nevertheless
+ * prevents the partial execution of any load/store instruction which may
+ * happen on PPC when operands cross page boundaries, and which can actually
+ * result in repeating the same bus transaction.
+ * + * the basic strategy is: + * a) read up to 6 bytes in buf0:buf1 to be able to align both + * the source and the destination addresses + * b) store buf0:buf1 until the destination address is 4 byte-aligned, keeping + * up to 3 valid bytes in buf0 + * c) execute the main loop by blocks of 4 bytes + * d) fetch the last remaining bytes (if any) and append them to buf0, so that + * again up to 6 bytes are in buf0:buf1 + * e) store buf0:buf1 until the buffer is empty + * however the code has been ordered in such a way that operations consisting + * exclusively of 32 bit aligned transfers are favored. For unaligned cases, + * static branch prediction has been set up to favor moves of multiple of + * 16 bits aligned on even addresses, which are hopefully the most important + * non 32 bit aligned case. + * + * Expansion of this code to 64 bit is not completely straightforward because + * of the special cases which arise with short transfers. There are still 2 + * free registers (r11 and r12) which could be used to unroll the main loops, + * this is fairly simple but not worth the trouble when one of the + * areas is non cacheable (on one board, I've measured something like 300 + * cycles per iteration on a 200MHz 603e for the aligned loop). + * Anyway large transfers should use DMA. + */ + +#define cr0 0 +#define cr1 1 +#define cr6 6 +#define cr7 7 +#define rem 3 /* remaining bytes to store (this must be r3) */ +#define src 4 /* source pointer */ +#define cnt 5 /* input count, keeps track of bytes not yet read */ +#define dst 6 /* destination pointer */ +#define rsa 7 /* right shift amount = number of bits (or scratch)*/ +#define lsa 8 /* left shift amount (or scratch) */ +#define tmp 9 /* scratch register */ +#define buf0 0 /* high order bytes of buffer */ +#define buf1 10 /* low order bytes of buffer */ +#define ACCESS(opcode, reg, address, fixup) \ +0: opcode reg,address; \ + .section __ex_table,"a"; \ + .align 2; \ + .long 0b,fixup; \ + .previous +#ifdef __KERNEL__ +#define CHECK(reg, mask) +#else +#define CHECK(reg, mask) \ + andi. 12,reg,mask; bnelr-; +#endif + .global copy_user_io + .type copy_user_io,@function +/* New, optimized version */ +copy_user_io: + or rsa,3,src + addi src,src,-4 + andi. rsa,rsa,3 + addi dst,3,-4 + bne- cr0,9f # unaligned case +/* Mutually aligned copies branch back here once the first bytes are copied */ +1: cmplwi cr1,cnt,4 + srwi buf0,cnt,2 # note here that rsa is zero + clrlwi. rem,cnt,30 # to get return count right on exception + mtctr buf0 + blt- cr1,3f # only few bytes (but matched alignment) +2: ACCESS(lwzu, buf0, 4(src), 18f) + ACCESS(stwu, buf0, 4(dst), 18f) + bdnz+ 2b +3: beqlr+ cr0 # return (actually not necessary !) + addi cnt,rem,0 # same src and dst remaining byte count + li buf0,0 # clean register for following +/* Code from here is also used as finishing case for unaligned copies, + * both src and dst are 4 byte aligned except for some pathological cases + * when the byte count was originally less than 6. + * Anyway, from here cnt<4, rem<7. 
+ */ +4: mtcrf 0x1,cnt + bf- 30,5f + CHECK(src,1) + ACCESS(lhz, buf1, 4(src), 19f) + addi src,src,2 # buf0 has 0 to 3 bytes + slwi buf1,buf1,16 + subfic lsa,rsa,32 # shift count + srw tmp,buf1,rsa + addi rsa,rsa,16 + slw buf1,buf1,lsa + or buf0,buf0,tmp # buf0:buf1 has 2 to 5 bytes +5: bf+ 31,6f + ACCESS(lbz, tmp, 4(src), 19f) + subfic rsa,rsa,24 # 24, 16, 8, 0, 56, 48 for 0 to 5 bytes + slw lsa,tmp,rsa + xori rsa,rsa,32 # 56, 48, 40, 32, 24, 16 + slw tmp,tmp,rsa # This works only because the PPC uses a + or buf0,buf0,lsa # 6 bit shift count so that shift amounts + or buf1,buf1,tmp # higher than 32 clear the result register. +6: mtcrf 0x1,rem + bf- 29,7f + CHECK(dst,3) + ACCESS(stwu, buf0, 4(dst), 19f) + addi rem,rem,-4 + addi buf0,buf1,0 # 0 to 2 bytes +7: bf- 30,8f + rotlwi buf0,buf0,16 + CHECK(dst,1) + ACCESS(sth, buf0, 4(dst), 19f) + addi rem,rem,-2 + addi dst,dst,2 +8: bflr+ 31 + rotlwi buf0,buf0,8 + ACCESS(stb, buf0, 4(dst), 19f) + li rem,0 + blr +/* Unaligned case */ +9: cmplwi cr1,cnt,2 + neg tmp,src + neg lsa,dst + mtcrf 0x1,tmp + addi rem,cnt,0 + li rsa,0 # Needed for early exit if count<2 + li buf0,0 + blt- cr1,4b # special case for count<2 + clrlwi tmp,tmp,30 # bytes to load to align source + clrlwi lsa,lsa,30 # bytes to store to align destination + cmplw cr6,lsa,tmp # need to try to fetch 4 more bytes ? + bf+ 31,10f + ACCESS(lbz, buf0, 4(src), 19f) + addi cnt,cnt,-1 + addi src,src,1 + li rsa,8 + slwi buf0,buf0,24 +10: li buf1,0 + bf- 30,12f +11: cmplwi cr1,cnt,2 # no point in trying 4 if even 2 + blt- cr1,13f # would not fit so skip + CHECK(src,1) + ACCESS(lhz, tmp, 4(src), 19f) + addi src,src,2 + slwi tmp,tmp,16 + addi cnt,cnt,-2 + srw tmp,tmp,rsa + addi rsa,rsa,16 + or buf0,buf0,tmp +/* Here load only if cnt>=4 and cr6.gt is true, the code for a 16 bit load + * can actually be executed twice: this solves the nasty case of a count of + * five bytes moved from a 2 mod 4 address to a 1 mod 4 where it is necessary + * to perform the 2 16 bit loads early because otherwise we would not + * have the opportunity to perform the 1st 16 bit store in the early part. + */ +12: bng+ cr6,13f + cmplwi cr6,cnt,4 # jump back with cr6.gt clear + blt- cr6,11b # (although trying twice would not harm) + CHECK(src,3) + ACCESS(lwzu, buf1, 4(src), 19f) + addi cnt,cnt,-4 + srw tmp,buf1,rsa + or buf0,buf0,tmp + subfic tmp,rsa,32 + addi rsa,rsa,32 + slw buf1,buf1,tmp +13: mtcrf 0x1,lsa # mask for first stores + bf+ 31,14f + rotlwi buf0,buf0,8 # No check for at least 1 byte necessary ! + ACCESS(stb, buf0, 4(dst), 19f) + addi rsa,rsa,-8 + rlwimi buf0,buf1,8,0xff + addi dst,dst,1 + slwi buf1,buf1,8 + addi rem,rem,-1 +14: bf- 30,15f + cmplwi cr1,rsa,16 + blt cr1,15f + rotlwi buf0,buf0,16 + CHECK(dst,1) + ACCESS(sth, buf0, 4(dst), 19f) + addi rsa,rsa,-16 + rlwimi buf0,buf1,16,0xffff + addi dst,dst,2 + addi rem,rem,-2 +15: cmplwi cr1,rsa,0 # case of mutually aligned large copies + beq+ cr1,1b # which we hope will be common + srwi. 
tmp,cnt,2
+	subfic	lsa,rsa,32
+	mtctr	tmp
+	clrrwi	tmp,cnt,2
+	beq-	cr0,4b
+	sub	rem,rem,tmp
+16:	CHECK(src,3)
+	ACCESS(lwzu, buf1, 4(src), 18f)
+	srw	tmp,buf1,rsa
+	or	buf0,buf0,tmp
+	CHECK(dst,3)
+	ACCESS(stwu, buf0, 4(dst), 18f)
+	slw	buf0,buf1,lsa
+	bdnz+	16b
+	clrlwi	cnt,cnt,30
+	b	4b
+
+	.section .fixup,"ax"
+	.align 2
+18:	mfctr	cnt
+	slwi	cnt,cnt,2
+	addi	rem,rem,cnt
+19:	blr
+	.previous
+
diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/correl92.c linux/drivers/vme/correl92.c
--- linux-2.2.12/drivers/vme/correl92.c	Thu Jan  1 01:00:00 1970
+++ linux/drivers/vme/correl92.c	Sat Oct 23 19:15:11 1999
@@ -0,0 +1,557 @@
+/* This is the code to control the IRAM correlator (1992).
+ * G.Paubert paubert@iram.es 1998-1999
+ */
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/* This has been directly copied from drivers/char/mem.c; as the comments
+ * say, it is ugly to have to repeat it here.
+ */
+
+/*
+ * This should probably be per-architecture in
+ */
+static inline
+unsigned long pgprot_noncached(unsigned long prot) {
+#if defined(__i386__)
+    if (boot_cpu_data.x86 > 3)
+        prot |= _PAGE_PCD;
+#elif defined(__powerpc__)
+    prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+#elif defined(__mc68000__)
+    if (CPU_IS_020_OR_030)
+        prot |= _PAGE_NOCACHE030;
+    /* Use no-cache mode, serialized */
+    if (CPU_IS_040_OR_060)
+        prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
+#elif defined(__mips__)
+    prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+#endif
+
+    return prot;
+}
+
+/* End of drivers/char/mem.c stolen code. */
+
+static int corr_open(struct inode *, struct file *);
+static int corr_release(struct inode *, struct file *);
+static int corr_ioctl(struct inode *, struct file *,
+                      unsigned int, unsigned long);
+static int corr_mmap(struct file *file, struct vm_area_struct *vma);
+static unsigned int corr_poll(struct file *, struct poll_table_struct *);
+static ssize_t corr_read(struct file *, char *, size_t, loff_t *);
+
+static struct file_operations corr_fops = {
+    NULL,        /* lseek */
+    corr_read,   /* read */
+    NULL,        /* write */
+    NULL,        /* readdir */
+    corr_poll,   /* poll */
+    corr_ioctl,
+    corr_mmap,   /* mmap */
+    corr_open,
+    NULL,        /* flush */
+    corr_release,
+    NULL,        /* fsync */
+    NULL,        /* fasync */
+    NULL,        /* check_media_change */
+    NULL,        /* revalidate */
+    NULL         /* lock */
+};
+
+
+static void readout_handler(struct vme_interrupt *);
+static void setflag_handler(struct vme_interrupt *);
+static void dma_handler(struct vme_dma *);
+static void accumulate(void *);
+
+/* We could switch between 2 DMA buffers for reading the data, but anyway,
+ * if we don't have time to accumulate before the next data arrives, we are
+ * in deep trouble. It would be a very marginal improvement in practice.
+ * There are several accumulation buffers however, this is clearly necessary
+ * given that they are processed from user space.
+ */ + +static struct corr_buflist { + struct corr_buflist *next; + corr_raw_spectrum spectrum; +} buffers[4]; + +static struct private { + struct vme_device device; + struct vme_region regs; + struct vme_region dma_area; + struct vme_interrupt readout; + struct vme_interrupt readout_l4; + struct vme_interrupt readout_l3; + struct vme_interrupt phase; + struct vme_interrupt cycle; + struct vme_dma dma; + struct vme_dmavec dmalist[2]; + struct tq_struct accumul_bh; + struct wait_queue *wait; + int opened; /* Exclusive use: only one fd allowed to the HW */ + spinlock_t state_lock; + struct corr_buflist * acquire_buf; + struct corr_buflist * ready_bufs; + struct corr_buflist * free_bufs; + u_int sequence; + int request_time; + int nphase; + int done_phase; + int flagphase; + int flagcycle; + int newphase; + int newcycle; + int acquire; + int insync; + int status; +} corr = { + device: {fops: &corr_fops, + name: "correl92", + minor: 3}, + regs: {base: 0x000000, + limit: 0x007fff, + flags: VME_AM_A16(16) | VME_USE_PIO}, + dma_area: {base: 0x000000, + limit: 0x001fff, + flags: VME_AM_A16(16) | VME_USE_DMA}, + readout: {handler: readout_handler, + name: "readout", + handler_data: (void *) &corr, + level: 5, vector: 0xfe,}, + readout_l4:{handler: readout_handler, + name: "readout_l4 (Universe bug workaround)", + handler_data: (void *) &corr, + level: 4, vector: 0xfe,}, + readout_l3:{handler: readout_handler, + name: "readout_l3 (Universe bug workaround)", + handler_data: (void *) &corr, + level: 3, vector: 0xfe,}, + phase: {handler: setflag_handler, + name: "phase", + handler_data: (void *) &corr.flagphase, + level: 4, vector: 0xfd}, + cycle: {handler: setflag_handler, + name: "cycle", + handler_data: (void *) &corr.flagcycle, + level: 3, vector: 0xfb}, + dma: {handler: dma_handler, + handler_data: &corr, + maxfrags: 2, + timeout: HZ/50}, + dmalist: { {0, 0x000000, 0x1000, VME_AM_A16(16)}, + {0, 0x001000, 0x20, VME_AM_A16(16)}}, + accumul_bh:{0, 0, accumulate, &corr}, + opened: 0, + state_lock: SPIN_LOCK_UNLOCKED, +}; + +static void readout_handler(struct vme_interrupt *p) { + struct private *lp=(struct private *)(p->handler_data); + /* Here we simply have to start the DMA but only + * if the integration is active and we do not have to kill + * the current buffer. + */ + /* We should better handle errors: if dma->state is positive + * the dma is still busy. Could be considered an overrun error. + * If vme_queue_dmalist returns an error, print some message + * and perhaps wake up the waiting process. + * Let us hope that it works. + */ + spin_lock(&lp->state_lock); + /* This clears all flags in case nphase==0. */ + lp->newphase = lp->flagphase && lp->nphase > 1; + lp->newcycle = lp->flagcycle && lp->nphase > 1; + lp->flagphase = lp->flagcycle = 0; + if (lp->nphase != 0) { + if (lp->acquire) { + vme_queue_dmalist(&lp->dma, lp->dmalist, 2); + } else { + /* First synchronization at start of integration */ + if (lp->newphase || lp->nphase==1) lp->acquire = 1; + } + } + spin_unlock(&lp->state_lock); +} + + +/* Very simple handler, race conditions with two flags and another interrupt + * in the middle forced to use spinlocks instead of atomic variables. 
+ */
+
+static void setflag_handler(struct vme_interrupt *p) {
+    int * lp = (int *) p->handler_data;
+    spin_lock(&corr.state_lock);
+    *lp = corr.nphase>1;
+    spin_unlock(&corr.state_lock);
+}
+
+static void dma_handler(struct vme_dma *p) {
+    struct private *lp=(struct private *)(p->handler_data);
+    int np, status=p->error;
+    /* Because of race conditions in the interrupts, it may happen
+     * that we get the phase interrupt, followed by the readout interrupt
+     * followed by the cycle interrupt. In this case we have to
+     * flag that the cycle interrupt actually happened before the readout.
+     * If the interrupt has not happened after the DMA has finished,
+     * however, it is pretty clear that it was a phase and not a cycle
+     * interrupt. Note that it will completely break if 2 phase interrupts
+     * come very close one after the other, but the whole system is not
+     * designed to handle more than 1 phase or cycle interrupt every
+     * 2 readout cycles. That's simply impossible to handle.
+     */
+    vme_release_dmalist(p);
+    if (status) printk(KERN_ERR "DMA error %d reading correlator\n",
+                       status);
+    /* Actually we should not process the data if there is an error,
+     * or at least flag that the data is questionable.
+     */
+    spin_lock(&lp->state_lock);
+    lp->status = status;
+    np = lp->newphase;
+    if (!lp->acquire) goto out; /* Very unlikely */
+    if (np && lp->flagcycle) {
+        lp->newcycle = lp->nphase > 1;
+        lp->flagcycle = 0;
+    }
+    if (!lp->insync) {
+        if (lp->newcycle) {
+            lp->insync = 1;
+        } else if (np) {
+            /* Reset because we had a false start.
+             * But actually 2 phase observations could
+             * be started on any phase.
+             */
+            lp->acquire_buf->spectrum.time = 0;
+            lp->acquire_buf->spectrum.status = 0;
+            goto out;
+        }
+    }
+    /* We simply defer processing of the buffer to the end of interrupt,
+     * but we have to process even completely blanked readouts in the case
+     * a new phase has ended to make sure that the phase end is recorded
+     * and the buffer switched.
+     */
+    if (*(u_short *)(lp->dmalist[1].kvaddr) || lp->newphase || status) {
+        queue_task(&lp->accumul_bh, &tq_immediate);
+        mark_bh(IMMEDIATE_BH);
+    }
+ out:
+    spin_unlock(&lp->state_lock);
+}
+
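A note on the locking pattern used throughout this driver: handlers called at interrupt time, like dma_handler() above, take the plain spin_lock(), while accumulate() below runs in a bottom half with interrupts enabled and therefore uses spin_lock_irq(). In isolation the pairing looks like this (a sketch, not driver code):

    #include <linux/spinlock.h>

    static spinlock_t lock = SPIN_LOCK_UNLOCKED;

    static void from_interrupt(void)     /* cf. dma_handler() above */
    {
        spin_lock(&lock);                /* IRQs already disabled here */
        /* touch shared state */
        spin_unlock(&lock);
    }

    static void from_bottom_half(void)   /* cf. accumulate() below */
    {
        spin_lock_irq(&lock);            /* must mask interrupts itself */
        /* touch the same state */
        spin_unlock_irq(&lock);
    }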
+/* This function is called in a bottom half handler, with interrupts enabled */
+static void accumulate(void *p) {
+    struct private *lp=(struct private *)p;
+    int i;
+    u_int * sum, time;
+    struct corr_buflist *bp;
+    u_short * data = (u_short *) lp->dmalist[0].kvaddr;
+
+    spin_lock_irq(&lp->state_lock);
+    bp = lp->acquire_buf;
+    time = bp->spectrum.time;
+    bp->spectrum.time += *(u_short *)(lp->dmalist[1].kvaddr);
+    spin_unlock_irq(&lp->state_lock);
+    sum = bp->spectrum.data;
+    if (time == 0) {
+        for (i=2048; i>0; --i) {
+            *sum++ = *data++;
+        }
+    } else {
+        for (i=2048; i>0; --i) {
+            *sum++ += *data++;
+        }
+    }
+
+    /* Here we have to check if the readout is done and wake
+     * up the process and switch buffers if yes. A slightly subtle point
+     * is that we never get here in case we restart when hunting the first
+     * phase at start of integration. So the test for newphase to switch
+     * buffers and wake up the reader is valid.
+     */
+    spin_lock_irq(&lp->state_lock);
+    if (lp->acquire &&
+        (lp->newphase || lp->status ||
+         (lp->nphase==1 && time>lp->request_time))) {
+        bp->spectrum.sequence = ++lp->sequence;
+        bp->spectrum.status = lp->status;
+        bp->next=NULL;
+        if (lp->newphase) {
+            lp->done_phase = (lp->newcycle ||
+                              (lp->done_phase+1 == lp->nphase))
+                ? 0 : lp->done_phase + 1;
+            bp->spectrum.phase = lp->done_phase;
+        } else {
+            lp->done_phase = 0;
+            bp->spectrum.phase = 0;
+        }
+        if (!lp->ready_bufs) {
+            lp->ready_bufs=bp;
+        } else {
+            /* This is not the normal case, otherwise a doubly
+             * linked list or the end of the list would have
+             * been kept, but the list is short (one entry
+             * may be present occasionally, 2 is unlikely and
+             * 3 is overrun).
+             */
+            struct corr_buflist *np=lp->ready_bufs;
+            while (np->next) np=np->next;
+            np->next = bp;
+        }
+        if (lp->free_bufs) {
+            bp = lp->acquire_buf = lp->free_bufs;
+            lp->free_bufs = lp->free_bufs->next;
+        } else { /* Overrun, let us drop the oldest ready buffer */
+            bp = lp->acquire_buf = lp->ready_bufs;
+            lp->ready_bufs = lp->ready_bufs->next;
+        }
+        bp->spectrum.time = 0;
+        bp->spectrum.phase = 0;
+        bp->spectrum.status = 0;
+        wake_up_interruptible(&lp->wait);
+    }
+    spin_unlock_irq(&lp->state_lock);
+}
+
+static int corr_open(struct inode * inode, struct file * file)
+{
+    if (corr.opened) return -EBUSY;
+    corr.opened = 1;
+    MOD_INC_USE_COUNT;
+    return 0;
+}
+
+static int corr_release(struct inode * inode, struct file * file)
+{
+    corr.opened = 0;
+    corr.acquire = corr.insync = corr.nphase = 0;
+    MOD_DEC_USE_COUNT;
+    return 0;
+}
+
+static unsigned int corr_poll(struct file *file,
+                              struct poll_table_struct *wait) {
+    struct private *lp = &corr;
+    poll_wait(file, &lp->wait, wait);
+    return lp->ready_bufs ? POLLIN|POLLRDNORM : 0;
+}
+
+static ssize_t
+corr_read(struct file *file, char *buf, size_t count, loff_t *ppos) {
+    struct corr_buflist *bp;
+    ssize_t retval = 0;
+    struct private *lp = &corr;
+
+    count = ((count>sizeof(corr_raw_spectrum)) ?
+             sizeof(corr_raw_spectrum) : count);
+    if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT;
+    /* No need to spinlock/spin_unlock like mad on this test:
+     * interrupts never clear ready_bufs, never touch lp->nphase
+     * which is protected by the global kernel lock at this point.
+     */
+    if (file->f_flags & O_NONBLOCK && !lp->ready_bufs) {
+        return lp->nphase ? -EAGAIN : 0;
+    } else {
+        retval =
+            wait_event_interruptible(lp->wait,
+                                     (lp->ready_bufs || lp->nphase==0));
+        if (retval) return retval;
+    }
+    /* lp->nphase is protected by the global kernel lock since
+     * it's only set by an ioctl.
+     */
+    if (lp->nphase==0) return 0;
+    /* Now we need to lock to touch variables modified by interrupts */
+    spin_lock_irq(&lp->state_lock);
+    bp = lp->ready_bufs;
+    if (bp) lp->ready_bufs = lp->ready_bufs->next;
+    spin_unlock_irq(&lp->state_lock);
+
+    /* Should we retry the whole game in this case ? It should
+     * not be frequent since the system is not designed to have multiple
+     * readers.
+     */
+    if (!bp) return 0;
+    retval = count;
+    if (__copy_to_user(buf, &bp->spectrum, count)) retval = -EFAULT;
+
+    /* Put the buffer on the free list */
+    spin_lock_irq(&lp->state_lock);
+    bp->next = lp->free_bufs;
+    lp->free_bufs = bp;
+    spin_unlock_irq(&lp->state_lock);
+    return retval;
+}
+
+static int corr_ioctl(struct inode * inode, struct file * file,
+                      unsigned int cmd, unsigned long arg)
+{
+    int error, nphase, reqtime;
+    struct private *lp = &corr;
+    if (_IOC_SIZE(cmd) &&
+        !access_ok((_IOC_TYPE(cmd)&_IOC_READ) ? VERIFY_WRITE:VERIFY_READ,
+                   arg, _IOC_SIZE(cmd))) return -EFAULT;
+    switch(cmd) {
+
+    case CORR_SET_MODE:
+#define param ((corr_mode *) arg)
+        error = __get_user(nphase, &param->nphase);
+        error |= __get_user(reqtime, &param->reqtime);
+        if (error) break;
+        spin_lock_irq(&lp->state_lock);
+        /* We also reset anything which was or might have
+         * been in progress, i.e.
this restarts integration + * from the time it is called. + */ + lp->nphase = nphase; + lp->insync = (nphase==1); + lp->request_time = reqtime; + lp->acquire_buf = buffers+0; + lp->free_bufs = buffers+1; + buffers[1].next = buffers+2; + buffers[2].next = buffers+3; + buffers[3].next = NULL; + buffers[0].spectrum.time = 0; + buffers[0].spectrum.phase = 0; + buffers[0].spectrum.status = 0; + lp->acquire = lp->sequence = lp->done_phase = 0; + lp->flagphase = lp->flagcycle = 0; + lp->ready_bufs = NULL; + spin_unlock_irq(&lp->state_lock); + break; +#undef param + default: + error = -EINVAL; + break; + } + + return error; +} + +static int corr_mmap(struct file *file, struct vm_area_struct *vma) { + u_long off = vma->vm_offset; + u_long len = vma->vm_end - vma->vm_start; + struct private *lp = &corr; + + if (off & ~PAGE_MASK) return -EINVAL; + + if (len+off>0x8000) return -ENXIO; + + pgprot_val(vma->vm_page_prot) = + pgprot_noncached(pgprot_val(vma->vm_page_prot)); + vma->vm_flags |= VM_IO; + if (remap_page_range(vma->vm_start, + (u_long) lp->regs.phyaddr+off, + len, vma->vm_page_prot)) return -EAGAIN; + return 0; +} + +#ifdef MODULE +void cleanup_module(void) { + struct private *lp = &corr; + u_long tmp=0; + printk(KERN_INFO "Unloading correl92\n"); + /* Disable interrupts in a safe way */ + if (lp->regs.kvaddr) { + vme_safe_access(VME_WRITE16, VME_AM_A16(16), 0x004c00, &tmp); + } + lp->regs.kvaddr = 0; + kfree((void *) lp->dmalist[0].kvaddr); + kfree((void *) lp->dmalist[1].kvaddr); + vme_unregister_device(&lp->device); +} +#endif + +#ifdef MODULE +#define corr_init init_module +#endif + +int __init corr_init(void) +{ + int error; + u_long tmp; + corr_hardware * dp; + struct private *lp = &corr; + + /* Minimal check of the presence of some HW. Verifying that this + * actually is the hardware we expect is too complex since there + * is no signature. This also sets up the timing chip correctly. + */ + tmp = 0xd230; + error = vme_safe_access(VME_WRITE16, VME_AM_A16(16), 0x003000, &tmp); + if (error) return error; + error = vme_safe_access(VME_READ16, VME_AM_A16(16), 0x003000, &tmp); + if (error) return error; + if (tmp!=0xd230) return -ENXIO; + + do { + /* Allocate DMA memory */ + error = -ENOMEM; + if (!(lp->dmalist[0].kvaddr = + (u_long) kmalloc(0x1000, GFP_KERNEL))) break; + + if (!(lp->dmalist[1].kvaddr = + (u_long) kmalloc(0x20, GFP_KERNEL))) break; + + error = vme_register_region(&lp->device, &lp->regs); + if (error) break; + dp = (corr_hardware *) lp->regs.kvaddr; + printk(KERN_INFO "correl92: kernel virtual address %p,\n", dp); + + /* Disable interrupts just in case, some systems do not + * perform a VME reset on shutdown and interrupts may stay + * pending across reboots (arghhh!). + */ + vme_write16(0x00, &dp->interrupt); + + /* This is only informative, it only appears as a line + * in /proc/bus/vme/regions. 
+ */
+        error = vme_register_region(&lp->device, &lp->dma_area);
+        if (error) break;
+
+        error = vme_request_interrupt(&lp->device, &lp->readout);
+        if (error) break;
+
+        error = vme_request_interrupt(&lp->device, &lp->readout_l4);
+        if (error) break;
+
+        error = vme_request_interrupt(&lp->device, &lp->readout_l3);
+        if (error) break;
+
+        error = vme_request_interrupt(&lp->device, &lp->cycle);
+        if (error) break;
+
+        error = vme_request_interrupt(&lp->device, &lp->phase);
+        if (error) break;
+
+        error = vme_alloc_dmalist(&lp->device, &lp->dma, 2);
+        if (error) break;
+
+        error = vme_register_device(&lp->device);
+        if (error) break;
+
+        /* Now we start the driver no matter what,
+         * this simply amounts to enabling the interrupts.
+         */
+        vme_write16(0x0f, &dp->interrupt);
+        return 0;
+    } while (0);
+#ifdef MODULE
+    cleanup_module();
+#endif
+    return error;
+}
diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/correl99.c linux/drivers/vme/correl99.c
--- linux-2.2.12/drivers/vme/correl99.c	Thu Jan  1 01:00:00 1970
+++ linux/drivers/vme/correl99.c	Fri Oct  1 21:48:44 1999
@@ -0,0 +1,453 @@
+/* This is the code to control the IRAM correlator (1999).
+ * G.Paubert paubert@iram.es 1998-1999
+ */
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/* This has been directly copied from drivers/char/mem.c; as the comments
+ * say, it is ugly to have to repeat it here.
+ */
+
+/*
+ * This should probably be per-architecture in
+ */
+static inline
+unsigned long pgprot_noncached(unsigned long prot) {
+#if defined(__i386__)
+    if (boot_cpu_data.x86 > 3)
+        prot |= _PAGE_PCD;
+#elif defined(__powerpc__)
+    prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
+#elif defined(__mc68000__)
+    if (CPU_IS_020_OR_030)
+        prot |= _PAGE_NOCACHE030;
+    /* Use no-cache mode, serialized */
+    if (CPU_IS_040_OR_060)
+        prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S;
+#elif defined(__mips__)
+    prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
+#endif
+
+    return prot;
+}
+
+/* End of drivers/char/mem.c stolen code. */
+
+static int corr_open(struct inode *, struct file *);
+static int corr_release(struct inode *, struct file *);
+static int corr_ioctl(struct inode *, struct file *,
+                      unsigned int, unsigned long);
+static int corr_mmap(struct file *file, struct vm_area_struct *vma);
+static unsigned int corr_poll(struct file *, struct poll_table_struct *);
+static ssize_t corr_read(struct file *, char *, size_t, loff_t *);
+
+static struct file_operations corr_fops = {
+    NULL,        /* lseek */
+    corr_read,   /* read */
+    NULL,        /* write */
+    NULL,        /* readdir */
+    corr_poll,   /* poll */
+    corr_ioctl,
+    corr_mmap,   /* mmap */
+    corr_open,
+    NULL,        /* flush */
+    corr_release,
+    NULL,        /* fsync */
+    NULL,        /* fasync */
+    NULL,        /* check_media_change */
+    NULL,        /* revalidate */
+    NULL         /* lock */
+};
+
+
+static void int32Hz_handler(struct vme_interrupt *);
+static void hertz_handler(struct vme_interrupt *);
+static void dma_handler(struct vme_dma *);
+static void accumulate(void *);
+
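The pgprot_noncached() helper above is what makes user-space mappings of the register window safe; corr_mmap() further down applies it. From the application side the mapping would be used roughly like this (a sketch; the /dev node name is hypothetical, and 0x10000 is the window size enforced by corr_mmap() below):

    #include <sys/mman.h>
    #include <fcntl.h>
    #include <unistd.h>

    volatile unsigned short *map_correlator(void)
    {
        int fd = open("/dev/correl99", O_RDWR);  /* hypothetical node */
        if (fd < 0)
            return 0;
        /* 64 KB register window; the offset must be page aligned.
         * A real caller must check for MAP_FAILED.
         */
        return (volatile unsigned short *)
            mmap(0, 0x10000, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }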
+/* We could switch between 2 buffers for reading the data, but anyway,
+ * if we don't have time to accumulate before the next data arrives, we are
+ * in deep trouble. It would be a very marginal improvement in practice.
+ * There are 2 accumulation buffers however, this is clearly necessary
+ * given that they are processed from user space.
+ */
+
+static struct corr_buflist {
+    struct corr_buflist *next;
+    corr_spectrum spectrum;
+} buffers[4];
+
+static struct private {
+    struct vme_device device;
+    struct vme_region regs;
+    struct vme_region dma_area;
+    struct vme_interrupt int32Hz;
+    struct vme_interrupt hertz;
+    struct vme_dma dma;
+    struct vme_dmavec dmalist;
+    struct tq_struct accumul_bh;
+    struct wait_queue *wait;
+    int opened; /* Exclusive use: no 2 users may have it open */
+    spinlock_t state_lock;
+    struct corr_buflist * acquire_buf;
+    struct corr_buflist * ready_bufs;
+    struct corr_buflist * free_bufs;
+    u_int dumps;
+    int acquire;
+    int insync;
+    int clearbuf;
+} corr = {
+    device: {fops: &corr_fops,
+             name: "correl99",
+             minor: 6},
+    regs: {base: 0x000000,
+           limit: 0x00ffff,
+           flags: VME_AM_A24(16) | VME_USE_PIO},
+    dma_area: {base: 0x000000,
+               limit: 0x0077ff,
+               flags: VME_AM_A24_BLT(32) | VME_USE_DMA},
+    int32Hz: {handler: int32Hz_handler,
+              name: "32Hz",
+              handler_data: &corr,
+              level: 4, vector: 0xf1},
+    hertz: {handler: hertz_handler,
+            name: "hertz",
+            handler_data: &corr,
+            level: 3, vector: 0xf2},
+    dma: {handler: dma_handler,
+          handler_data: &corr,
+          maxfrags: 2,
+          timeout: HZ},
+    dmalist: {vme_addr: 0,
+              length: 0x7800,
+              flags: VME_DMA(READ, A24_BLT(32))},
+    accumul_bh:{0, 0, accumulate, &corr},
+    opened: 0,
+    state_lock: SPIN_LOCK_UNLOCKED,
+};
+
+static void int32Hz_handler(struct vme_interrupt *p) {
+    struct private *lp=(struct private *)(p->handler_data);
+    /* Here we simply have to start the DMA but only
+     * if the integration is active and we do not have to kill
+     * the current buffer.
+     */
+    /* We should better handle errors: if dma->state is positive
+     * the dma is still busy. Could be considered an overrun error.
+     * If vme_queue_dmalist returns an error, print some message
+     * and perhaps wake up the waiting process.
+     * Let us hope that it works.
+     */
+    spin_lock(&lp->state_lock);
+    if (lp->insync) {
+        vme_queue_dmalist(&lp->dma, &lp->dmalist, 1);
+        lp->clearbuf=(lp->dumps++ == 0);
+    }
+    spin_unlock(&lp->state_lock);
+}
+
+static void hertz_handler(struct vme_interrupt *p) {
+    struct private *lp=(struct private *)(p->handler_data);
+
+    spin_lock(&lp->state_lock);
+    if (lp->insync && lp->dumps != 32) {
+        printk("Bad integration: %d dumps!\n", lp->dumps);
+    }
+    lp->insync = lp->acquire;
+    lp->dumps = 0;
+    spin_unlock(&lp->state_lock);
+}
+
+static void dma_handler(struct vme_dma *p) {
+    struct private *lp=(struct private *)(p->handler_data);
+    /* We simply defer processing of the buffer to the end of interrupt,
+     * but we don't even queue processing of completely blanked readouts.
+     */
+    if (lp->insync) {
+        queue_task(&lp->accumul_bh, &tq_immediate);
+        mark_bh(IMMEDIATE_BH);
+    }
+}
+
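The deferral in dma_handler() above is the stock 2.2 immediate bottom-half idiom; stripped of the driver specifics it reduces to the following sketch:

    #include <linux/tqueue.h>
    #include <linux/interrupt.h>

    static void my_bh(void *data) { /* runs later, interrupts enabled */ }
    static struct tq_struct my_task = { 0, 0, my_bh, 0 };

    static void my_irq_handler(void)
    {
        queue_task(&my_task, &tq_immediate);  /* enqueue the work */
        mark_bh(IMMEDIATE_BH);                /* run it after the IRQ */
    }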
+/* This function is called in a bottom half handler, with interrupts enabled */
+static void accumulate(void *p) {
+    struct private *lp=(struct private *)p;
+    int i;
+    u_int * sum;
+    struct corr_buflist *bp;
+    u_short * data = (u_short *) lp->dmalist.kvaddr;
+
+    bp = lp->acquire_buf;
+    sum = bp->spectrum.data;
+    if (lp->clearbuf) {
+        for (i=CHANNELS/2; i>0; --i) {
+            sum[0] = ntohs(data[0]);
+            sum[1] = ntohs(data[1]);
+            sum += 2;
+            data += 2;
+        }
+    } else {
+        for (i=CHANNELS/2; i>0; --i) {
+            sum[0] += ntohs(data[0]);
+            sum[1] += ntohs(data[1]);
+            sum += 2;
+            data += 2;
+        }
+    }
+
+    /* Here we have to check if the readout is done and wake
+     * up the process and switch buffers if yes.
+     */
+    spin_lock_irq(&lp->state_lock);
+    if (lp->insync && lp->dumps == 0) {
+        bp->next=NULL;
+        if (!lp->ready_bufs) {
+            lp->ready_bufs=bp;
+        } else {
+            /* This is not the normal case, otherwise a doubly
+             * linked list or the end of the list would have
+             * been kept, but the list is short (one entry
+             * may be present occasionally, 2 is unlikely and
+             * 3 is overrun).
+             */
+            struct corr_buflist *np=lp->ready_bufs;
+            while (np->next) np=np->next;
+            np->next = bp;
+        }
+        if (lp->free_bufs) {
+            bp = lp->acquire_buf = lp->free_bufs;
+            lp->free_bufs = lp->free_bufs->next;
+        } else { /* Overrun, let us drop the oldest ready buffer */
+            bp = lp->acquire_buf = lp->ready_bufs;
+            lp->ready_bufs = lp->ready_bufs->next;
+        }
+        wake_up_interruptible(&lp->wait);
+    }
+    spin_unlock_irq(&lp->state_lock);
+}
+
+static int corr_open(struct inode * inode, struct file * file)
+{
+    if (corr.opened) return -EBUSY;
+    corr.opened = 1;
+    MOD_INC_USE_COUNT;
+    return 0;
+}
+
+static int corr_release(struct inode * inode, struct file * file)
+{
+    corr.opened = 0;
+    corr.acquire = corr.insync = 0;
+    MOD_DEC_USE_COUNT;
+    return 0;
+}
+
+static unsigned int corr_poll(struct file *file,
+                              struct poll_table_struct *wait) {
+    struct private *lp = &corr;
+    poll_wait(file, &lp->wait, wait);
+    return lp->ready_bufs ? POLLIN|POLLRDNORM : 0;
+}
+
+static ssize_t
+corr_read(struct file *file, char *buf, size_t count, loff_t *ppos) {
+    struct corr_buflist *bp;
+    struct wait_queue wait = { current, NULL };
+    ssize_t retval = 0;
+
+    count = (count>sizeof(corr_spectrum)) ? sizeof(corr_spectrum) : count;
+    if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT;
+    /* No need to spinlock/spin_unlock like mad in the loop,
+     * interrupts never clear ready_bufs and the global
+     * kernel lock protects us from other processors should it be
+     * the case (although it is not designed to work this way).
+     */
+    if (!corr.ready_bufs && corr.acquire) {
+        add_wait_queue(&corr.wait, &wait);
+        current->state = TASK_INTERRUPTIBLE;
+        while (!corr.ready_bufs && corr.acquire) {
+            if (file->f_flags & O_NONBLOCK) {
+                retval = -EAGAIN;
+                break;
+            }
+            if (signal_pending(current)) {
+                retval = -ERESTARTSYS;
+                break;
+            }
+            schedule();
+            barrier();
+        }
+        current->state = TASK_RUNNING;
+        remove_wait_queue(&corr.wait, &wait);
+        if (retval) return retval;
+    }
+    /* corr.acquire is protected by the global kernel lock since
+     * it's only set by an ioctl.
+     */
+    if (corr.acquire==0) return 0;
+    /* Now we need to lock to touch variables modified by interrupts */
+    spin_lock_irq(&corr.state_lock);
+    bp = corr.ready_bufs;
+    corr.ready_bufs = corr.ready_bufs->next;
+    spin_unlock_irq(&corr.state_lock);
+
+    retval = count;
+    if (__copy_to_user(buf, &bp->spectrum, count)) retval = -EFAULT;
+
+    /* Put the buffer on the free list */
+    spin_lock_irq(&corr.state_lock);
+    bp->next = corr.free_bufs;
+    corr.free_bufs = bp;
+    spin_unlock_irq(&corr.state_lock);
+    return retval;
+}
+
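A typical consumer of the corr_poll()/corr_read() interface above, as a user-space sketch (the /dev node name is hypothetical; corr_spectrum comes from the header not included in this patch):

    #include <sys/poll.h>
    #include <fcntl.h>
    #include <unistd.h>

    void reader_loop(void)
    {
        corr_spectrum spec;
        struct pollfd pfd;
        pfd.fd = open("/dev/correl99", O_RDONLY);  /* hypothetical node */
        pfd.events = POLLIN;
        /* block until a complete dump is ready, then fetch it */
        while (poll(&pfd, 1, -1) > 0 &&
               read(pfd.fd, &spec, sizeof(spec)) > 0) {
            /* process spec here */
        }
    }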
+		 */
+		corr.acquire = acquire;
+		corr.insync = 0;
+		corr.acquire_buf = buffers + 0;
+		corr.free_bufs = buffers + 1;
+		buffers[1].next = buffers + 2;
+		buffers[2].next = buffers + 3;
+		buffers[3].next = NULL;
+		corr.dumps = 0;
+		corr.ready_bufs = NULL;
+		spin_unlock_irq(&corr.state_lock);
+		break;
+#undef param
+	default:
+		error = -EINVAL;
+		break;
+	}
+
+	return error;
+}
+
+static int corr_mmap(struct file *file, struct vm_area_struct *vma) {
+	u_long off = vma->vm_offset;
+	u_long len = vma->vm_end - vma->vm_start;
+
+	if (off & ~PAGE_MASK) return -EINVAL;
+
+	if (len + off > 0x10000) return -ENXIO;
+
+	pgprot_val(vma->vm_page_prot) =
+		pgprot_noncached(pgprot_val(vma->vm_page_prot));
+	vma->vm_flags |= VM_IO;
+	if (remap_page_range(vma->vm_start,
+			     (u_long) corr.regs.phyaddr + off,
+			     len, vma->vm_page_prot)) return -EAGAIN;
+	return 0;
+}
+
+#ifdef MODULE
+void cleanup_module(void) {
+	struct private *lp = &corr;
+	volatile corr_hw *dp = (corr_hw *) lp->regs.kvaddr;
+	printk("Unloading correl99\n");
+	/* Disable interrupts */
+	if (dp) {
+		vme_write16(0, &dp->misc[0x3f0]);
+	}
+	corr.regs.kvaddr = 0;
+	kfree((void *) corr.dmalist.kvaddr);
+	vme_unregister_device(&lp->device);
+}
+#endif
+
+#ifdef MODULE
+#define corr_init init_module
+#endif
+
+int __init corr_init(void)
+{
+	int error;
+	u_long tmp;
+	volatile corr_hw *dp;
+
+	/* Minimal check for the presence of some hardware.  Verifying
+	 * that this actually is the hardware we expect is too complex
+	 * since there is no signature.  This also sets up the timing
+	 * chip correctly.
+	 */
+	tmp = corr.int32Hz.vector;
+	error = vme_safe_access(VME_WRITE8, VME_AM_A24(16),
+				0x00fff1, &tmp);
+	if (error) return error;
+
+	error = vme_safe_access(VME_READ8, VME_AM_A24(16),
+				0x00fff1, &tmp);
+	if (error) return error;
+
+	if (tmp != corr.int32Hz.vector) return -ENODEV;
+
+	do {
+		/* Allocate DMA memory */
+		error = -ENOMEM;
+		if (!(corr.dmalist.kvaddr =
+		      (u_long) kmalloc(0x7800, GFP_KERNEL))) break;
+
+		error = vme_register_region(&corr.device, &corr.regs);
+		if (error) break;
+		dp = (volatile corr_hw *) corr.regs.kvaddr;
+		printk("correl99: kernel virtual address %p.\n", dp);
+
+		vme_write16(corr.hertz.vector, dp->misc + 0x3f9);
+		vme_write16(corr.int32Hz.level, dp->misc + 0x3fc);
+		vme_write16(corr.hertz.level, dp->misc + 0x3fd);
+
+		/* This is only informative, it only appears as a line
+		 * in /proc/bus/vme/regions.
+		 */
+		error = vme_register_region(&corr.device, &corr.dma_area);
+		if (error) break;
+
+		error = vme_request_interrupt(&corr.device, &corr.int32Hz);
+		if (error) break;
+
+		error = vme_request_interrupt(&corr.device, &corr.hertz);
+		if (error) break;
+
+		error = vme_alloc_dmalist(&corr.device, &corr.dma, 2);
+		if (error) break;
+
+		error = vme_register_device(&corr.device);
+		if (error) break;
+
+		/* Now we start the driver no matter what; this simply
+		 * amounts to enabling the interrupts.
+ */ + vme_write16(0x03, &dp->misc[0x3f0]); + return 0; + } while (0); +#ifdef MODULE + cleanup_module(); +#endif + return error; +} diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/universe.aux linux/drivers/vme/universe.aux --- linux-2.2.12/drivers/vme/universe.aux Thu Jan 1 01:00:00 1970 +++ linux/drivers/vme/universe.aux Sun Oct 24 21:37:49 1999 @@ -0,0 +1,24 @@ +\relax +\@writefile{toc}{\contentsline {section}{\numberline {1}Foreword}{1}} +\@writefile{toc}{\contentsline {section}{\numberline {2}Configuring your kernel}{1}} +\@writefile{toc}{\contentsline {section}{\numberline {3}Driver loading and chip initialization}{2}} +\@writefile{toc}{\contentsline {section}{\numberline {4}Accessing the VME bus from application programs}{2}} +\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Opening the device}{2}} +\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Selecting the type of VME accesses you want to perform}{3}} +\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces VME address modifier names}}{4}} +\newlabel{tbl:am}{{1}{4}} +\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Diagnostic ioctls}{4}} +\@writefile{toc}{\contentsline {section}{\numberline {5}Access to VME from kernel mode (writing board specific drivers)}{5}} +\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Registering a driver}{5}} +\@writefile{toc}{\contentsline {subsection}{\numberline {5.2}Checking that a device is responding}{5}} +\@writefile{toc}{\contentsline {subsection}{\numberline {5.3}Direct access to VMEbus devices}{5}} +\@writefile{toc}{\contentsline {subsection}{\numberline {5.4}Interrupts}{5}} +\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.4.1}Interrupt handlers}{5}} +\@writefile{toc}{\contentsline {subsection}{\numberline {5.5}DMA}{5}} +\@writefile{toc}{\contentsline {subsubsection}{\numberline {5.5.1}DMA termination handlers}{6}} +\@writefile{toc}{\contentsline {subsection}{\numberline {5.6}Other functions}{6}} +\@writefile{toc}{\contentsline {subsection}{\numberline {5.7}Restrictions}{6}} +\@writefile{toc}{\contentsline {subsection}{\numberline {5.8}Unregistering the driver}{7}} +\@writefile{toc}{\contentsline {section}{\numberline {6}Caveats}{7}} +\@writefile{toc}{\contentsline {section}{\numberline {7}Remaining problems}{7}} +\@writefile{toc}{\contentsline {section}{\numberline {8}Unsupported Universe features}{8}} diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/universe.c linux/drivers/vme/universe.c --- linux-2.2.12/drivers/vme/universe.c Thu Jan 1 01:00:00 1970 +++ linux/drivers/vme/universe.c Mon Nov 15 13:54:26 1999 @@ -0,0 +1,2540 @@ +/* + * drivers/vme/universe.c -- Driver for Tundra Universe PCI<->VME bridge. + * + * Copyright (C) 1997-1999 Gabriel Paubert, paubert@iram.es + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + +/* Restrictions: + * - only one Universe supported per system (but is it truly a restriction ? 
+ * The theories about multiple Universes are still extremely speculative
+ * and cannot be relied upon, so they are best avoided altogether :-))
+ * - currently only works on PPC processors, although every attempt has
+ * been made to make porting to other architectures relatively easy.
+ * - this driver has been designed for VME crates in which there is
+ * basically only one master and all other boards are slaves.
+ * - the Universe is a very complex chip and it has a significant number
+ * of errata, which are published in a document by Tundra. In simple
+ * environments (the Universe is the only master on the VME bus), most
+ * of these bugs can be worked around by avoiding posted writes and
+ * by setting bursts of 32 bytes.
+ */
+
+/* Known problems:
+ * - The DMA timeout handler is not guaranteed to successfully terminate
+ * pending DMA operations since it seems that the Universe may require
+ * owning the VMEbus before acknowledging a DMA STOP request. So if the bus
+ * is taken permanently by another master, there may be no way of recovering
+ * from this condition (note that this will never be a problem when the
+ * Universe is the only possible master, and the code will detect starvation
+ * due to excessive use of the coupled and posted writes channels).
+ *
+ * - The handling of unexpected interrupts requires more thought. The
+ * current implementation is fairly safe and should work properly in most
+ * cases even when designing and debugging hardware, but requires unloading
+ * and reloading the driver when a problem happens and a level becomes
+ * permanently disabled.
+ */
+
+
+/* Undefine this if your BIOS/Firmware allows you to select the Universe
+ * slave image configuration. Otherwise edit the code which is enabled by
+ * this macro to suit your needs.
+ */
+
+#define UNIVERSE_IMAGE_SETUP
+
+/* On some boards triggering the VME reset is suicide since it reboots */
+#define SUICIDAL_VME_RESET
+
+#include "universe.h"
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/malloc.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#ifdef __powerpc__
+#include <asm/processor.h>
+#endif
+
+EXPORT_SYMBOL(vme_register_device);
+EXPORT_SYMBOL(vme_unregister_device);
+
+EXPORT_SYMBOL(vme_register_region);
+EXPORT_SYMBOL(vme_unregister_region);
+
+EXPORT_SYMBOL(vme_request_interrupt);
+EXPORT_SYMBOL(vme_free_interrupt);
+
+EXPORT_SYMBOL(vme_alloc_dmalist);
+EXPORT_SYMBOL(vme_queue_dmalist);
+EXPORT_SYMBOL(vme_free_dmalist);
+
+EXPORT_SYMBOL(vme_safe_access);
+EXPORT_SYMBOL(vme_modbits);
+
+/* Module parameters:
+ * a) a bit mask to tell which images must be permanently mapped. Permanent
+ * maps are handy in some cases, and are the only ones on which the read()
+ * and write() system calls work, but may lead to large kernel virtual memory
+ * space consumption. All other images are dynamically mapped in kernel space
+ * as required by the board specific driver, or in user space by the mmap
+ * system call.
+ * b) a flag to reset the VME bus at module initialization, but only if
+ * SUICIDAL_VME_RESET is not defined.
+ */
+
+static u_int permanent_maps = 0x03;
+MODULE_PARM(permanent_maps, "i");
+#ifndef SUICIDAL_VME_RESET
+static u_int reset = 0;
+MODULE_PARM(reset, "i");
+#endif
+
+/* This has been directly copied from drivers/char/mem.c; as the comment
+ * there says, it is ugly to have to repeat it here.
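+ *
+ * It is used here to make mmap()ed VME windows uncached.  From user
+ * space the effect is invisible; a typical mapping would be (a sketch,
+ * assuming the main device node is /dev/vme and that a VME_SET_ATTR
+ * ioctl has already selected a 64kB window with VME_USE_MAP set):
+ *
+ *	char *p = mmap(0, 0x10000, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, fd, 0);
+ *
+ * after which loads and stores through p go directly to the VME bus.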
+ */ + +/* + * This should probably be per-architecture in + */ +static inline +unsigned long pgprot_noncached(unsigned long prot) { +#if defined(__i386__) + if (boot_cpu_data.x86 > 3) + prot |= _PAGE_PCD; +#elif defined(__powerpc__) + prot |= _PAGE_NO_CACHE | _PAGE_GUARDED; +#elif defined(__mc68000__) + if (CPU_IS_020_OR_030) + prot |= _PAGE_NOCACHE030; + /* Use no-cache mode, serialized */ + if (CPU_IS_040_OR_060) + prot = (prot & _CACHEMASK040) | _PAGE_NOCACHE_S; +#elif defined(__mips__) + prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED; +#endif + + return prot; +} + +/* End of drivers/char/mem.c stolen code. */ + +#ifndef __powerpc__ +#define iobarrier_rw() do { } while(0) +#define iobarrier_w() do { } while(0) +#endif + +/* Defining this makes the initialization verbose, perhaps too verbose ! */ +#define UNIVERSE_DEBUG +#undef UNIVERSE_DEBUG + + +/* bit indexes for the state field in the private section */ +#define IRQ_ACTIVE 0 +#define DMA_ACTIVE 1 +#define DMA_CANCEL 2 /* Not used ATM */ +#define DMA_TIMEDOUT 3 +#define IRQ_ACTIVE_MASK (1<state = TASK_UNINTERRUPTIBLE; + schedule_timeout(HZ/3); + } else { + SET_REG(tmp, MISC_CTL); + } +#endif + + /* This setup of mast_ctl is for single VMEBus system master, clearing + * the vown bit just in case. In single master mode, we are not + * affected by the bug on the MAXRTRY counter but we nevertheless + * enable the workaround. All the other values (VRM_DEMAND, VREL_RWD + * and PABS32) are required to work around bugs. PABS64 may only be + * set if no PCI to VME DMA is performed on Universe I! + */ + + SET_REG(UNIVERSE_MAXRTRY_INFINITE | UNIVERSE_PWON_128 | + UNIVERSE_VRL(3) | UNIVERSE_VRM_DEMAND | UNIVERSE_VREL_RWD | + UNIVERSE_PABS_32, + MAST_CTL); + + /* Set VON and VOFF, VON is set to 512 because it can seriously + * affect interrupt latency and number of retries for direct bus + * operations. Let the DMA stop for 32uS between tenures. Enable + * interrupts (Side effect: clearing all pending status bits STOP + * to P_ERR). These choices allow to take an interrupt after + * a DMA tenure (since the interrupt acknowledge cycle will have + * the highest priority) and leave 32 uS until the interrupt + * handlers start accessing the VME bus, then the CWT timer will keep + * the VME bus to the coupled channel until no more accesses are + * performed for a few microseconds, at this time, the DMA will + * restart operation for a full block before sampling interrupts + * again. These choices depend strongly on the application and + * should be tuned accordingly, they are simply aimed at providing + * reasonable default values. + * In the future, it might be possible to stop DMA while interrupt + * handlers are running to guarantee handlers a more deterministic + * access to the VME bus. The flags field in the struct vme_interrupt, + * currently unused, is actually designed to provide this kind of + * information to the generic vme interrupt handler. This would allow + * optimizations like avoiding to suspend and restart the DMA when the + * handler actually does not need VME access (which can only happen + * with ROAK interrupts). But for now we don't even stop the DMA when + * an interrupt comes in: we simply hope that the accesses performed + * by the handlers start before the VOFF timer expires and that + * ownership of the VME bus is kept by the CWT timer. 
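+	 *
+	 * A rough worked example of what these numbers mean (the
+	 * throughput figure is an assumption, not a measurement): at a
+	 * sustained BLT rate of 20 Mbytes/s, a VON of 512 bytes gives
+	 * DMA tenures of about 25 us; with VOFF at 32 us the DMA can
+	 * therefore never hold the VME bus for much more than half of
+	 * the time, the rest being available for coupled cycles and
+	 * interrupt acknowledge cycles.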
+	 */
+	tmp = GET_REG(DGCS) & ~dgcs_mask;
+
+	tmp |= dgcs_mask & (UNIVERSE_DGCS_CHAIN | UNIVERSE_DGCS_VON_512 |
+			    UNIVERSE_DGCS_VOFF_32uS | UNIVERSE_DGCS_INTMASKS);
+
+	SET_REG(tmp, DGCS);
+
+	/* Set up the miscellaneous PCI register; we set CRT to 1024uS
+	 * because this is similar to the fixed value (32768 PCI clocks)
+	 * used by later revisions of the chip. And set CWT to 128 PCI clocks
+	 * (almost 4 us).
+	 */
+	SET_REG(UNIVERSE_CRT_1024uS | UNIVERSE_CWT_128, LMISC);
+
+
+#ifdef UNIVERSE_IMAGE_SETUP
+	/* Setting up slave images should have been performed earlier by a
+	 * resource allocator, but not all boards have this capability.
+	 * In this case, the following code should be edited to suit your
+	 * needs.
+	 */
+	/* The following is an outdated description, it is only valid
+	 * on PPC without the powerplus special setup.
+	 */
+	/* Set up the Special slave image with the following:
+	 * - mapped at the highest possible address of BAT1: 0xCC000000
+	 * on the PPC side, corresponding to 0x0C000000 on PCI, or 3 times
+	 * 64 Mbytes,
+	 * - first block: user mode, 16 bits maximum data width,
+	 * - second block: user mode, 32 bits width,
+	 * - third block: supervisor mode, 16 bits maximum data width,
+	 * - fourth block: supervisor mode, 32 bits width.
+	 * All images generate data space accesses (I don't expect anybody
+	 * to run code through the VMEbus; if you need to do it, first copy
+	 * it to local memory and then execute it there, since it can be
+	 * cached and executes much faster).
+	 * For now write posting is disabled because it may result in bus
+	 * lockups due to errata in Universe V1, but it may be tested and
+	 * won't affect the functionality of the driver.
+	 */
+	/* Don't want to export a symbol just for this; actually this should
+	 * be set up by firmware according to the user's needs.
+	 */
+#ifndef __powerpc__
+#define pwrplus 0
+#else
+#define pwrplus ((_machine == _MACH_prep) && (universe.bus_delta == 0))
+#endif
+	SET_REG(UNIVERSE_SLAVE_EN |
+		UNIVERSE_SLSI_VDW_32*((1<<1)+(1<<3)) |
+		UNIVERSE_SLSI_PRIV_SUPER*((1<<2)+(1<<3)) |
+		((pwrplus ? 0xf4000000 : 0xcc000000)
+		 - universe.bus_delta)>>UNIVERSE_SLSI_BS_ADDR_SHIFT,
+		SLSI);
+
+
+	/* Set up image 0 base and bound (8kB) */
+	SET_REG((pwrplus ? 0xfc800000 : 0xc1ffe000 - universe.bus_delta),
+		LSI_BS(0));
+	SET_REG((pwrplus ? 0xfc802000 : 0xc2000000 - universe.bus_delta),
+		LSI_BD(0));
+	/* TO and CTL are dynamically programmed according to needs */
+
+	/* Set up the CR/CSR address space for tests */
+	SET_REG((pwrplus ? 0xf3000000 : 0xc2000000 - universe.bus_delta),
+		LSI_BS(1));
+	SET_REG((pwrplus ? 0xf4000000 : 0xc3000000 - universe.bus_delta),
+		LSI_BD(1));
+	SET_REG(0x00000000, LSI_TO(1));
+	SET_REG(UNIVERSE_SLAVE_EN | UNIVERSE_VAS_CRCSR | UNIVERSE_VDW_32,
+		LSI_CTL(1));
+
+	/* Set up an A32/D32 image with BLT for tests */
+	SET_REG((pwrplus ? 0xe0000000 : 0xc8000000 - universe.bus_delta),
+		LSI_BS(2));
+	SET_REG((pwrplus ? 0xf3000000 : 0xcc000000 - universe.bus_delta),
+		LSI_BD(2));
+	SET_REG(universe.bus_delta, LSI_TO(2));
+	SET_REG(UNIVERSE_SLAVE_EN | UNIVERSE_VAS_A32 | UNIVERSE_VDW_32 |
+		UNIVERSE_BLT,
+		LSI_CTL(2));
+#endif /* UNIVERSE_IMAGE_SETUP */
+
+}
+
+
+
+#ifdef UNIVERSE_DEBUG
+#define dprintk(args...) \
printk(KERN_DEBUG ##args) +#define print_register(reg) printk(KERN_DEBUG \ + "register " #reg " =%8.8x\n",GET_REG(reg)) +#define print_register_i(reg, i) printk(KERN_DEBUG \ + "register " #reg "(%d) =%8.8x\n", \ + i, GET_REG(reg(i))) + + +static void print_registers(void) +{ + volatile u_char *__rp=universe.reg_base; + int i, limit; + if (universe.revision) limit=8; else limit=4; + for (i=0; i (PAGE_SIZE/sizeof(struct universe_dma_entry)) || + maxfrags <=0 || !dma->handler) return -EINVAL; + v = kmalloc(maxfrags * sizeof(struct universe_dma_entry), + GFP_KERNEL); + if (v == NULL) return -ENOMEM; + write_lock_irqsave(&universe.lock, fl); + if (dma->device) { + kfree(v); + retval = -EBUSY; + goto out; + } + dma->private = v; + dma->maxfrags = maxfrags; + dma->flags = 0; + dma->device = dev; + + dma->next = dev->dmalists; + dev->dmalists = dma; + dma->flags = 1<flags)) + && test_bit(VME_DMA_BUSY, &dma->flags)) { + /* We could optimize by searching the list and removing the + * entry if it is in the queue of pending operations. But + * if the dma list is being setup by queue_dmalist it will + * not be found. So the worst case remains the same + * and only the simple method is implemented: being + * woken up when the dma has finished. + */ + spin_unlock_irqrestore(&universe.dmalock, fl); + wait_event(universe.dma_free_wait, + !test_bit(VME_DMA_BUSY, &dma->flags)); + } else { + spin_unlock_irqrestore(&universe.dmalock, fl); + if (!wasready) return; + } + + /* Now the dmalist can be freed */ + write_lock_irqsave(&universe.lock, fl); + if (!dma->device) goto out; + if (dma->device->dmalists == dma) { + dma->device->dmalists = dma->next; + } else { + struct vme_dma *p = dma->device->dmalists; + for (; p && p->next != dma; p=p->next); + if (!p) goto out; + p->next = p->next->next; + } + dma->device = NULL; + dma->queue = NULL; + kfree (dma->private); + dma->private = NULL; + out: write_unlock_irqrestore(&universe.lock, fl); +} + + +int +vme_queue_dmalist(struct vme_dma *dma, struct vme_dmavec *dv, size_t frags) { + u_long fl; + struct universe_dma_entry *p; + size_t totlen = 0; + int retval; + /* The locking is quite complex because of interaction with + * vme_free_dmalists. The BUSY bit does not need the lock + * when cleared, and READY when set. All other transitions + * require dma_lock to be acquired first. + */ + spin_lock_irqsave(&universe.dma_lock, fl); + retval = -EINVAL; + /* Prevent multiple simultaneous uses of the same DMA list or + * queuing a list which is not READY (may mean being freed). + */ + if (frags > dma->maxfrags || frags <= 0 || dma->timeout<2 + || !test_bit(VME_DMA_READY, &dma->flags)) { + goto out; + } + retval = -EBUSY; + if (test_and_set_bit(VME_DMA_BUSY, &dma->flags)) goto out; + spin_unlock_irqrestore(&universe.dma_lock, fl); + + /* Now fill the Universe scatter gather list */ + for (p = dma->private; frags!=0; p++, dv++, frags--) { + u32 dctl = am2ctl[(dv->flags & VME_AM_MASK)>>VME_AM_SHIFT] & + dw2ctl[(dv->flags&VME_DW_MASK)>>VME_DW_SHIFT]; + + /* some sanity checks: only one DW flag bit is set, the + * Universe DMA byte counter has only 24 bits (it's not a + * limitation since you can't allocate enough contiguous + * kernel memory anyway), the data width must be compatible + * with the AM code, DMA must be supported for this AM code, + * and the low 3 bit of addresses must be the same. 
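+	 *
+	 * For example (addresses invented for illustration): a fragment
+	 * with kvaddr 0xc1f84010 and vme_addr 0x00200010 passes the last
+	 * test, while kvaddr 0xc1f84010 with vme_addr 0x00200014 is
+	 * rejected even though both are 4-byte aligned, presumably
+	 * because the chip cannot re-align data between the two buses
+	 * during a block transfer: (0x10 ^ 0x14) & 7 is non zero.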
+ */ + if (!(dctl & UNIVERSE_SLAVE_EN) || + (dv->length & 0xff000000) || + (am_bad_dw[(dv->flags & VME_AM_MASK)>>VME_AM_SHIFT] + & dv->flags) || + (am_bad_use[(dv->flags & VME_AM_MASK)>>VME_AM_SHIFT] + & VME_USE_DMA) || + ((dv->kvaddr ^ dv->vme_addr) & 7)) return -EINVAL; + totlen += dv->length; + + dctl = (dctl & ~UNIVERSE_SLAVE_EN) | + (((dv->flags&VME_DMA_DIRECTION_MASK) + >>VME_DMA_DIRECTION_SHIFT)<__res01 = p->__res02 = p->__res03 = 0; + __put_le32(dv->vme_addr, &p->vaddr); + __put_le32(virt_to_bus((void *)dv->kvaddr), &p->laddr); + __put_le32(dv->length, &p->dtbc); + __put_le32(dctl, &p->dctl); + __put_le32((frags == 1) ? 1 : virt_to_bus(p+1), &p->dcpp); + } + dma->remlen = totlen; + dma->queue = NULL; + + spin_lock_irqsave(&universe.dma_lock, fl); + if (universe.dma_queue_end) { + universe.dma_queue_end->queue = dma; + } else { + universe.dma_queue = dma; + /* We have to start the DMA in the bridge since it's + * stopped unless the interrupt handler, which will + * take care of starting it, is running. + */ + if (!test_bit(IRQ_ACTIVE,&universe.state)) { + /* Now the interrupt may come on another processor + * and set IRQ_ACTIVE to prevent interfering with + * PIO bus accesses while the interrupt is running. + * It's too late, the DMA will be started... + */ + volatile u_char * __rp = universe.reg_base; + SET_REG(0, DTBC); + SET_REG(virt_to_bus(dma->private), DCPP); + iobarrier_w(); + SET_REG(universe.cached_dgcs | + UNIVERSE_DGCS_GO, DGCS); + universe.dma_timer.expires = jiffies + dma->timeout; + add_timer(&universe.dma_timer); + set_bit(DMA_ACTIVE, &universe.state); + } + } + universe.dma_queue_end = dma; + retval = 0; + out: spin_unlock_irqrestore(&universe.dma_lock,fl); + return retval; +} + +#ifdef UNIVERSE_DEBUG +static void inline +dump_sglist(struct universe_dma_entry *p) { + printk(KERN_DEBUG " Start of list: \n"); + do { + printk(" dctl: %08x\n dtbc: %08x\n" + " laddr: %08x\n vaddr: %08x\n" + " dcpp: %08x\n", + __get_le32(&p->dctl), __get_le32(&p->dtbc), + __get_le32(&p->laddr), __get_le32(&p->vaddr), + __get_le32(&p->dcpp)); + if (__get_le32(&p->dcpp) & 1) break; + p = bus_to_virt(__get_le32(&p->dcpp) & ~7); + } while(1); + printk(KERN_DEBUG " End of list.\n"); +} + +/* Actually this one should be protected with spinlocks, but it was only + * used to check that the Universe dma descriptor lists were correctly built. + */ +void +vme_dump_dmalists(void) { + struct vme_device * dev; + printk(KERN_DEBUG "Dumping DMA lists...\n"); + for (dev=&vme_root; dev; dev=dev->next) { + struct vme_dma * dma; + if (dev->dmalists == NULL) continue; + printk (KERN_DEBUG "DMA lists for device %s.\n", dev->name); + for(dma=dev->dmalists; dma; dma=dma->next) { + if (dma->private) dump_sglist(dma->private); + } + } +} +#endif +/* Looking for an image with the specified attributes: note that we ask + * for an exact match on the addressing space (AM code), but the caller may + * not care about a precise data width attribute. For example to access a D8(O) + * slave you don't care whether the Universe might generate 16 or 32 bit + * accesses so any image with DW=8, 16 or 32 is suitable. + * That's why there are 4 bits and not 2 for data width attributes and they + * are said to match if the and of image and requested flags is non zero. + * + * IMPORTANT: This function is called after having checked + * that size is > 0 and that there is no wraparound ! 
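+ *
+ * In flag terms (a sketch using the VME_DW macro): a request carrying
+ * VME_DW(8) | VME_DW(16) | VME_DW(32) matches a root image whose flags
+ * carry VME_DW(32) only, because the two DW bit sets intersect, while
+ * the AM fields are compared with an exact xor test.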
+ */ +static struct vme_region * +find_root_region(u_int flags, u_long base, u_long limit) { + struct vme_region * p; + + for ( p = vme_root.regions; p; p = p->next) { + if ((flags ^ p->flags) & VME_AM_MASK) continue; + if (!(flags & p->flags & VME_DW_MASK)) continue; + /* Don't remove the -1, they are required for correctness + * if an image maps the high end of the 32 bit address space! + */ + if ((base >= p->base) && + (limit <= p->limit)) break; + } + return p; +} + +/* VME_USE_PIO means that the kvaddr has to be set or return an error, + * VME_USE_MAP means that the region has been mapped by ioremap and iounmap + * should be performed when freed by unregister region. Otherwise, it is a + * subset of a permanently mapped region. VME_USE_RMW has its standard meaning + * but is not actually enforced in the corresponding code (it's kernel code, + * you should know what you are doing). VME_USE_DMA set means the region is + * used for DMA and will show as such in /proc/bus/vme/regions. + * + */ + +int +vme_register_region(struct vme_device *dev, struct vme_region *reg) { + u_long fl; + struct vme_region *p; + u32 flags = reg->flags & ~VME_USE_MAP; + volatile u_char * kvaddr; + int retval; + + /* Check a) that at least one DW bit is set, b) that if it is not a + * PIO only region, only one of the DW bits is set, c) that DW is + * valid for this AM, d) that no VME_USE flag incompatible with + * AM is set, e) that the range is non empty, and f) that the range + * does not exceed the limit for this AM. + */ + if (!(VME_DW_MASK & flags) || + (((flags & (VME_USE_DMA | VME_USE_PIO)) != VME_USE_PIO) && + !(dw2ctl[(flags & VME_DW_MASK)>>VME_DW_SHIFT])) || + (am_bad_dw[(flags & VME_AM_MASK)>>VME_AM_SHIFT] & flags) || + (am_bad_use[(flags & VME_AM_MASK)>>VME_AM_SHIFT] & flags) || + (reg->limit < reg->base) || + (reg->limit > + vas_masks[(am2ctl[(flags & VME_AM_MASK) >> VME_AM_SHIFT] + &UNIVERSE_VAS_MASK) >> UNIVERSE_VAS_SHIFT])) + return -EINVAL; + + /* If requested, map the area in the kernel virtual memory */ + p = 0; + kvaddr=0; + if (flags & VME_USE_PIO) { + p = find_root_region(reg->flags, reg->base, reg->limit); + if (!p) return -ENXIO; + /* Mask the DW flags with the one actually used */ + flags &= p->flags | ~VME_DW_MASK; + /* Don't ioremap permanently mapped areas */ + if (p->flags & VME_USE_PIO) { + kvaddr = p->kvaddr + (reg->base - p->base); + } else { + kvaddr = ioremap(p->phyaddr + (reg->base - p->base), + reg->limit + 1 - reg->base); + /* This may not be the best error code but it is very + * likely to be the cause. + */ + if (!kvaddr) return -ENOMEM; + /* Record that iounmap will have to be done */ + flags |= VME_USE_MAP; + } + } + write_lock_irqsave(&universe.lock, fl); + if (reg->device) { + if (flags & VME_USE_MAP) iounmap((void *)kvaddr); + retval = -EBUSY; + goto out; + } + reg->phyaddr = p ? 
p->phyaddr + (reg->base - p->base) : 0; + reg->flags = flags; + reg->kvaddr = kvaddr; + reg->device = dev; + reg->next = dev->regions; + dev->regions = reg; + retval = 0; + out: write_unlock_irqrestore(&universe.lock, fl); + return retval; +} + +void +vme_unregister_region(struct vme_region *reg) { + u_long fl; + + write_lock_irqsave(&universe.lock, fl); + if(!reg->device) goto out; + if(reg->device->regions == reg) { + reg->device->regions = reg->next; + } else { + struct vme_region *p = reg->device->regions; + for (; p && p->next != reg; p=p->next); + if (p) p->next = p->next->next; + } + reg->device = NULL; + if (reg->flags & VME_USE_MAP) { + iounmap((void *)reg->kvaddr); + } + reg->kvaddr = 0; + reg->phyaddr = 0; + out: write_unlock_irqrestore(&universe.lock, fl); +} + +int +vme_request_interrupt(struct vme_device *dev, struct vme_interrupt *intr) { + int retval; + u_long fl; + volatile u_char * __rp = universe.reg_base; + + if (intr->level <1 || intr->level>7 || + intr->vector>255 || !intr->handler) + return -EINVAL; + /* If the interrupt level has been permanently disabled because + * of a serious problem return -EBUSY. There might be a better error + * code. Strange enough valid_virqs is only modified by the interrupt + * handler which acquires the read_lock, and here is the only place + * where it is tested is with the write lock held, but it works... + */ + write_lock_irqsave(&universe.lock, fl); + retval = -EBUSY; + if ((universe.valid_virqs & (1<<(intr->level))) && (!intr->device) && + (universe.ints[intr->level-1][intr->vector] == 0)) { + intr->device = dev; + intr->next = dev->interrupts; + dev->interrupts = intr; + intr->count = 0; + universe.ints[intr->level-1][intr->vector] = intr; + universe.virq[intr->level-1].handlers++; + if (universe.virq[intr->level-1].handlers == 1) { + universe.cached_lint_en |= 1<<(intr->level); + SET_REG(universe.cached_lint_en, LINT_EN); + } + retval = 0; + } + write_unlock_irqrestore(&universe.lock, fl); + return retval; +} + +void +vme_free_interrupt(struct vme_interrupt *intr) { + u_long fl; + volatile u_char * __rp = universe.reg_base; + + write_lock_irqsave(&universe.lock, fl); + if (!intr->device) goto out; + if(intr->device->interrupts == intr) { + intr->device->interrupts = intr->next; + } else { + struct vme_interrupt *p = intr->device->interrupts; + for (; p && p->next != intr; p=p->next); + if (p) p->next = p->next->next; + } + intr->device = NULL; + universe.ints[intr->level-1][intr->vector]=NULL; + universe.virq[intr->level-1].handlers--; + if (universe.virq[intr->level-1].handlers == 0) { + universe.cached_lint_en &= ~(1<<(intr->level)); + SET_REG(universe.cached_lint_en, LINT_EN); + } + out: write_unlock_irqrestore(&universe.lock, fl); +} + +static inline void +vme_remove_resources(struct vme_device *dev) { + while (dev->dmalists) vme_free_dmalist(dev->dmalists); + while (dev->regions) vme_unregister_region(dev->regions); + while (dev->interrupts) vme_free_interrupt(dev->interrupts); +} + +int +vme_register_device(struct vme_device * dev) { + u_long fl; + int retval = -EBUSY; + struct vme_device *p; + /* Insert in sorted by minor number: it will fail when trying + * to register twice the same device. Note that device that set + * the minor to 0 and fileops to NULL are allowed: it means + * that they are not opened through this driver. 
+ */ + write_lock_irqsave(&universe.lock, fl); + if (dev->fops || dev->minor) { + for (p= &vme_root; p->next; p=p->next) { + if (p->next->minor > dev->minor) break; + if (p->next->minor == dev->minor) goto out; + } + } else { + for(p= &vme_root; p->next; p=p->next); + } + dev->next = p->next; + p->next = dev; + retval = 0; + out: write_unlock_irqrestore(&universe.lock, fl); + return retval; +} + +void +vme_unregister_device(struct vme_device *dev) { + u_long fl; + struct vme_device *p; + + write_lock_irqsave(&universe.lock, fl); + /* Note that we can't unregister the root vme device, + * but that's exactly what we want. + */ + for(p=&vme_root; p->next && p->next!=dev; p=p->next); + if (p->next == dev) { + p->next=dev->next; + } + write_unlock_irqrestore(&universe.lock, fl); + vme_remove_resources(dev); + return; +} + +static int +universe_open(struct inode * inode, struct file * file) +{ + /* Note that this call is not protected by spinlocks, so that + * the open function of the device may register/unregister regions, + * interrupts and dma lists. Right now open and close are always + * interlocked with module loading/unloading. Changes will be required + * if this is not the case in the future (many operations in this + * driver are already protected against concurrent operations because + * most of the structures may be read during interrupts). + */ + + int minor = MINOR(inode->i_rdev); + struct vme_device *p; + + for (p=&vme_root; p && p->minor!=minor; p=p->next); + + if (!(p && p->fops && p->fops->open)) return -ENODEV; + + if (p==&vme_root) { + struct vme_region * f; + f = (struct vme_region *) kmalloc(sizeof(*f), GFP_KERNEL); + if (!f) return -ENOMEM; + /* A SET_ATTR ioctl is necessary before: + * any access may be performed. + */ + MOD_INC_USE_COUNT; + f->kvaddr = 0; + f->phyaddr = 0; + f->base = 1; + f->limit = 0; + f->flags = 0; + f->next = NULL; + f->device = &vme_root; + file->private_data = f; + return 0; + } else { + file->f_op = p->fops; + return file->f_op->open(inode, file); + } +} + +static int +universe_release(struct inode * inode, struct file * file) +{ + /* kfree is a nop when called with a NULL pointer */ + kfree(file->private_data); + MOD_DEC_USE_COUNT; + return 0; +} + +static loff_t universe_llseek(struct file *file, loff_t offset, int origin) { + struct vme_region * FPD = (struct vme_region *) file->private_data; + loff_t retval; + switch(origin) { + case 2: + offset += FPD->limit + 1 - FPD->base; + break; + case 1: + offset += file->f_pos; + break; + } + retval = -EINVAL; + if (offset>=0) { + retval = file->f_pos = offset; + } + return retval; +}; + +static ssize_t +universe_read(struct file *file, char * buf, size_t count, loff_t *ppos) { + struct vme_region * FPD = (struct vme_region *) file->private_data; + ssize_t retval; + size_t maxpos = FPD->limit - FPD->base; + + if (!(FPD->flags&VME_USE_PIO)) return -EINVAL; + /* Check that position and counts are ok */ + if (*ppos > maxpos) return 0; + if ( count > (maxpos - *ppos)) + count = maxpos - *ppos; + + /* Then check for bad buffer */ + if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; + + /* We use here an arch specific routine which might not + * work with other machines since byteorder of PPC is same + * as VME. + */ +#ifdef __powerpc__ + retval = copy_io_to_user(buf, FPD->kvaddr + *ppos, count); + return retval ? -EFAULT : count; +#else +#warning "No copy_io_to_user routine supported in this architecture for now." 
+ return -EINVAL; +#endif +} + +static ssize_t +universe_write(struct file *file, const char *buf, size_t count, loff_t *ppos){ + struct vme_region * FPD = (struct vme_region *) file->private_data; + ssize_t retval; + size_t maxpos = FPD->limit - FPD->base; + + if (!(FPD->flags&VME_USE_PIO)) return -EINVAL; + /* Check that position and counts are ok */ + if (*ppos > maxpos) return 0; + if ( count > (maxpos - *ppos)) + count = maxpos - *ppos; + + /* Then check for bad buffer */ + if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; + + /* We use here an arch specific routine which might not + * work with other machines since byteorder of PPC is same + * as VME. + */ +#ifdef __powerpc__ + retval = copy_user_to_io(FPD->kvaddr + *ppos, buf, count); + return retval ? -EFAULT : count; +#else +#warning "No copy_user_to_io routine supported in this architecture for now." + return -EINVAL; +#endif +} + + +static int +universe_mmap(struct file *file, struct vm_area_struct *vma) { + struct vme_region * FPD = (struct vme_region *) file->private_data; + u_long off = vma->vm_offset; + u_long len = vma->vm_end - vma->vm_start; + /* Caution: this might fail badly when PAGE_SIZE is 8k or more + * and Universe's images of 4kB resolution (0 and 4) are used. + * But this should be solved somewhere else to force mapping + * using these images with PAGE_SIZE alignment and size. + * Note also that do_mmap ensures that off + len does not wraparound + * so that this case is not checked here. + */ + if (!(FPD->flags & VME_USE_MAP) || + (off + FPD->base) & ~PAGE_MASK) return -EINVAL; + + if ((off + len - 1) > (FPD->limit - FPD->base)) return -ENXIO; + + pgprot_val(vma->vm_page_prot) = + pgprot_noncached(pgprot_val(vma->vm_page_prot)); + + vma->vm_flags |= VM_IO; + if (remap_page_range(vma->vm_start, (u_long) FPD->phyaddr+off, + len, vma->vm_page_prot)) return -EAGAIN; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,2,3) + /* This is now properly done in mm/mmap.c */ + vma->vm_file=file; + file->f_count++; +#endif + return 0; +}; + +/* This function works for now because there is no HW error reporting through + * machine checks or similar mechanisms. + */ +int +vme_safe_access(u_int cmd, u32 flags, u_long vme_addr, u_long *value) { + volatile u_char *__rp = universe.reg_base, *vp; + u_long fl; + unsigned short status; + int retval; + u32 ctl = am2ctl[(flags & VME_AM_MASK)>>VME_AM_SHIFT] & + dw2ctl[(flags&VME_DW_MASK)>>VME_DW_SHIFT]; + + /* Check that DW is valid for this AM */ + if (!(ctl & UNIVERSE_SLAVE_EN) || + (am_bad_dw[(flags & VME_AM_MASK)>>VME_AM_SHIFT] & flags)) + return -EINVAL; + + vp = universe.root_regions[0].kvaddr + (vme_addr & 0xfff); + /* On SMP, the following code guarantees only that, if there are no + * bus errors on the access, the device is responding. But since + * devices can be mmapped from user mode, the only safe way to check + * for bus errors would be to put all other processors in a kernel + * mode idle loop. There is currently no way to do this (it would + * probably require a special IPI and it would in any case be bad + * for performance). Note these functions are not designed to be used + * often, only to make sure that the hardware is present, for + * diagnostic purposes, and when an operation requires interlock with + * atomic (vme_modbits) calls, so performance is not a major concern. 
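+	 *
+	 * From user space the same check is available through the
+	 * diagnostic ioctls on the main device node, e.g. (a sketch;
+	 * the offset is relative to the base chosen by a previous
+	 * VME_SET_ATTR call):
+	 *
+	 *	VME_safe_access a = { offset: 0 };
+	 *	if (ioctl(fd, VME_READ32, &a) < 0)
+	 *		perror("VME read");
+	 *	else
+	 *		printf("%08lx\n", a.value);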
+ */ + dprintk("vme_safe_access: cmd=%x, flags=%x, vme_addr=%x\n", + cmd, flags, vme_addr); + spin_lock_irqsave(&universe.bus_lock, fl); + SET_REG(ctl, LSI_CTL(0)); + SET_REG((vme_addr-universe.root_regions[0].phyaddr) & ~0xfff, + LSI_TO(0)); + SET_REG_HALF(PCI_STATUS_SIG_TARGET_ABORT, PCI_STATUS); + iobarrier_rw(); /* Needed for VME_READ cases */ + switch(cmd) { + case VME_READ8: + *value = vme_read8(vp); + break; + case VME_READ16: + *value = vme_read16(vp); + break; + case VME_READ32: + *value = vme_read32(vp); + break; + case VME_WRITE8: + vme_write8(*value, vp); + break; + case VME_WRITE16: + vme_write16(*value, vp); + break; + case VME_WRITE32: + vme_write32(*value, vp); + break; + default: + retval = -EINVAL; + goto out; + } + iobarrier_rw(); /* Needed for VME_WRITE cases */ + status = GET_REG_HALF(PCI_STATUS); + retval = 0; + out: spin_unlock_irqrestore(&universe.bus_lock, fl); + dprintk("vme_safe_access: ctl=%x, to= %x, value=%x\n", + ctl, (vme_addr-universe.root_regions[0].phyaddr) & ~0xfff, + *value); + if (!retval && status&PCI_STATUS_SIG_TARGET_ABORT) retval = -EIO; + return retval; +} + +/* This should always use true VME RMW cycles, but there are so many bugs in + * the Universe V1 that a workaround has been implemented exactly as suggested + * by the errata except for the fact that we don't lock the VMEBus with the + * VOWN bit, a non-issue in single master environments and even in multimaster + * environments when the CWT timer is not disabled (we set it to about + * 4 microseconds by default) and the PCI bus latency is not too high. + * + * Note that the Universe way of handling RMW cycles may not always be + * what users want: the comparison is done on a bit by bit basis so that it + * makes for example the emulation of a compare and swap impossible. + * + * That makes the compare data register in the special cycle generator + * redundant: actually it may be set to the complement of the new data + * to be written and each unmasked bit will be set as desired. + */ + +/* Warning: this function is not yet stabilized, its parameters and operation + * are still likely to change in the future. + */ + +int +vme_modbits(u_int cmd, u32 flags, u_long vme_addr, + u_int *value, u_int mask) { + volatile u_char *__rp = universe.reg_base, + *vp = universe.root_regions[0].kvaddr + (vme_addr & 0xfff); + u_long fl, size = 1<<(_IOC_NR(cmd)-_IOC_NR(VME_MODBITS8)); + int retval; + u32 ctl = am2ctl[(flags & VME_AM_MASK)>>VME_AM_SHIFT] & + dw2ctl[(flags&VME_DW_MASK)>>VME_DW_SHIFT]; + + + /* Check that only one DW bit is set, that DW is valid for this AM, + * that RMW cycle are actually allowed for this flag settings, that + * size is not larger than the used VME bus width, and that the access + * is properly aligned. Experience shows that RMW cycles also work + * with the special slave image (which is not obvious from the + * documentation), but it is irrelevant here. 
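+	 *
+	 * A worked example of the bit-by-bit semantics: to force the low
+	 * nibble of a byte-wide register to 0x5 while leaving the high
+	 * nibble alone, call vme_modbits with mask 0x0f and value 0x05.
+	 * A location containing 0xa3 beforehand contains 0xa5 afterwards,
+	 * whatever the old low nibble was.  What cannot be expressed is
+	 * "write the new value only if the whole old word equals some
+	 * expected value", which is why the comparison rule described
+	 * above makes a true compare-and-swap impossible to emulate.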
+	 */
+	if (!(ctl & UNIVERSE_SLAVE_EN) ||
+	    (am_bad_dw[(flags & VME_AM_MASK)>>VME_AM_SHIFT] & flags) ||
+	    (am_bad_use[(flags & VME_AM_MASK)>>VME_AM_SHIFT] &
+	     VME_USE_RMW) ||
+	    (size > ((flags & VME_DW_MASK)>>VME_DW_SHIFT)) ||
+	    vme_addr & (size-1)) return -EINVAL;
+
+	spin_lock_irqsave(&universe.bus_lock, fl);
+	SET_REG(ctl, LSI_CTL(0));
+	SET_REG((vme_addr-universe.root_regions[0].phyaddr) & ~0xfff,
+		LSI_TO(0));
+	vp = universe.root_regions[0].kvaddr + (vme_addr & 0xfff);
+	retval = 0;
+	if (universe.revision) {
+		/* Using the special cycle generator; note that we don't
+		 * care about the order while setting the special cycle
+		 * generator so we don't need any iobarrier: if the
+		 * processor or the host bridge attempts to perform 64 bit
+		 * transfers through write combining, the Universe will not
+		 * acknowledge them and the transfer will automatically fall
+		 * back to a series of non burst 32 bit accesses. This should
+		 * also be correct on little endian processors but has not
+		 * been tested.
+		 */
+		int shcnt = 32 - ((size+vme_addr)&3)*8;
+		SET_REG_BE(mask<private_data;
+	u_int flags, rmwval;
+	int error, size;
+	u_long base, limit, offset, value, mask;
+	struct vme_region *reg;
+
+	if (_IOC_SIZE(cmd) &&
+	    !access_ok((_IOC_DIR(cmd) & _IOC_READ) ? VERIFY_WRITE:VERIFY_READ,
+		       arg, _IOC_SIZE(cmd))) return -EFAULT;
+	switch(cmd) {
+#define param ((VME_attr *) arg)
+	case VME_SET_ATTR:
+		error = __get_user(base, &param->base);
+		error |= __get_user(limit, &param->limit);
+		error |= __get_user(flags, &param->flags);
+		if (error) break;
+		/* Is this the right capability to use? Everybody seems to
+		 * use CAP_SYS_ADMIN as a default to replace suser().
+		 */
+		error = -EPERM;
+		if (!capable(CAP_SYS_ADMIN)) break;
+		error = -EBADF;
+		if ((flags & VME_USE_RMW) && (file->f_mode != 3)) break;
+
+		/* Check a) that at least one DW bit is set, b) that if it
+		 * is not a PIO/MAP only region, only one of the DW bits is
+		 * set, c) that DW is valid for this AM, d) that no VME_USE
+		 * flag incompatible with AM is set, e) that the range is not
+		 * empty, and f) that the address is within the allowed
+		 * range for this AM !
+		 */
+		error = -EINVAL;
+		if (!(VME_DW_MASK & flags) ||
+		    (((flags & VME_USE_DMA) ||
+		      !(flags & (VME_USE_PIO|VME_USE_MAP))) &&
+		     !(dw2ctl[(flags & VME_DW_MASK)>>VME_DW_SHIFT])) ||
+		    (am_bad_dw[(flags & VME_AM_MASK)>>VME_AM_SHIFT] & flags) ||
+		    (am_bad_use[(flags & VME_AM_MASK)>>VME_AM_SHIFT] &
+		     flags) ||
+		    (limit < base) ||
+		    (limit >
+		     vas_masks[(am2ctl[(flags & VME_AM_MASK) >> VME_AM_SHIFT]
+				&UNIVERSE_VAS_MASK) >> UNIVERSE_VAS_SHIFT]))
+			break;
+
+		error = -ENXIO;
+		/* Read and write are only allowed on persistently mapped
+		 * regions, which might not be frequent in the future.
+		 */
+		if (flags & (VME_USE_PIO|VME_USE_MAP)) {
+			reg = find_root_region(flags, base, limit);
+			if (!reg) break;
+			if (flags & VME_USE_PIO) {
+				if (!(reg->flags & VME_USE_PIO)) break;
+				FPD->kvaddr = reg->kvaddr +
+					(base - reg->base);
+			} else {
+				FPD->kvaddr = 0;
+			}
+			flags &= (reg->flags | ~VME_DW_MASK);
+			FPD->phyaddr = reg->phyaddr +
+				(base - reg->base);
+		} else {
+			FPD->phyaddr = 0;
+			FPD->kvaddr = 0;
+		}
+		/* Set the file pointer to 0 ?
+		 */
+		FPD->flags = flags;
+		FPD->base = base;
+		FPD->limit = limit;
+		error = 0;
+		break;
+
+	case VME_GET_ATTR:
+		error = __put_user(FPD->base, &param->base);
+		error |= __put_user(FPD->limit, &param->limit);
+		error |= __put_user(FPD->flags, &param->flags);
+		break;
+
+#undef param
+#define param ((VME_safe_access *) arg)
+	case VME_WRITE8:
+	case VME_WRITE16:
+	case VME_WRITE32:
+		error = __get_user(value, &param->value);
+		if (error) break;
+	case VME_READ8:
+	case VME_READ16:
+	case VME_READ32:
+		error = __get_user(offset, &param->offset);
+		size = 1<<(_IOC_NR(cmd)-_IOC_NR(VME_READ8));
+		if (error) break;
+
+		/* Check that the access is allowed */
+		error = -EBADF;
+		if (!(file->f_mode &
+		      ((_IOC_DIR(cmd) & _IOC_READ) ?
+		       FMODE_READ : FMODE_WRITE))) break;
+
+		/* Check that the address is valid */
+		error = -EINVAL;
+		if (((offset+size-1) < offset) ||
+		    ((offset+size-1) > (FPD->limit - FPD->base))) break;
+
+		error = vme_safe_access(cmd, FPD->flags, FPD->base+offset,
+					&value);
+		if (error || !(_IOC_DIR(cmd) & _IOC_READ)) break;
+		error = __put_user(value, &param->value);
+		break;
+#undef param
+
+#define param ((VME_modbits *) arg)
+	case VME_MODBITS8:
+	case VME_MODBITS16:
+	case VME_MODBITS32:
+		error = -EINVAL;
+		if (!(FPD->flags & VME_USE_RMW)) break;
+		error = __get_user(offset, &param->offset);
+		error |= __get_user(rmwval, &param->value);
+		error |= __get_user(mask, &param->mask);
+
+		size = 1<<(_IOC_NR(cmd)-_IOC_NR(VME_MODBITS8));
+		if (error) break;
+		error = -EINVAL;
+		/* Check that the address is within the allowed range */
+		if (((offset+size-1) < offset) ||
+		    ((offset+size-1) > (FPD->limit - FPD->base))) break;
+
+		error = vme_modbits(cmd, FPD->flags,
+				    FPD->base + offset, &rmwval, mask);
+		if (error) break;
+		error = __put_user(rmwval, &param->value);
+		break;
+#undef param
+
+#if 0
+	/* Not implemented for now: it would require locking
+	 * user mode memory for the duration of the operation,
+	 * or being sure that the user has locked the memory
+	 * (but still translating all addresses to kernel
+	 * virtual addresses).
+	 */
+#define param ((VME_dma *)arg)
+	case VME_DMA_READ:
+
+		break;
+
+	case VME_DMA_WRITE:
+
+		break;
+#undef param
+#endif
+
+	default:
+		error = -EINVAL;
+		break;
+	}
+	return error;
+}
+
+/* The interrupt routine is somewhat complex: the device interrupt handler
+ * may wish to start a DMA operation, but we want to defer the activation
+ * of the DMA until after all interrupt handlers have been called.
+ * Otherwise the following interrupt handlers might have to wait for the
+ * DMA throttle to release the bus, which might take up to a few hundred
+ * microseconds, an eternity for a modern processor. So we handle the
+ * device interrupts after having set a flag prohibiting vme_queue_dmalist
+ * from triggering any operation, then the DMA interrupt if applicable,
+ * and finally we start the DMA if it is inactive and there are pending
+ * DMA operations. The flag would also prohibit DMA operations from being
+ * started from another processor on an SMP system. The first 3 lines are
+ * ugly, acquiring and releasing a spinlock just to set a flag; if you
+ * have a cleaner solution please tell me.
+ */
+
+/* I truly wish an ilog2 or something like this would exist, either as
+ * a compiler builtin or in asm/bitops, to find the MSB of an unsigned
+ * int, automatically optimized according to arch and CONFIG options.
+ */
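+/* Worked example: with VME interrupt levels 3 and 5 pending at the same
+ * time, LINT_STAT has bits 3 and 5 set, so find_highest_virq is handed
+ * 0x28 and both variants below return 5: cntlzw counts 26 leading zeros
+ * in the 32-bit word (31 - 26 = 5), and the generic binary search takes
+ * +4 from the 0xf0 test and +1 from the final bit of the shifted mask.
+ */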
+#if defined(__powerpc__)
+static inline u_int
+find_highest_virq(u_int mask) {
+	u_int lz;
+	asm("cntlzw %0,%1" : "=r" (lz) : "r" (mask & UNIVERSE_LINT_VIRQS));
+	return 31-lz;
+}
+#else
+/* A special case could be defined for x86, but bsr is __slow__ on Pentia */
+static inline u_int
+find_highest_virq(u_int mask) {
+	u_int lvl = 0;
+	if (mask&0xf0) { lvl += 4; mask >>= 4; }
+	if (mask&0x0c) { lvl += 2; mask >>= 2; }
+	return lvl + ((mask >> 1) & 1);
+}
+#endif
+
+static void irq_action(int irq, void *dev_id, struct pt_regs *regs) {
+	struct private *lp = (struct private *) dev_id;
+	volatile u_char *__rp = lp->reg_base;
+	u_int reason;
+	int error;
+	u32 dgcs;
+	struct vme_dma *dma;
+
+	/* Note that this flag can be set without a spinlock held, but
+	 * not cleared. In most cases it will prevent the DMA from starting
+	 * while it's set. Protecting the setting with a spinlock would only
+	 * delay the processing of interrupts when another processor holds
+	 * the lock to start a DMA operation.
+	 */
+	set_bit(IRQ_ACTIVE, &lp->state);
+
+	/* We don't want anybody to touch the interrupt array while we are in
+	 * the following loop, so we lock against writers. Note that actually
+	 * the interrupt counters are modified (incremented), but it does not
+	 * matter since this code can only execute on one processor at a time
+	 * and the only other place where they are written to is with the
+	 * interrupt-safe write lock held, when adding an interrupt entry.
+	 * Besides this, a wrong interrupt count would never be a serious
+	 * problem: it is only read by the /proc code and used for statistics.
+	 */
+	read_lock(&lp->lock);
+	while ((reason = GET_REG(LINT_STAT)) & UNIVERSE_LINT_VIRQS) {
+		struct vme_interrupt *intr;
+		u_int lvl, vec, mask;
+		lvl = find_highest_virq(reason);
+		vec = GET_REG(V1_STATID-4+4*lvl);
+		mask = 1<<lvl;
+		if ((intr = lp->ints[lvl-1]
+			    [vec & UNIVERSE_STATID_VECTOR_MASK])) {
+			intr->count++;
+			intr->handler(intr);
+		} else {
+			if (vec & UNIVERSE_STATID_ERR) {
+				vec = UNIVERSE_STATID_ERR;
+			}
+			/* If the same spurious vector repeats
+			 * within 2 jiffies, then there is likely
+			 * a problem with a stuck interrupt line,
+			 * or an unhandled interrupt which is not
+			 * of the ROAK type.
+			 */
+			if (lp->virq[lvl-1].errors != 0 &&
+			    jiffies-lp->virq[lvl-1].tstamp < 2 &&
+			    lp->virq[lvl-1].vector == vec) {
+				lp->valid_virqs &= ~mask;
+				lp->cached_lint_en &= ~mask;
+				SET_REG(lp->cached_lint_en, LINT_EN);
+				printk(KERN_CRIT
+				       "Unhandled VME interrupt "
+				       "vector %d on level %d: level "
+				       "permanently disabled.\n",
+				       vec, lvl);
+				/* Here we should find a way
+				 * to call all handlers on this level
+				 * to tell them there is a problem.
+				 * That's why the printk is still
+				 * here (and not a dprintk).
+				 */
+				lp->virq[lvl-1].errors++;
+			} else {
+				printk(KERN_WARNING
+				       "Unhandled VME interrupt "
+				       "vector %d on level %d!\n",
+				       vec, lvl);
+				lp->virq[lvl-1].unhandled++;
+			}
+			lp->virq[lvl-1].tstamp = jiffies;
+			lp->virq[lvl-1].vector = vec;
+		}
+		SET_REG(mask, LINT_STAT);
+	}
+	read_unlock(&lp->lock);
+
+	dma = NULL;
+	if (reason & UNIVERSE_LINT_DMA) {
+
+		spin_lock(&lp->dma_lock);
+		dma = lp->dma_queue;
+		lp->dma_queue = dma->queue;
+		if (lp->dma_queue == NULL) lp->dma_queue_end = NULL;
+		lp->state = IRQ_ACTIVE_MASK;
+		spin_unlock(&lp->dma_lock);
+
+		del_timer(&lp->dma_timer);
+		dgcs = GET_REG(DGCS);
+		/* Clear all pending interrupt flags and the LINT_STAT
+		 * bit, which must also be cleared.
+		 */
+		SET_REG(dgcs, DGCS);
+		SET_REG(UNIVERSE_LINT_DMA, LINT_STAT);
+
+		if (dgcs & UNIVERSE_DGCS_DONE) {
+			error = 0;
+			dma->remlen = 0;
+		} else if (dgcs & (UNIVERSE_DGCS_LERR | UNIVERSE_DGCS_VERR |
+				   UNIVERSE_DGCS_P_ERR)) {
+			error = -EIO;
+			/* Should we try to compute here the
+			 * remaining transfer count?  It is the
+			 * purpose of the remlen field
+			 * but it's quite complex, not very useful,
+			 * and left as an exercise. The printk
+			 * later is often informative enough since
+			 * this should only happen when debugging.
+			 */
+		} else {
+			/* This is necessarily DGCS_STOPPED since we don't use
+			 * the DMA halt capability. The only possible cause
+			 * right now is timeout, but this could change
+			 * if DMA priority queues are implemented.
+			 */
+			error = -ETIME;
+		}
+
+	}
+
+	/* The handler has to be called now with the lock free so that it
+	 * can queue further DMA operations. Queued DMA operations will not
+	 * be started until all the interrupts have been handled, to limit
+	 * conflicts on VME accesses.
+	 */
+	if (dma) {
+		if (error) {
+			printk("DMA transfer error: status %x, "
+			       "count=%d, packet @%p.\n",
+			       (dgcs>>8) & 0xff, GET_REG(DTBC),
+			       bus_to_virt(GET_REG(DCPP)));
+		}
+		dma->error = error;
+		dma->handler(dma);
+		if (!test_bit(VME_DMA_READY, &dma->flags))
+			wake_up(&lp->dma_free_wait);
+	}
+	spin_lock(&lp->dma_lock);
+	clear_bit(IRQ_ACTIVE, &lp->state);
+	if (!test_bit(DMA_ACTIVE, &lp->state) && (dma = lp->dma_queue)) {
+		/* Start the next DMA operation. */
+		SET_REG(0, DTBC);
+		SET_REG(virt_to_bus(dma->private), DCPP);
+		iobarrier_w();
+		SET_REG(universe.cached_dgcs | UNIVERSE_DGCS_GO, DGCS);
+		lp->dma_timer.expires = jiffies + dma->timeout;
+		add_timer(&lp->dma_timer);
+		set_bit(DMA_ACTIVE, &lp->state);
+	}
+	spin_unlock(&lp->dma_lock);
+}
+
+/* This routine is invoked by timer_bh, with interrupts enabled */
+static void dma_timeout(u_long data) {
+	struct private *lp = (struct private *)data;
+	volatile u_char *__rp = lp->reg_base;
+	spin_lock_irq(&lp->dma_lock);
+	/* On UP it is possible that this code gets interrupted by the
+	 * DMA termination event. This probably means that the timeout
+	 * value was too short, but it should at least be handled gracefully.
+	 * The DMA for which this timeout was intended might just have been
+	 * removed from the queue, and maybe the following one in the queue
+	 * has been started. It's even more complex on SMP, but unless I've
+	 * missed a weird sequence of events, this is correctly handled by
+	 * the preceding spinlock and by checking that the DMA is still
+	 * active and that the expiration time is not in the future.
+	 */
+	if (test_bit(DMA_ACTIVE, &lp->state) &&
+	    time_after_eq(jiffies, lp->dma_timer.expires)) {
+		SET_REG(universe.cached_dgcs | UNIVERSE_DGCS_STOP_REQ, DGCS);
+		/* set_bit(DMA_TIMEDOUT, &lp->state); */
+		printk(KERN_NOTICE "Universe VME DMA timeout on device %s!\n",
+		       lp->dma_queue->device->name);
+	}
+	spin_unlock_irq(&lp->dma_lock);
+}
+
+
+
+static int
+get_vme_dev_info(char *buf, char **start, off_t pos, int count, int wr)
+{
+	struct vme_device *dev = &vme_root;
+	off_t at = 0;
+	int len, cnt;
+
+	cnt = 0;
+	read_lock(&universe.lock);
+	while (dev && count > cnt) {
+		if (dev->fops) {
+			len = sprintf(buf, "%4d\t%s\n",
+				      dev->minor,
+				      dev->name);
+		} else {
+			len = sprintf(buf, "\t%s\n",
+				      dev->name);
+		}
+		at += len;
+		if (at >= pos) {
+			if (!*start) {
+				*start = buf + (pos - (at - len));
+				cnt = at - pos;
+			} else
+				cnt += len;
+			buf += len;
+		}
+		dev = dev->next;
+	}
+	read_unlock(&universe.lock);
+	return (count > cnt) ?
cnt : count; +} + +static const char * const am_str[]={ + "A64 MBLT", + "A64 DATA", + "A64 PROG", + "A64 BLT", + "A64 MBLT PRIV", + "A64 DATA PRIV", + "A64 PROG PRIV", + "A64 BLT PRIV", + "A32 MBLT", + "A32 DATA", + "A32 PROG", + "A32 BLT", + "A32 MBLT PRIV", + "A32 DATA PRIV", + "A32 PROG PRIV", + "A32 BLT PRIV", + "USER 0x10", + "USER 0x11", + "USER 0x12", + "USER 0x13", + "USER 0x14", + "USER 0x15", + "USER 0x16", + "USER 0x17", + "USER 0x18", + "USER 0x19", + "USER 0x1A", + "USER 0x1B", + "USER 0x1C", + "USER 0x1D", + "USER 0x1E", + "USER 0x1F", + "6U 2eBLT", + "3U 2eBLT", + [0x22 ... 0x28] = "?", + "A16", + [0x2a ... 0x2c] = "?", + "A16 PRIV", + "?", + "CR/CSR", + [0x30 ... 0x37] = "?", + "A24 MBLT", + "A24 DATA", + "A24 PROG", + "A24 BLT", + "A24 MBLT PRIV", + "A24 DATA PRIV", + "A24 PROG PRIV", + "A24 BLT PRIV" +}; + +static const char * const dw_str[]={"?", "D08", "D16", "?", "D32", + [5 ... 7] = "?", "D64", [9 ... 15] = "?"}; + +static const char * const map_str[]={"", ",PRM", ",???", ",DYN"}; + +static int +get_vme_reg_info(char *buf, char **start, off_t pos, int count, int wr) +{ + struct vme_device *dev = &vme_root; + off_t at = 0; + int len, cnt; + char sflags[24]; + + strcpy(buf, "Address space\tWidth and flags\tRange\t\t\tDevice\n"); + at = cnt = strlen(buf); + buf += cnt; + + /* Flags: PRM/DYN,DMA,RMW */ + read_lock(&universe.lock); + while (dev && count > cnt) { + struct vme_region *p = dev->regions; + while(p && count>cnt) { + strcpy(sflags, dw_str[(p->flags&VME_DW_MASK) + >>VME_DW_SHIFT]); + strcpy(sflags+3, map_str[p->flags & + (VME_USE_MAP|VME_USE_PIO)]); + if (p->flags & VME_USE_DMA) + strcat(sflags, ",DMA"); + if (p->flags & VME_USE_RMW) + strcat(sflags, ",RMW"); + if (strlen(sflags)<8) strcat(sflags, "\t"); + len = sprintf(buf, " %-14s %s\t%08lx-%08lx\t%s\n", + am_str[(p->flags&VME_AM_MASK) + >>VME_AM_SHIFT], + sflags, + p->base, + p->limit, + p->device->name); + at += len; + if (at >= pos) { + if (!*start) { + *start = buf + (pos - (at - len)); + cnt = at - pos; + } else + cnt += len; + buf += len; + } + p = p->next; + } + dev = dev->next; + } + read_unlock(&universe.lock); + return (count > cnt) ? 
cnt : count; +} + + +static int +get_vme_int_info(char *buf, char **start, off_t pos, int count, int wr) +{ + struct vme_device *dev = &vme_root; + off_t at; + int len, cnt, i; + + strcpy(buf, "Level Vector: Count\tDevice/Interrupt\n"); + at = cnt = strlen(buf); + buf += cnt; + + read_lock(&universe.lock); + while (dev && count > cnt) { + struct vme_interrupt *p = dev->interrupts; + while(p && count>cnt) { + len = sprintf(buf, "%5d%7d:%11ld\t%s/%s\n", + p->level, + p->vector, + p->count, + p->device->name, + p->name); + at += len; + if (at >= pos) { + if (!*start) { + *start = buf + (pos - (at - len)); + cnt = at - pos; + } else + cnt += len; + buf += len; + } + p = p->next; + } + dev = dev->next; + } + for (i=1; i<8 && count > cnt; i++) { + if (!(universe.virq[i-1].errors || + universe.virq[i-1].unhandled)) continue; + len = 0; + if (universe.virq[i-1].errors) { + len = sprintf(buf, + "%5d Error:%11ld\tuniverse/iackerror\n", + i, universe.virq[i-1].errors); + } + if (universe.virq[i-1].unhandled) { + len += sprintf(buf+len, + "%5d Error:%11ld\tuniverse/unhandled\n", + i, universe.virq[i-1].unhandled); + } + at += len; + if (at >= pos) { + if (!*start) { + *start = buf + (pos - (at - len)); + cnt = at - pos; + } else + cnt += len; + buf += len; + } + } + +#if defined(UNIVERSE_DEBUG) + if (count > cnt) { + u_char tmpstr[16], *p=tmpstr; + strcpy(tmpstr, "none"); + for (i=1; i<7; i++) { + if (universe.cached_lint_en & (1<= pos) { + if (!*start) { + *start = buf + (pos - (at - len)); + cnt = at - pos; + } else + cnt += len; + buf += len; + } + } +#endif + read_unlock(&universe.lock); + return (count > cnt) ? cnt : count; +} + + +static struct proc_dir_entry proc_bus_vme_dir = { + PROC_BUS_VME, 3, "vme", + S_IFDIR|S_IRUGO|S_IXUGO, 2, 0, 0, + 0, NULL, + NULL +}; + +static struct proc_dir_entry proc_vme_devices = { + PROC_BUS_VME_DEVICES, 7, "devices", + S_IFREG | S_IRUGO, 1, 0, 0, + 0, NULL, + get_vme_dev_info +}; + +static struct proc_dir_entry proc_vme_regions = { + PROC_BUS_VME_REGIONS, 7, "regions", + S_IFREG | S_IRUGO, 1, 0, 0, + 0, NULL, + get_vme_reg_info +}; + +static struct proc_dir_entry proc_vme_interrupts = { + PROC_BUS_VME_INTERRUPTS, 10, "interrupts", + S_IFREG | S_IRUGO, 1, 0, 0, + 0, NULL, + get_vme_int_info +}; + +#ifdef CONFIG_PROC_FS +static __init void vme_proc_init(void) +{ + proc_register(proc_bus, &proc_bus_vme_dir); + proc_register(&proc_bus_vme_dir, &proc_vme_devices); + proc_register(&proc_bus_vme_dir, &proc_vme_regions); + proc_register(&proc_bus_vme_dir, &proc_vme_interrupts); +} +#else +#define vme_proc_init(); +#endif + +#ifdef MODULE +#define universe_init init_module +#endif + +#ifdef MODULE +void cleanup_module(void) { + volatile u_char *__rp = universe.reg_base; + struct vme_region *reg; +#ifdef CONFIG_PROC_FS + proc_unregister(&proc_bus_vme_dir, proc_vme_interrupts.low_ino); + proc_unregister(&proc_bus_vme_dir, proc_vme_regions.low_ino); + proc_unregister(&proc_bus_vme_dir, proc_vme_devices.low_ino); + proc_unregister(proc_bus, proc_bus_vme_dir.low_ino); + +#endif + /* mask all interrupts, disable enabled images (SLSI) */ + SET_REG(0, LINT_EN); + SET_REG(0, VINT_EN); + + dprintk("Pending PCI interrupts: %x.\n", GET_REG(LINT_STAT)); + + /* Unmap all mapped areas: note that unmapping the + * SLSI unmaps all 8 regions it covers, and that these are the last + * in the list of root regions, hence the loop break condition. 
+	 */
+	for (reg=vme_root.regions; reg; reg=reg->next) {
+		iounmap((void *)reg->kvaddr);
+		if (reg == universe.root_regions+8) break;
+	}
+	vme_root.regions = NULL;
+	free_irq(universe.pci->irq, &universe);
+	/* Unmap image 0 and registers */
+	iounmap((void *)universe.root_regions[0].kvaddr);
+	iounmap((void *)universe.reg_base);
+
+	unregister_chrdev(UNIVERSE_MAJOR, "vme");
+}
+#endif
+
+int __init universe_init(void)
+{
+	unsigned short pci_command=PCI_COMMAND_MEMORY|PCI_COMMAND_MASTER;
+	u32 tmp;
+	int i, rb, error;
+	volatile u_char * __rp;
+
+	universe.pci = pci_find_device(PCI_VENDOR_ID_TUNDRA,
+				       PCI_DEVICE_ID_TUNDRA_CA91C042,
+				       NULL);
+	if (!universe.pci) return -ENODEV;
+	pci_read_config_byte(universe.pci, PCI_REVISION_ID,
+			     &universe.revision);
+	printk(KERN_INFO
+	       "Universe VME bridge revision #%d found at bus=%d, dev=%d.\n",
+	       universe.revision, universe.pci->bus->number,
+	       PCI_SLOT(universe.pci->devfn));
+
+	/* It is unfortunate but necessary to enable the Universe on
+	 * the PCI bus to be able to control all the slave images, which can
+	 * cause temporary conflicts in case of bad programming. It is the
+	 * job of the system firmware/BIOS to ensure that such conflicts
+	 * do not happen.
+	 */
+	/* FIXME:
+	 * This is quite a mess: for now it works only on PPC if
+	 * the Universe is revision 1 and the base address is mapped
+	 * in I/O space. Anyway this area needs 64kB so it can only
+	 * be mapped in memory space on Intel machines. And for
+	 * revision 2, one base is in I/O space and the other in
+	 * memory, so we always choose the memory mapped one.
+	 */
+
+#define base universe.pci->base_address
+	if (universe.revision !=0 &&
+	    (base[0] & PCI_BASE_ADDRESS_SPACE)==PCI_BASE_ADDRESS_SPACE_IO)
+		rb = 1;
+	else
+		rb = 0;
+
+	if ((base[rb] & PCI_BASE_ADDRESS_SPACE)==PCI_BASE_ADDRESS_SPACE_IO) {
+#ifdef __powerpc__
+		pci_command |= PCI_COMMAND_IO;
+		universe.reg_base = (u_char *)_IO_BASE +
+			(base[rb] & PCI_BASE_ADDRESS_IO_MASK);
+		printk(KERN_NOTICE
+		       "Assuming uniform PCI addressing (non PreP)!\n");
+#else
+		printk(KERN_ERR "Don't know how to address I/O space.\n");
+#endif
+	} else {
+		pci_read_config_dword(universe.pci,
+				      PCI_BASE_ADDRESS_0 + 4*rb,
+				      &tmp);
+		universe.bus_delta = base[rb] - tmp;
+		universe.reg_base =
+			ioremap(base[rb] & PCI_BASE_ADDRESS_MEM_MASK, 0x1000);
+#undef base
+	}
+	__rp = universe.reg_base;
+
+	if (!__rp) {
+		printk(KERN_ERR "Can't access Universe registers!\n");
+		return -ENOMEM;
+	}
+
+	/* Enable the Universe on the PCI bus. */
+	pci_write_config_word(universe.pci, PCI_COMMAND, pci_command);
+
+	/* Clear the SYSFAIL bit and corresponding interrupt if set */
+	if (GET_REG(VCSR_CLR)&UNIVERSE_CSR_SYSFAIL) {
+		SET_REG(UNIVERSE_CSR_SYSFAIL, VCSR_CLR);
+		SET_REG(UNIVERSE_LINT_SYSFAIL, LINT_STAT);
+	}
+
+	/* Clear PCI and VME error logs (note that they are only used by
+	 * posted write cycles).
+	 */
+
+	tmp = GET_REG(LERRLOG);
+	if (tmp & UNIVERSE_LERRLOG_VALID) {
+		printk(KERN_INFO "PCI error log cleared, error CMD=%x, "
+		       "multiple=%d, address=%x.\n",
+		       (tmp&UNIVERSE_LERRLOG_CMD_MASK)
+		       >>UNIVERSE_LERRLOG_CMD_SHIFT,
+		       (tmp&UNIVERSE_LERRLOG_MULTIPLE)!=0,
+		       GET_REG(LAERR));
+		SET_REG(UNIVERSE_LERRLOG_VALID, LERRLOG);
+	}
+
+	tmp = GET_REG(VERRLOG);
+	if (tmp & UNIVERSE_VERRLOG_VALID) {
+		printk(KERN_INFO "VME error log cleared, error AM=%d, "
+		       "IACK=%d, multiple=%d, address=%x.\n",
+		       (tmp&UNIVERSE_VERRLOG_AM_MASK)
+		       >>UNIVERSE_VERRLOG_AM_SHIFT,
+		       (tmp&UNIVERSE_VERRLOG_IACK)!=0,
+		       (tmp&UNIVERSE_VERRLOG_MULTIPLE)!=0,
+		       GET_REG(VAERR));
+		SET_REG(UNIVERSE_VERRLOG_VALID, VERRLOG);
+	}
+
+	/* Disable all interrupts and map all VME interrupts to
+	 * PCI interrupt 0.
+	 */
+	SET_REG(0, LINT_EN);
+	SET_REG(0, VINT_EN);
+	SET_REG(0, LINT_MAP0);
+	SET_REG(0, LINT_MAP1);
+	SET_REG(0, VINT_MAP0);
+	SET_REG(0, VINT_MAP1);
+
+	/* Clear pending interrupts by writing the status back onto itself. */
+	SET_REG(GET_REG(LINT_STAT), LINT_STAT);
+	/* SET_REG(GET_REG(VINT_STAT), VINT_STAT); */
+
+	/* Initialize DMA */
+	SET_REG(0, D_LLUE);
+	tmp = GET_REG(DGCS);
+	if (tmp&UNIVERSE_DGCS_ACT) {
+		SET_REG(UNIVERSE_DGCS_STOP_REQ|
+			(tmp & (UNIVERSE_DGCS_CHAIN |
+				UNIVERSE_DGCS_VON_MASK |
+				UNIVERSE_DGCS_VOFF_MASK)), DGCS);
+		printk(KERN_WARNING
+		       "Universe DMA active before initialization!\n");
+		/* There should be some timeout here. */
+		while (GET_REG(DGCS)&UNIVERSE_DGCS_ACT) {
+			udelay(10);	/* Should wait a few us */
+		}
+	}
+
+	universe_setup(__rp);
+
+	universe.cached_dgcs = GET_REG(DGCS);
+
+	/* Now we analyze the contents of all the image registers and look
+	 * for the enabled ones and store their characteristics in the
+	 * root_regions array (4 for Universe I, 8 for Universe II),
+	 * but first handle the special slave image.
+	 */
+#define reg universe.root_regions
+	do {
+		u32 flags = 0, slsi = GET_REG(SLSI);
+		u_long paddr, vaddr=0;
+		if (!(slsi & UNIVERSE_SLAVE_EN)) break;
+		if ((slsi&UNIVERSE_SLAVE_LAS_MASK) != UNIVERSE_SLAVE_LAS_MEM) {
+			printk(KERN_ERR "Special slave image cannot be "
+			       "memory mapped (LAS=%x)!\n",
+			       slsi&UNIVERSE_SLAVE_LAS_MASK);
+			break;
+		}
+
+		paddr = (((slsi & UNIVERSE_SLSI_BS_MASK)
+			  << UNIVERSE_SLSI_BS_ADDR_SHIFT)
+			 + universe.bus_delta);
+		if (permanent_maps & 1) {
+			vaddr = (u_long) ioremap(paddr, 0x4000000);
+			if (!vaddr) {
+				printk(KERN_WARNING
+				       "Can't ioremap() special slave "
+				       "image: lack of free virtual kernel "
+				       "space?\n");
+			} else {
+				flags = VME_USE_PIO;
+			}
+		}
+
+		/* Register backwards because it's easier to insert at head */
+		for (i=15; i>=8; i--) {
+			u_long attr = slsi>>((i&7)>>1);
+			u_long delta = ((i&6) << 23) + (i&1) * 0xff0000;
+			reg[i].phyaddr = paddr + delta;
+			reg[i].kvaddr = vaddr ?
+				((volatile u_char *) (vaddr + delta)) : NULL;
+			reg[i].flags = flags | slsi2am[i&1]
+				[(attr>>UNIVERSE_SLSI_SPACE_SHIFT)&1]
+				[(attr>>UNIVERSE_SLSI_PRIV_SHIFT)&1] |
+				VME_DW(16<<((attr&UNIVERSE_SLSI_VDW_32)
+					    >>UNIVERSE_SLSI_VDW_SHIFT));
+			reg[i].next = vme_root.regions;
+			vme_root.regions = reg+i;
+		}
+	} while(0);
+
+	/* Note: image 0 is reserved for atomic and safe accesses. */
+	for (i = universe.revision ? 7 : 3; i > 0; i--) {
+		u32 pcibase, pcilimit, offset, ctl, base, limit, vmask, flags;
+		const struct ctl2am_t * p;
+		ctl = GET_REG(LSI_CTL(i));
+		if (!(ctl&UNIVERSE_SLAVE_EN)) continue;
+		pcibase = GET_REG(LSI_BS(i));
+		pcilimit = GET_REG(LSI_BD(i)) - 1;
+		offset = GET_REG(LSI_TO(i));
+
+		if (pcilimit < pcibase) {
+			printk(KERN_NOTICE
+			       "Local slave image %d is enabled but "
+			       "its address range (%x:%x) is empty!\n",
+			       i, pcibase, pcilimit);
+			continue;
+		}
+
+		if ((ctl&UNIVERSE_SLAVE_LAS_MASK) != UNIVERSE_SLAVE_LAS_MEM) {
+			printk(KERN_ERR "Local slave image %d is not in "
+			       "memory space (LAS=%x)!\n", i,
+			       (ctl & UNIVERSE_SLAVE_LAS_MASK)
+			       >> UNIVERSE_SLAVE_LAS_SHIFT);
+			continue;
+		}
+		/* Translate the Universe ctl to a more sensible AM value */
+		for (p=ctl2am; ((p->value ^ ctl) & p->mask); p++);
+		if (p->flags == ~0U) {
+			printk(KERN_ERR
+			       "Local slave image %d has an invalid or "
+			       "unsupported attribute combination: %x\n",
+			       i, ctl);
+			continue;
+		}
+		flags = p->flags | VME_DW(8 << ((ctl&UNIVERSE_VDW_MASK)
+						>> UNIVERSE_VDW_SHIFT));
+		vmask = vas_masks[(ctl & UNIVERSE_VAS_MASK) >>
+				  UNIVERSE_VAS_SHIFT];
+		reg[i].base = base = (pcibase+offset) & vmask;
+		reg[i].limit = limit = base + pcilimit - pcibase;
+		if ((limit < base) || (limit > vmask)) {
+			printk(KERN_ERR
+			       "Image %d wraps around or exceeds the limit "
+			       "of its VME address space: %x, %x, %x\n",
+			       i, ctl, base, limit);
+			continue;
+		}
+		reg[i].phyaddr = pcibase + universe.bus_delta;
+
+		if (permanent_maps & (1<<i)) {
+			reg[i].kvaddr = (volatile u_char *)
+				ioremap(reg[i].phyaddr,
+					pcilimit - pcibase + 1);
+			if (reg[i].kvaddr) flags |= VME_USE_PIO;
+		}
+		reg[i].flags = flags;
+		reg[i].next = vme_root.regions;
+		vme_root.regions = reg+i;
+	}
+#undef reg
+
+	do {
+		error = request_irq(universe.pci->irq, irq_action,
+				    SA_INTERRUPT, vme_root.name, &universe);
+		if (error) {
+			printk(KERN_ERR
+			       "Unable to grab interrupt %d for VME!\n",
+			       universe.pci->irq);
+			break;
+		}
+
+		error = register_chrdev(UNIVERSE_MAJOR, "vme", &universe_fops);
+		if (error) {
+			dprintk("can't get Major %d\n", UNIVERSE_MAJOR);
+			break;
+		}
+
+		/* Enable the interrupts: individual VME IRQs are only enabled
+		 * if at least one handler is attached.
+		 */
+		SET_REG(universe.cached_lint_en, LINT_EN);
+
+#ifdef UNIVERSE_DEBUG
+		print_registers();
+#endif
+		vme_proc_init();
+		return 0;
+	} while(0);
+#ifdef MODULE
+	cleanup_module();
+#endif
+	return error;
+}
+
Binary files linux-2.2.12/drivers/vme/universe.dvi and linux/drivers/vme/universe.dvi differ
diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/universe.h linux/drivers/vme/universe.h
--- linux-2.2.12/drivers/vme/universe.h	Thu Jan  1 01:00:00 1970
+++ linux/drivers/vme/universe.h	Mon Nov 15 13:23:01 1999
@@ -0,0 +1,466 @@
+/* Definitions of the Universe registers:
+ * note that the first 256 bytes can also be accessed from configuration
+ * space and use standard PCI names which are not repeated here.
+ * G.Paubert, 1997-1999.
+ */
+
+#ifndef _UNIVERSE_H_
+#define _UNIVERSE_H_
+
+#include
+#define UNIVERSE_MAJOR	60	/* Experimental major number */
+
+/* Standard PCI header */
+#define UNIVERSE_PCI_STATUS	PCI_STATUS
+
+/* General purpose PCI slave images (0 to 3 or 7 depending on revision) */
+#define UNIVERSE_LSI_CTL(i)	(0x100+((i)&3)*0x14+((i)&4)*0x28)
+#define UNIVERSE_LSI_BS(i)	(0x104+((i)&3)*0x14+((i)&4)*0x28)
+#define UNIVERSE_LSI_BD(i)	(0x108+((i)&3)*0x14+((i)&4)*0x28)
+#define UNIVERSE_LSI_TO(i)	(0x10C+((i)&3)*0x14+((i)&4)*0x28)
+
+/* Special cycles control (locked RMW,...)
*/ +#define UNIVERSE_SCYC_CTL 0x170 +#define UNIVERSE_SCYC_ADDR 0x174 +#define UNIVERSE_SCYC_EN 0x178 +#define UNIVERSE_SCYC_CMP 0x17C +#define UNIVERSE_SCYC_SWP 0x180 + +/* Miscellaneous, special slave image and PCI errorlog */ +#define UNIVERSE_LMISC 0x184 +#define UNIVERSE_SLSI 0x188 +#define UNIVERSE_LERRLOG 0x18C /* Not same name as Universe doc */ +#define UNIVERSE_LAERR 0x190 + +/* DMA */ +#define UNIVERSE_DCTL 0x200 +#define UNIVERSE_DTBC 0x204 +#define UNIVERSE_DLA 0x208 +#define UNIVERSE_DVA 0x210 +#define UNIVERSE_DCPP 0x218 +#define UNIVERSE_DGCS 0x220 +#define UNIVERSE_D_LLUE 0x224 + +/* Interrupts */ +#define UNIVERSE_LINT_EN 0x300 +#define UNIVERSE_LINT_STAT 0x304 +#define UNIVERSE_LINT_MAP0 0x308 +#define UNIVERSE_LINT_MAP1 0x30C +#define UNIVERSE_LINT_MAP2 0x340 /* Revision 2 only */ +#define UNIVERSE_VINT_EN 0x310 +#define UNIVERSE_VINT_STAT 0x314 +#define UNIVERSE_VINT_MAP0 0x318 +#define UNIVERSE_VINT_MAP1 0x31C +#define UNIVERSE_VINT_MAP2 0x344 /* Revision 2 only */ +#define UNIVERSE_STATID 0x320 +#define UNIVERSE_V1_STATID 0x324 +#define UNIVERSE_V2_STATID 0x328 +#define UNIVERSE_V3_STATID 0x32C +#define UNIVERSE_V4_STATID 0x330 +#define UNIVERSE_V5_STATID 0x334 +#define UNIVERSE_V6_STATID 0x338 +#define UNIVERSE_V7_STATID 0x33C + +#define UNIVERSE_MBOX0 0x348 /* Revision 2 only */ +#define UNIVERSE_MBOX1 0x34C /* Revision 2 only */ +#define UNIVERSE_MBOX2 0x350 /* Revision 2 only */ +#define UNIVERSE_MBOX3 0x354 /* Revision 2 only */ +#define UNIVERSE_SEMA0 0x348 /* Revision 2 only */ +#define UNIVERSE_SEMA1 0x34C /* Revision 2 only */ + + +/* General control and status, VME arbitration ... */ +#define UNIVERSE_MAST_CTL 0x400 +#define UNIVERSE_MISC_CTL 0x404 +#define UNIVERSE_MISC_STAT 0x408 +#define UNIVERSE_USER_AM 0x40C + +/* General purpose VME slave images (0 to 3 or 7 depending on revision) */ +#define UNIVERSE_VSI_CTL(i) (0xF00+((i)&3)*0x14+((i)&4)*0x24) +#define UNIVERSE_VSI_BS(i) (0xF04+((i)&3)*0x14+((i)&4)*0x24) +#define UNIVERSE_VSI_BD(i) (0xF08+((i)&3)*0x14+((i)&4)*0x24) +#define UNIVERSE_VSI_TO(i) (0xF0C+((i)&3)*0x14+((i)&4)*0x24) + + +/* Location monitor (revision 2 only) */ +#define UNIVERSE_LM_CTL 0xF64 +#define UNIVERSE_LM_BS 0xF68 + + +/* Universe register access from VMEBus */ +#define UNIVERSE_VRAI_CTL 0xF70 +#define UNIVERSE_VRAI_BS 0xF74 + +/* VME CS/CSR space to PCI mapping */ +#define UNIVERSE_VCSR_CTL 0xF80 +#define UNIVERSE_VCSR_TO 0xF84 + +/* VME errorlog */ +#define UNIVERSE_VERRLOG 0xF88 /* Not same name as Universe doc */ +#define UNIVERSE_VAERR 0xF8C + +/* VME CSR standard registers */ +#define UNIVERSE_VCSR_CLR 0xFF4 +#define UNIVERSE_VCSR_SET 0xFF8 +#define UNIVERSE_VCSR_BS 0xFFC + +/* Now definitions of the bits in the previously defined registers */ + +/* Control bits used by slave (PCI and VME) images and DMA (DCTL). 
+ */
+#define UNIVERSE_SLAVE_EN	0x80000000	/* VME and PCI slaves only */
+#define UNIVERSE_DMA_L2V	0x80000000	/* DMA only */
+#define UNIVERSE_DMA_L2V_SHIFT	31		/* DMA only */
+#define UNIVERSE_SLAVE_PWEN	0x40000000	/* VME and PCI slaves only */
+#define UNIVERSE_VSI_PREN	0x20000000	/* VME slaves only */
+#define UNIVERSE_VDW_MASK	0x00C00000	/* PCI slaves and DMA only */
+#define UNIVERSE_VDW_SHIFT	22
+#define UNIVERSE_VDW_8		0x00000000	/* PCI slaves and DMA only */
+#define UNIVERSE_VDW_16		0x00400000	/* PCI slaves and DMA only */
+#define UNIVERSE_VDW_32		0x00800000	/* PCI slaves and DMA only */
+#define UNIVERSE_VDW_64		0x00C00000	/* PCI slaves and DMA only */
+
+/* The following 6 names are perhaps not the best choice, but
+ * at least now they are consistent with the special slave image, and
+ * they must be kept distinct from the PCI slave and DMA names used later.
+ */
+#define UNIVERSE_VSI_SPACE_MASK		0x00400000	/* VME slaves only */
+#define UNIVERSE_VSI_SPACE_DATA		0x00400000	/* VME slaves only */
+#define UNIVERSE_VSI_SPACE_PROGRAM	0x00800000	/* VME slaves only */
+#define UNIVERSE_VSI_SPACE_ANY		0x00C00000	/* VME slaves only */
+
+#define UNIVERSE_VSI_PRIV_MASK		0x00010000	/* VME slaves only */
+#define UNIVERSE_VSI_PRIV_USER		0x00010000	/* VME slaves only */
+#define UNIVERSE_VSI_PRIV_SUPER		0x00020000	/* VME slaves only */
+#define UNIVERSE_VSI_PRIV_ANY		0x00030000	/* VME slaves only */
+
+#define UNIVERSE_VAS_MASK	0x00070000
+#define UNIVERSE_VAS_SHIFT	16
+#define UNIVERSE_VAS_A16	0x00000000
+#define UNIVERSE_VAS_A24	0x00010000
+#define UNIVERSE_VAS_A32	0x00020000
+#define UNIVERSE_VAS_CRCSR	0x00050000	/* PCI slaves only */
+#define UNIVERSE_VAS_USER1	0x00060000
+#define UNIVERSE_VAS_USER2	0x00070000
+
+#define UNIVERSE_SPACE_MASK	0x00004000	/* PCI slaves and DMA only */
+#define UNIVERSE_SPACE_DATA	0x00000000	/* PCI slaves and DMA only */
+#define UNIVERSE_SPACE_PROGRAM	0x00004000	/* PCI slaves and DMA only */
+
+#define UNIVERSE_PRIV_MASK	0x00001000	/* PCI slaves and DMA only */
+#define UNIVERSE_PRIV_USER	0x00000000	/* PCI slaves and DMA only */
+#define UNIVERSE_PRIV_SUPER	0x00001000	/* PCI slaves and DMA only */
+
+#define UNIVERSE_BLT_MASK	0x00000100	/* PCI slaves and DMA only */
+#define UNIVERSE_NOBLT		0x00000000	/* PCI slaves and DMA only */
+#define UNIVERSE_BLT		0x00000100	/* PCI slaves and DMA only */
+
+#define UNIVERSE_USEPCI64	0x00000080	/* VME slaves and DMA only */
+#define UNIVERSE_VSI_PCILOCK	0x00000040	/* VME slaves only */
+
+/* Note that LAS_CFG was allowed in first revision of the chip
+ * for PCI slave images.
+ */
+#define UNIVERSE_SLAVE_LAS_MASK		0x00000003 /* VME and PCI slaves only */
+#define UNIVERSE_SLAVE_LAS_SHIFT	0
+#define UNIVERSE_SLAVE_LAS_MEM		0x00000000 /* VME and PCI slaves only */
+#define UNIVERSE_SLAVE_LAS_IO		0x00000001 /* VME and PCI slaves only */
+#define UNIVERSE_SLAVE_LAS_CFG		0x00000002 /* VME slaves only */
+
+/* Special cycle control register */
+#define UNIVERSE_SCYC_DISABLE	0x00000000
+#define UNIVERSE_SCYC_RMW	0x00000001
+#define UNIVERSE_SCYC_ADOH	0x00000002
+#define UNIVERSE_SCYC_IO	0x00000004	/* Revision 2 only */
+
+/* Miscellaneous, note that the CRT value is ignored on revision 2
+ * and is fixed at 2^15 PCI clock cycles (983uS), so it will be set to
+ * 1024uS on revision 1 to be as independent of chip revision as possible.
+ */ +#define UNIVERSE_CRT_MASK 0xF0000000 +#define UNIVERSE_CRT_SHIFT 28 +#define UNIVERSE_CRT_INFINITE 0x00000000 +#define UNIVERSE_CRT_1024uS 0x40000000 +#define UNIVERSE_CWT_MASK 0x0F000000 +#define UNIVERSE_CWT_SHIFT 24 +#define UNIVERSE_CWT_DISABLE 0x00000000 +#define UNIVERSE_CWT_16 0x01000000 +#define UNIVERSE_CWT_32 0x02000000 +#define UNIVERSE_CWT_64 0x03000000 +#define UNIVERSE_CWT_128 0x04000000 + +/* Special slave image */ +#define UNIVERSE_SLSI_VDW_MASK 0x00F00000 +#define UNIVERSE_SLSI_VDW_SHIFT 20 +#define UNIVERSE_SLSI_VDW_32 0x00100000 + +#define UNIVERSE_SLSI_SPACE_MASK 0x0000F000 +#define UNIVERSE_SLSI_SPACE_SHIFT 12 +#define UNIVERSE_SLSI_SPACE_PROGRAM 0x00001000 + +#define UNIVERSE_SLSI_PRIV_MASK 0x00000F00 +#define UNIVERSE_SLSI_PRIV_SHIFT 8 +#define UNIVERSE_SLSI_PRIV_SUPER 0x00000100 + +#define UNIVERSE_SLSI_BS_MASK 0x000000FC +#define UNIVERSE_SLSI_BS_SHIFT 2 +#define UNIVERSE_SLSI_BS_ADDR_SHIFT 24 + +/* PCI error log */ +#define UNIVERSE_LERRLOG_VALID 0x00800000 +#define UNIVERSE_LERRLOG_MULTIPLE 0x08000000 +#define UNIVERSE_LERRLOG_CMD_MASK 0xF0000000 +#define UNIVERSE_LERRLOG_CMD_SHIFT 28 + +/* DMA General control register */ +#define UNIVERSE_DGCS_GO 0x80000000 +#define UNIVERSE_DGCS_STOP_REQ 0x40000000 +#define UNIVERSE_DGCS_HALT_REQ 0x20000000 +#define UNIVERSE_DGCS_CHAIN 0x08000000 +#define UNIVERSE_DGCS_VON_MASK 0x00700000 /* Was 0x00f00000 in rev. 1 */ +#define UNIVERSE_DGCS_VON_SHIFT 20 +#define UNIVERSE_DGCS_VON_INFINITE 0x00000000 +#define UNIVERSE_DGCS_VON_256 0x00100000 +#define UNIVERSE_DGCS_VON_512 0x00200000 +#define UNIVERSE_DGCS_VON_1024 0x00300000 +#define UNIVERSE_DGCS_VON_2048 0x00400000 +#define UNIVERSE_DGCS_VON_4096 0x00500000 +#define UNIVERSE_DGCS_VON_8192 0x00600000 +#define UNIVERSE_DGCS_VON_16384 0x00700000 +#define UNIVERSE_DGCS_VOFF_MASK 0x000F0000 +#define UNIVERSE_DGCS_VOFF_SHIFT 16 +#define UNIVERSE_DGCS_VOFF_0uS 0x00000000 +#define UNIVERSE_DGCS_VOFF_16uS 0x00010000 +#define UNIVERSE_DGCS_VOFF_32uS 0x00020000 +#define UNIVERSE_DGCS_VOFF_64uS 0x00030000 +#define UNIVERSE_DGCS_VOFF_128uS 0x00040000 +#define UNIVERSE_DGCS_VOFF_256uS 0x00050000 +#define UNIVERSE_DGCS_VOFF_512uS 0x00060000 +#define UNIVERSE_DGCS_VOFF_1024uS 0x00070000 +#define UNIVERSE_DGCS_ACT 0x00008000 +#define UNIVERSE_DGCS_STOPPED 0x00004000 +#define UNIVERSE_DGCS_HALTED 0x00002000 +#define UNIVERSE_DGCS_DONE 0x00000800 +#define UNIVERSE_DGCS_LERR 0x00000400 +#define UNIVERSE_DGCS_VERR 0x00000200 +#define UNIVERSE_DGCS_P_ERR 0x00000100 +#define UNIVERSE_DGCS_INT_STOP 0x00000040 +#define UNIVERSE_DGCS_INT_HALT 0x00000020 +#define UNIVERSE_DGCS_INT_DONE 0x00000008 +#define UNIVERSE_DGCS_INT_LERR 0x00000004 +#define UNIVERSE_DGCS_INT_VERR 0x00000002 +#define UNIVERSE_DGCS_INT_P_ERR 0x00000001 +#define UNIVERSE_DGCS_INTMASKS 0x0000006F + +/* DMA on the fly linked list update */ +#define UNIVERSE_DMA_UPDATE 0x80000000 + +/* PCI interrupt enable and status registers */ +#define UNIVERSE_LINT_LM3 0x00800000 /* Revision 2 only */ +#define UNIVERSE_LINT_LM2 0x00400000 /* Revision 2 only */ +#define UNIVERSE_LINT_LM1 0x00200000 /* Revision 2 only */ +#define UNIVERSE_LINT_LM0 0x00100000 /* Revision 2 only */ +#define UNIVERSE_LINT_MBOX3 0x00080000 /* Revision 2 only */ +#define UNIVERSE_LINT_MBOX2 0x00040000 /* Revision 2 only */ +#define UNIVERSE_LINT_MBOX1 0x00020000 /* Revision 2 only */ +#define UNIVERSE_LINT_MBOX0 0x00010000 /* Revision 2 only */ +#define UNIVERSE_LINT_ACFAIL 0x00008000 +#define UNIVERSE_LINT_SYSFAIL 0x00004000 +#define UNIVERSE_LINT_SW_INT 
0x00002000 +#define UNIVERSE_LINT_SW_IACK 0x00001000 +#define UNIVERSE_LINT_VERR 0x00000400 +#define UNIVERSE_LINT_LERR 0x00000200 +#define UNIVERSE_LINT_DMA 0x00000100 +#define UNIVERSE_LINT_VIRQ7 0x00000080 +#define UNIVERSE_LINT_VIRQ6 0x00000040 +#define UNIVERSE_LINT_VIRQ5 0x00000020 +#define UNIVERSE_LINT_VIRQ4 0x00000010 +#define UNIVERSE_LINT_VIRQ3 0x00000008 +#define UNIVERSE_LINT_VIRQ2 0x00000004 +#define UNIVERSE_LINT_VIRQ1 0x00000002 +#define UNIVERSE_LINT_VIRQS 0x000000FE +#define UNIVERSE_LINT_VOWN 0x00000001 + +/* PCI interrupt mapping registers */ +#define UNIVERSE_LINT_MAP_MASK 7 +#define UNIVERSE_LINT_VIRQ7_SHIFT 28 +#define UNIVERSE_LINT_VIRQ6_SHIFT 24 +#define UNIVERSE_LINT_VIRQ5_SHIFT 20 +#define UNIVERSE_LINT_VIRQ4_SHIFT 16 +#define UNIVERSE_LINT_VIRQ3_SHIFT 12 +#define UNIVERSE_LINT_VIRQ2_SHIFT 8 +#define UNIVERSE_LINT_VIRQ1_SHIFT 4 +#define UNIVERSE_LINT_VOWN_SHIFT 0 +#define UNIVERSE_LINT_ACFAIL_SHIFT 28 +#define UNIVERSE_LINT_SYSFAIL_SHIFT 24 +#define UNIVERSE_LINT_SW_INT_SHIFT 20 +#define UNIVERSE_LINT_SW_IACK_SHIFT 16 +#define UNIVERSE_LINT_VERR_SHIFT 8 +#define UNIVERSE_LINT_LERR_SHIFT 4 +#define UNIVERSE_LINT_DMA_SHIFT 0 +#define UNIVERSE_LINT_LM3_SHIFT 28 +#define UNIVERSE_LINT_LM2_SHIFT 24 +#define UNIVERSE_LINT_LM1_SHIFT 20 +#define UNIVERSE_LINT_LM0_SHIFT 16 +#define UNIVERSE_LINT_MBOX3_SHIFT 12 +#define UNIVERSE_LINT_MBOX2_SHIFT 8 +#define UNIVERSE_LINT_MBOX1_SHIFT 4 +#define UNIVERSE_LINT_MBOX0_SHIFT 0 + +/* VME interrupt enable and status registers */ +#define UNIVERSE_VINT_VME_SW7 0x00800000 /* Revision 2 only */ +#define UNIVERSE_VINT_VME_SW6 0x00400000 /* Revision 2 only */ +#define UNIVERSE_VINT_VME_SW5 0x00200000 /* Revision 2 only */ +#define UNIVERSE_VINT_VME_SW4 0x00100000 /* Revision 2 only */ +#define UNIVERSE_VINT_VME_SW3 0x00080000 /* Revision 2 only */ +#define UNIVERSE_VINT_VME_SW2 0x00040000 /* Revision 2 only */ +#define UNIVERSE_VINT_VME_SW1 0x00020000 /* Revision 2 only */ +#define UNIVERSE_VINT_MBOX3 0x00080000 /* Revision 2 only */ +#define UNIVERSE_VINT_MBOX2 0x00040000 /* Revision 2 only */ +#define UNIVERSE_VINT_MBOX1 0x00020000 /* Revision 2 only */ +#define UNIVERSE_VINT_MBOX0 0x00010000 /* Revision 2 only */ +#define UNIVERSE_VINT_SW_INT 0x00001000 +#define UNIVERSE_VINT_VERR 0x00000400 +#define UNIVERSE_VINT_LERR 0x00000200 +#define UNIVERSE_VINT_DMA 0x00000100 +#define UNIVERSE_VINT_LINT7 0x00000080 +#define UNIVERSE_VINT_LINT6 0x00000040 +#define UNIVERSE_VINT_LINT5 0x00000020 +#define UNIVERSE_VINT_LINT4 0x00000010 +#define UNIVERSE_VINT_LINT3 0x00000008 +#define UNIVERSE_VINT_LINT2 0x00000004 +#define UNIVERSE_VINT_LINT1 0x00000002 +#define UNIVERSE_VINT_LINT0 0x00000001 + +/* VME interrupt mapping registers */ +#define UNIVERSE_VINT_MAP_MASK 7 +#define UNIVERSE_VINT_LINT7_SHIFT 28 +#define UNIVERSE_VINT_LINT6_SHIFT 24 +#define UNIVERSE_VINT_LINT5_SHIFT 20 +#define UNIVERSE_VINT_LINT4_SHIFT 16 +#define UNIVERSE_VINT_LINT3_SHIFT 12 +#define UNIVERSE_VINT_LINT2_SHIFT 8 +#define UNIVERSE_VINT_LINT1_SHIFT 4 +#define UNIVERSE_VINT_LINT0_SHIFT 0 +#define UNIVERSE_VINT_SW_INT_SHIFT 16 +#define UNIVERSE_VINT_VERR_SHIFT 8 +#define UNIVERSE_VINT_LERR_SHIFT 4 +#define UNIVERSE_VINT_DMA_SHIFT 0 +#define UNIVERSE_VINT_MBOX3_SHIFT 12 +#define UNIVERSE_VINT_MBOX2_SHIFT 8 +#define UNIVERSE_VINT_MBOX1_SHIFT 4 +#define UNIVERSE_VINT_MBOX0_SHIFT 0 + +/* VIRQx Status/Id */ +#define UNIVERSE_STATID_ERR 0x0100 +#define UNIVERSE_STATID_VECTOR_MASK 0x00FF + +/* Master control register */ +#define UNIVERSE_MAXRTRY_MASK 0xF0000000 +#define 
UNIVERSE_MAXRTRY_SHIFT	28
+#define UNIVERSE_MAXRTRY_INFINITE	0x00000000
+#define UNIVERSE_MAXRTRY(n)	(((n)<<(UNIVERSE_MAXRTRY_SHIFT-6)) & \
+				 UNIVERSE_MAXRTRY_MASK)
+
+#define UNIVERSE_PWON_MASK	0x0F000000
+#define UNIVERSE_PWON_SHIFT	24
+#define UNIVERSE_PWON_128	0x00000000
+#define UNIVERSE_PWON_256	0x01000000
+#define UNIVERSE_PWON_512	0x02000000
+#define UNIVERSE_PWON_1024	0x03000000
+#define UNIVERSE_PWON_2048	0x04000000
+#define UNIVERSE_PWON_4096	0x05000000
+
+#define UNIVERSE_VRL_MASK	0x00c00000
+#define UNIVERSE_VRL_SHIFT	22
+#define UNIVERSE_VRL(level)	(((level)<<UNIVERSE_VRL_SHIFT) & \
+				 UNIVERSE_VRL_MASK)
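+
+/* The code in universe.c accesses these registers through GET_REG and
+ * SET_REG macros whose definitions fall outside this excerpt. A plausible
+ * sketch, assuming the local `__rp' register pointer set up by each
+ * function and readl()/writel() performing the little-endian conversion
+ * required for PCI devices, would be:
+ *
+ *	#define GET_REG(reg)      readl(__rp + UNIVERSE_##reg)
+ *	#define SET_REG(val, reg) writel((val), __rp + UNIVERSE_##reg)
+ */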
diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/universe.tex linux/drivers/vme/universe.tex
--- linux-2.2.12/drivers/vme/universe.tex	Thu Jan  1 01:00:00 1970
+++ linux/drivers/vme/universe.tex	Tue Apr 27 07:05:14 1999
@@ -0,0 +1,518 @@
+\documentclass[a4paper]{article}
+\title{A Linux driver for Tundra's Universe (bridge between
+  PCI and VME buses)}
+\author{Gabriel Paubert}
+\begin{document}
+\maketitle
+
+% Plan:
+% introduction:
+% readers should be at least slightly familiar with programming under
+% Unix/Linux
+% designed to be as independent of Universe specifics as possible
+% use from application level and exports VME services for board specific
+% drivers
+% work in progress, specifically a way to use DMA from application
+% level should be devised.
+
+% The preferred method is to use modules to load the universe and board
+% specific drivers. There are several reasons for it:
+% - the modprobe command in conjunction with the MODULE_PARM macros
+%   in the source code allows changing the configuration of the VME system
+%   by simply editing the relevant parameters in /etc/conf.modules
+% - statically linking drivers in the kernel would require either
+%   - adding parameters in init/main.c and parsing code in the drivers/vme
+%     source files, or
+%   - editing drivers/vme/Makefile or the board driver to hardcode things
+%     like board addresses and interrupts.
+%
+% None of these solutions seems attractive enough to justify saving a few
+% kilobytes of kernel memory.
+
+
+\section{Foreword}
+What follows is a short description of a driver for Linux version 2.2 and how
+it may be used, both from application level (user mode) code and as a
+foundation layer to write kernel mode device drivers for specific VME boards.
+
+The goal was to make an interface that is simple, yet powerful enough for
+most real life applications. It also tries to be generic enough to be
+applicable as a model to other VME bridges; however the design of this
+driver has been influenced by the capabilities and characteristics of the
+Universe: this is hard to avoid.
+
+This is a work in progress and as such it is possible that the layout
+of structures and values of symbolic constants\footnote{You may also suggest
+  better names for the symbolic constants; some of the choices are
+  very poor due to lack of imagination. Corrections, clarifications and
+  improvements to this documentation are also welcome.} will change in the
+future.
+
+However, the code is now stable enough to ensure that changes
+requiring significant modifications of code using the services of the driver
+should be exceptional.
+
+This driver has been designed to be used in VME systems with a single
+master (the Universe itself) and only slaves on the VMEBus. Although nothing
+in theory prevents using it in systems with multiple masters, it has not
+been tested under these conditions.
+
+%The following assumes a minimal knowledge of kernel programming and
+%concepts. It is not a primer on VME bus either. Trying to understand
+%the source code of the driver without a Universe manual {\em and} the
+%relevant errata is probably a waste of time.
+
+\section{Configuring your kernel}
+As a first step, you have to select CONFIG\_EXPERIMENTAL in the
+`General Setup' menu. This will enable a new menu\footnote{On the PPC and
+  i386 architectures only for the moment.} item titled `VME bus support'
+which will present a list of options.
+
+The first of these options, CONFIG\_VMEBUS, has to be enabled and cannot
+for now be modularized (it adds so little code to the kernel that it's
+not worth giving the `m' option). The others are (at least) the universe
+driver and perhaps additional board specific modules.
+
+If you add a new board specific driver, please prefix the corresponding
+configuration option with CONFIG\_VME\_ to limit the risks of name clashes.
+
+\section{Driver loading and chip initialization}
+We unfortunately now have to tackle one of the most difficult problems
+with the Universe chip. It is so dependent on the specific board and its
+firmware that only generic indications can be given.
+
+Let us start from the driver requirements. It needs:
+\begin{itemize}
+\item access to the registers; normally the PCI initialization sets this up
+correctly at power up, since their location is defined by a standard PCI base
+register in the configuration space.
+\item PCI slave image 0 has to be allocated 8 kB of free PCI memory space
+and is reserved for internal uses by the driver.
+\item all other slave images, including the special slave image, must be
+allocated in PCI memory space; I/O space is not supported.
+\end{itemize}
+In the universe\_setup function, there is one section which is compiled
+whenever UNIVERSE\_IMAGE\_SETUP is defined. This section should be enabled
+and edited to suit the needs of each system if the firmware does not provide
+the necessary functionality, but beware of conflicts in the PCI memory
+space. The settings in the early parts of this function can also be modified
+to select specific global features of the Universe like timeouts.
+
+The universe module supports one parameter, permanent\_maps, which is a
+bitmask of the images which should be permanently mapped for access by
+kernel drivers (you may also change its default value by editing the source
+code). The least significant bit represents the special slave
+image; other images are represented by bits of weight $2^n$, where $n$
+is between 1 and~3 or~7 depending on the Universe version number. Permanent
+mappings consume precious kernel virtual address space and should be used
+sparingly, but may bring some performance improvements on architectures
+which have special mechanisms to allow mapping large chunks of virtual
+address space.
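+
+For example, to permanently map the special slave image and image~2 at
+module load time (the value is only an illustration), a line like the
+following may be added to /etc/conf.modules:
+\begin{verbatim}
+options universe permanent_maps=0x05
+\end{verbatim}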
+
+\section{Accessing the VME bus from application programs}
+
+\subsection{Opening the device}
+
+To access devices on the VMEBus, you simply use the standard open system call
+with the appropriate parameters.
+
+This device currently uses major number 60, reserved for experimental
+drivers (the corresponding device can be created by `mknod /dev/vme c 60 0').
+An official number might be allocated in the future.
+
+Considering the number of different VME boards in existence,
+and to avoid creating name conflicts, it {\em might} be better to create a
+separate /dev/vme directory and give raw access to VME devices by
+opening /dev/vme/raw. This is however a matter of local policy on each system
+and will not be further discussed.
+
+\subsection{Selecting the type of VME accesses you want to perform}
+
+Here things start to become more complex: VME provides a variety of
+address spaces characterized by the number of address bits, the
+privilege level, whether burst transfers are used or not, etc\ldots
+
+However, using as many device minors as potential addressing combinations
+is not practical, simply because it would have required creating
+a large number of device files, possibly even hitting the current 8 bit limit
+on minors (especially when you consider all the options given in the driver
+and the number of possible combinations with the extended addressing mode
+field introduced by the 2eVME standard). So there is a single minor device
+number (0), reserving all other minors for board specific\footnote{Given the
+  number of VME boards in existence, minor numbers will probably have to be
+  allocated in a system specific or organization specific way. Alternatively
+  an ioctl to load or select a specific board interface to override the
+  default /dev/vme interface described here might be added.}
+device drivers.
+
+For this reason, the combination of address modifier and data width to use
+when accessing a device is specified with an ioctl called VME\_SET\_ATTR,
+which takes as a parameter a structure of type VME\_attr consisting
+of~3~fields.
+
+Two of these fields, {\em base} and {\em limit}, simply specify the address
+of the first and last byte you want to access in the space described by the
+third field, called {\em flags}, which specifies three things: the VME
+address space you want to access, the possible data width or widths to use,
+and the mechanisms through which you want to access this area.
+
+The first two are specified using one of the names listed in
+table~\ref{tbl:am}.
+When accessing some devices, it is possible that the maximum data width
+enforced by the bridge (which, in the case of the Universe, splits large
+transfers into a series of smaller ones) is unimportant. In this case,
+the width parameter to the VME\_AM macro may be the logical OR of several
+data widths. For example, to access a D08(O) slave in the
+privileged A16 space, the flags may be set to VME\_AM\_A16\_PRIV(8|16|32).
+
+Given the characteristics of the VMEbus, the following width settings make
+sense:
+\begin{itemize}
+\item[8:] to force the Universe to split multibyte transfers into a series of
+single byte transfers; this is only useful with D08(EO) slaves, which are
+quite rare.
+\item[16:] to force the Universe to split 32 bit (or larger) accesses into
+  a series of 16 bit transfers; this is significantly faster than consecutive
+  16 bit accesses when accessing D16 slaves.
+\item[32:] to get maximal performance when accessing D32 slaves.
+\item[16|32:] to access D16 slaves when no single access\footnote{Some
+  processors may require inserting an explicit barrier instruction to
+  prevent merging accesses to adjacent locations into a single larger
+  transfer; for example on PowerPC an `eieio' instruction may be required.}
+  will be larger than 16 bits.
+\item[8|16|32:] to access D08 slaves when only byte access will be
+  performed, which is always the case for D08(O) boards.
+\end{itemize}
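+
+As an illustration, the following user mode fragment opens the raw VME
+device and selects non-privileged A24 accesses with D16 transfers for a
+hypothetical board occupying 64~kB at VME address 0x200000 (the device
+node, the address range, and the availability of the VME\_attr
+declarations to user code are all assumptions of this example):
+\begin{verbatim}
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <vme/vme.h>            /* assumed visible to user code */
+
+int main(void)
+{
+        VME_attr attr;
+        int fd = open("/dev/vme", O_RDWR);
+
+        if (fd < 0) { perror("open /dev/vme"); exit(1); }
+        attr.base  = 0x200000;          /* first byte of the board */
+        attr.limit = 0x20ffff;          /* last byte of the board */
+        attr.flags = VME_AM_A24(16);    /* A24 data space, D16 only */
+        if (ioctl(fd, VME_SET_ATTR, &attr) < 0) {
+                perror("VME_SET_ATTR");
+                exit(1);
+        }
+        return 0;
+}
+\end{verbatim}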
+\item[32:] to get maximal performance when accessing D32 slaves, +\item[16|32:] to access D16 slaves when no single access\footnote{Some + processors may require inserting an explicit barrier instruction to + prevent merging accesses to adjacent locations into a single larger + transfer, for example on PowerPC an `eieio' instruction may be required.} + will be larger than 16 bits. +\item[$8|16|32$:] to access D08 slaves when only byte access will be + performed, which is always the case for D08(O) boards. +\end{itemize} + + +\begin{table} +\halign{#\strut\vrule&\quad#\hfil\quad&\vrule#&\quad\hfil#\hfil\quad&\vrule#\cr +\noalign{\hrule} +&Name&&Possible widths&\cr +\noalign{\hrule} +&VME\_AM\_A16( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A16\_PRIV( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A24( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A24\_PRIV( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A24\_PROG( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A24\_PROG\_PRIV( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A24\_BLT( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A24\_BLT\_PRIV( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A24\_MBLT( )&&64&\cr +\noalign{\hrule} +&VME\_AM\_A24\_MBLT\_PRIV( )&&64&\cr +\noalign{\hrule} +&VME\_AM\_A32( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A32\_PRIV( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A32\_PROG( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A32\_PROG\_PRIV( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A32\_BLT( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A32\_BLT\_PRIV( )&&8, 16, or 32&\cr +\noalign{\hrule} +&VME\_AM\_A32\_MBLT( )&&64&\cr +\noalign{\hrule} +&VME\_AM\_A32\_MBLT\_PRIV( )&&64&\cr +\noalign{\hrule} +&VME\_AM\_CRCSR( )&&8, 16, or 32&\cr +\noalign{\hrule} +} +\caption[VME address modifier names]{VME addres modifier names, they are all +followed by the desired data width between parentheses.}\label{tbl:am} +\end{table} + +\subsection{Diagnostic ioctls} + +ioctl is designed to provide a safe way to check for the presence of a device. +It performs a read or write cycle to the specified vme address space with the +required characteristics and checks for bus errors or timeouts in a safe way +(returning -EIO in this case). + +Reading and writing: read and write calls are not guaranteed to work because +of the limited virtual address space from the kernel. These functions have +been implemented on some artchitectures only (PPC for now). They are also +implemented in a way that minimizes the number of instructions which transfer +data between the VMEbus and the processor. Therefore the data bus width +parameter used when selecting the attributes should match exactly the +capabilities of the slave. + +The preferred method is to use the mmap system call to map the device in the +virtual address space of the process. + + +Accessing a device directly by memory mapping: + +Other functions: +RMW cycles: ioctl. + +\section{Access to VME from kernel mode (writing board specific drivers)} + +\subsection{Registering a driver} +when loaded, note that minor number may +be a parameter in /etc/conf.modules and the device might also be +created dynamically by scripts to ensure that they match). A genuine +dynamic minor allocation scheme might be added in the future. + +\subsection{Checking that a device is responding} +Most VME boards do not implement the CR/CSR address space and are simply +configured by setting jumpers, which is error-prone. 
+
+\section{Access to VME from kernel mode (writing board specific drivers)}
+
+\subsection{Registering a driver}
+This is done when the module is loaded; note that the minor number may
+be a parameter in /etc/conf.modules (and the device node might also be
+created dynamically by scripts to ensure that they match). A genuine
+dynamic minor allocation scheme might be added in the future.
+
+\subsection{Checking that a device is responding}
+Most VME boards do not implement the CR/CSR address space and are simply
+configured by setting jumpers, which is error-prone. A given board might
+also have been unplugged, but boot scripts might start a process which needs
+access to the board or load the corresponding driver. For these reasons
+a simple mechanism to help checking for hardware failures has been
+implemented: the function vme\_safe\_access(u\_int code, u32 flags,
+u\_long offset, u\_long *data) performs a single access to the VMEbus
+and checks for bus errors or timeouts. code is one of the VME\_safe\_access
+ioctl codes, flags specifies the address space (the USE field is ignored),
+and offset and data have their obvious meanings.
+
+
+\subsection{Direct access to VMEbus devices}
+  struct vme\_region
+  vme\_register\_region (and unregister)
+  vme\_map\_region ?
+  vme\_check\_region ?
+  shows in /proc
+
+Checks are performed regardless of the transfer type (MBLT/BLT/DATA/PROGRAM
+and privilege) because a) MBLT transfers will fall back to BLT for transfers
+of less than 8 bytes and to DATA or PROGRAM for RMW cycles, and
+b) non-privileged areas also respond to privileged transfers.
+
+\subsection{Interrupts}
+  struct vme\_interrupt
+
+next: handled by the Universe driver (must be initialized to NULL),
+
+device: must be initialized to NULL, handled by the Universe driver,
+
+handler: pointer to a function taking a vme\_interrupt pointer as parameter.
+This function is called when the interrupt happens. It is not recommended to
+change it while the interrupt is active (although nothing serious should
+happen).
+
+%Q: what happens if an interrupt is disabled but the vector has already been
+%acquired by the universe. A: there is a bug in the current implementation,
+% we should read the Universe to check that the vector has not been fetched.
+% Beware that write posting may play havoc with this (disabling the interrupt
+% is queued in the TxFIFO, but the PWON counter causes the bridge to fetch the
+% vector and later the interrupt happens). This requires some thought.
+
+level, vector: must be initialized before calling vme\_request\_interrupt and
+may not be changed until the interrupt is released.
+
+  vme\_request\_interrupt
+
+  vme\_free\_interrupt
+
+  shows in /proc
+
+\subsubsection{Interrupt handlers}
+
+\subsection{DMA}
+
+  struct vme\_dma, vme\_dmavec
+
+When can each and every element be modified:
+
+next: handled by the Universe driver (must be initialized to NULL),
+
+queue: only used by the Universe driver while the DMA is being performed
+or waiting for its turn in the queue, free for use by the board specific
+driver while not queued (for example to handle its own list of free dmalists).
+
+device: must be initialized to NULL, handled by the Universe driver,
+
+maxfrags: must be initialized before calling vme\_alloc\_dmalist and may not
+be changed until vme\_free\_dmalist is called,
+
+private: must be initialized to NULL, handled by the Universe driver,
+
+timeout: must be set before calling vme\_queue\_dmalist, never modified
+by the Universe driver.
+
+
+  vme\_alloc\_dmalist (name might change)
+
+In the current implementation, setting the maximum number of fragments to
+a non power of 2 wastes some memory since the enforced limit will be lower
+than the amount actually allocated.
+
+  vme\_queue\_dmalist
+
+Warning: there is no check that the queued list is not already on the queue.
+Writers of device drivers are supposed to be able to avoid these problems
+(note that they may keep the dmalists on their own queue since the
+queue element of the structure is only used by the Universe driver while
+the DMA is on the active list).
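+
+A condensed sketch of the interface described above (the completion
+handler, fragment count, addresses and flags are illustrative, and it is
+assumed that the third argument of vme\_alloc\_dmalist is the same maximum
+fragment count stored in maxfrags):
+\begin{verbatim}
+static void transfer_done(struct vme_dma *dma)
+{
+        /* dma->error is 0, -EIO or -ETIME at this point */
+        vme_release_dmalist(dma);
+}
+
+/* Static, therefore zeroed: next, device and private start out NULL
+ * as required.
+ */
+static struct vme_dma dma;
+static struct vme_dmavec vec;
+
+int start_transfer(struct vme_device *dev, u_long buffer)
+{
+        int err;
+
+        dma.maxfrags = 16;
+        err = vme_alloc_dmalist(dev, &dma, dma.maxfrags);
+        if (err) return err;
+        dma.handler = transfer_done;
+        dma.timeout = HZ;               /* one second */
+        vec.kvaddr   = buffer;          /* kernel buffer */
+        vec.vme_addr = 0x200000;        /* illustrative VME address */
+        vec.length   = 0x1000;
+        vec.flags    = VME_AM_A32_BLT(32);
+        return vme_queue_dmalist(&dma, &vec, 1);
+}
+\end{verbatim}
+In a real driver the list would of course be allocated once at
+initialization time and freed with vme\_free\_dmalist when unloading.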
+
+  vme\_free\_dmalist
+
+Limitations: the size of the private area used by the driver to build the
+scatter/gather list is at most one page. This limits the list to 128 entries
+on architectures with a 4kB page size. This is not a practical limitation
+since there is no limit on the number of entries in the dma operation queue,
+although it will cause a slight overhead consisting of one additional
+interrupt for every 128 elements in the DMA list (every 512kB if the list is
+used to scatter/gather page sized chunks).
+
+% Suggestion: why not add dynamic scatter gather list allocation at
+% queue_dmalist time ?
+% Answer: it would have to be allowed to fail if called from interrupt
+% context and would require more checks. Most real time systems know how
+% much data they will require at most.
+
+
+\subsubsection{DMA termination handlers}
+Called whenever the DMA completes (status=0), times out (status=-ETIME) or
+is aborted due to a bus error (status=-EIO).
+
+
+
+\subsection{Other functions}
+vme\_safe\_access
+
+checks for bus errors and returns -EIO if they happen
+
+vme\_special\_access
+Beware of portability: semantics dictated by the way the Universe
+implements them.
+
+\subsection{Restrictions}
+Interrupt and DMA termination handlers can only call vme\_safe\_access,
+vme\_special\_access and vme\_queue\_dmalist. All other calls from the
+handlers are forbidden and might cause system deadlocks, especially in
+multiprocessor (SMP) systems.
+
+Side note: it might seem strange to also allow vme\_safe\_access since it
+does not seem to have any practical usage. However, imagine that there is an
+interrupt which tells that a new device is present (thanks to hot-plugging);
+then allowing this function for probing the new devices makes sense. Or a
+loaded driver may occasionally set a timer and periodically check for the
+presence of a board (this is safe if the board includes a CR/CSR space whose
+address is set up by the geographical addressing pins).
+
+%why not add a sysfail interrupt and register handlers that will
+%check if a board is still present when the interrupt occurs.
+
+\subsection{Unregistering the driver}
+This happens when unloading the module; it releases all allocated resources
+(as reported by the /proc interface) as well as DMA scatter-gather lists.
+
+\section{Caveats}
+
+VMEBus monopolization due to CWT.
+
+Getting an interrupt although it has been disabled, because of the write
+posting delay effect on interrupt control registers. You may use
+vme\_safe\_access to disable interrupts before releasing them or
+unregistering the device.
+
+mmap followed by VME\_SET\_ATTR will keep the mapping valid although it
+might point to a region with very different attributes than reported by
+VME\_GET\_ATTR.
+
+Read carefully the documented bugs of your version of the Universe chip
+(I or II) before modifying some control registers or enabling write posting
+on slave images. Some of the bugs can lead to fatal bus deadlocks, and for
+others no workaround has been implemented by the driver because it seemed
+too complex, like the problems of Universe revision I with bus errors on
+write posted cycles.
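+
+For example, a board driver can mask its interrupt and make sure the
+masking write has actually reached the board before freeing the handler;
+in the following sketch the access code name, the region pointer and the
+register offsets are all placeholders:
+\begin{verbatim}
+u_long dummy;
+
+board->int_mask = 0;    /* this write may be posted by the bridge */
+/* A safe read from the same board cannot complete before the posted
+ * write has been flushed out of the Universe FIFOs.
+ */
+vme_safe_access(VME_SAFE_READ, VME_AM_A24(16),
+                BOARD_BASE + INT_MASK_OFFSET, &dummy);
+vme_free_interrupt(&board_interrupt);
+\end{verbatim}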
+
+% Nonreproducible performance, especially with DMA accesses: the Universe
+% performs transfers between two buses which run from different clocks.
+% The board used to test the DMA functions (a simple VME memory board)
+% uses the VME backplane clock to insert wait states; this 16 MHz clock
+% is generated by the Universe by dividing its 64 MHz clock input by 4,
+% the internal logic of the Universe seems to use more
+% Experience has shown that the time it takes to transfer a given amount
+% of data varies after each power cycle. This is probably due to the
+% random phase relationships between the clocks used for the PCI and VME
+% sides, which on the test system are apparently derived from the same
+% base oscillator and generated by PLLs. The smallest common multiple of
+% the clock periods (64 MHz and 33 MHz) is 750 ns. The performance
+% variation is of the order of 10\%.
+
+Disabling an interrupt in a device does not necessarily mean that the
+interrupt handler will not be invoked, because the interrupt line may have
+been activated and the Universe may be acquiring the interrupt vector at the
+time the access to mask the interrupt is attempted. This means that interrupt
+handlers {\em must} be written to handle this case (they may be as simple as
+reading an in-memory flag telling the handler that it has to ignore the
+interrupt and return immediately). This is especially true when posted
+writes are enabled, since the window for this situation to happen becomes
+considerably longer.
+
+The same applies when unregistering a driver: the interrupts must first be
+masked before vme\_unregister\_device is called, but there are scenarios
+where the Universe interrupt handler might obtain the vector of an
+interrupt which has been freed. This situation is not easy to handle
+properly in all cases: the Universe driver will simply ignore this
+interrupt, but if the same vector is fetched again within too short an
+amount of time, it will decide that the interrupt line is stuck, issue a
+message and mask the corresponding interrupt line forever. The best solution
+is to take steps to prevent this by either performing a read access to a VME
+device after writing the interrupt mask (this flushes the write posting
+queue in the Universe) or using a vme\_safe\_access call to disable the
+interrupts.
+
+
+\section{Remaining problems}
+The RMW cycle ioctl is still bound to change and is very likely
+Universe specific, given the semantics of the special cycle generator.
+
+If the DMA times out, the VMEBus is possibly completely locked up; the
+only solution might be a VME bus reset, but this has been neither tested
+nor implemented.
+
+Some people might like to be able to prioritize DMA operations. This has not
+been implemented for now.
+
+\section{Unsupported Universe features}
+
+\begin{itemize}
+\item Universe II specific features: this includes location monitors,
+  mailboxes and semaphores. These are designed to be used in multiple master
+  VME systems. The only multimaster feature currently supported is the
+  capability of performing RMW cycles. ADO cycles are not supported either,
+  although they are fairly easy to add.
+
+\item Generating interrupts by software on the VMEbus; this is another
+  multimaster feature which is implemented in a much more flexible way in
+  the Universe II than in the Universe I.
+
+\item System wide functions: handling ACFAIL and SYSFAIL interrupts,
+  bus isolation, and other miscellaneous obscure features. Reset is not
+  yet handled but might be allowed at driver initialization in a future
+  release; it is too dangerous to allow it at any other time, when drivers
+  are expecting interrupts or DMA is in progress.
+
+\item Handling of bus errors other than through the safe accesses (for
+  example the error logs on posted writes).
+
+\item User AM codes; they would, however, be quite easy to add in the
+  current framework if needed.
+
+\item By definition, anything else which has not been explicitly stated
+  as supported is unsupported.
+\end{itemize}
+
+
+%Normal setup on PPC MVME: 8kB for image 0 which is dynamically remapped
+%to satisfy ioctl/access requests. Other images may be used as desired but
+%their attributes can't be changed without unloading and reloading the driver.
+%Suggested use is image 1 for CR/CSR, image 2 for 64 or 128 Mb of A32/D32
+%space, images 3 to 7 can be set at will for user mode only mappings or
+%whatever is needed (BLT transfers are in all cases much better handled by
+%DMA).
+
+%Overlapping slave images are not supported and might cause a system crash.
+
+\end{document}
diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/vme_init.c linux/drivers/vme/vme_init.c
--- linux-2.2.12/drivers/vme/vme_init.c	Thu Jan  1 01:00:00 1970
+++ linux/drivers/vme/vme_init.c	Mon Apr 12 14:56:56 1999
@@ -0,0 +1,48 @@
+/*
+ * drivers/vme/vme_init.c -- Generic top level vme driver.
+ *
+ * Copyright (C) 1997-1999 Gabriel Paubert, paubert@iram.es
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include
+#include
+#include
+#include
+
+
+
+#ifdef CONFIG_VME_UNIVERSE
+int universe_init(void);
+#endif
+#ifdef CONFIG_VME_BC336
+void bc336_init(void);
+#endif
+
+/* Actually this stub is present mostly to force some generic support
+ * functions into the kernel by bundling them with O_TARGET in the Makefile.
+ * This code won't be modularized for now: altogether it adds about 600
+ * bytes to the kernel on a PPC, while a module takes at least one page. It
+ * might be modularized later when more functions are moved here, for example
+ * the /proc/bus/vme interface code.
+ */
+__init void vmebus_init(void) {
+#ifdef CONFIG_VME_UNIVERSE
+	universe_init();
+#endif
+#ifdef CONFIG_VME_BC336
+	bc336_init();
+#endif
+	/* Add your own drivers here if they are ever used in a non
+	 * modularized way.
+	 */
+}
+
+#ifndef __powerpc__
+/* Put here the portable versions of copy_user_to_io and copy_io_to_user
+ * functions.
+ */ +#endif diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/drivers/vme/vme_syms.c linux/drivers/vme/vme_syms.c --- linux-2.2.12/drivers/vme/vme_syms.c Thu Jan 1 01:00:00 1970 +++ linux/drivers/vme/vme_syms.c Mon Apr 12 14:54:53 1999 @@ -0,0 +1,4 @@ +#include +#include + +EXPORT_SYMBOL(copy_user_io); Common subdirectories: linux-2.2.12/include/linux/byteorder and linux/include/linux/byteorder Common subdirectories: linux-2.2.12/include/linux/lockd and linux/include/linux/lockd Common subdirectories: linux-2.2.12/include/linux/nfsd and linux/include/linux/nfsd diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12/include/linux/proc_fs.h linux/include/linux/proc_fs.h --- linux-2.2.12/include/linux/proc_fs.h Fri Aug 27 11:19:18 1999 +++ linux/include/linux/proc_fs.h Thu Oct 14 17:01:12 1999 @@ -230,6 +230,10 @@ PROC_BUS_PCI_DEVICES, PROC_BUS_ZORRO, PROC_BUS_ZORRO_DEVICES, + PROC_BUS_VME, + PROC_BUS_VME_DEVICES, + PROC_BUS_VME_REGIONS, + PROC_BUS_VME_INTERRUPTS, PROC_BUS_LAST }; Common subdirectories: linux-2.2.12/include/linux/sunrpc and linux/include/linux/sunrpc diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12//include/vme/bc336.h linux//include/vme/bc336.h --- linux-2.2.12//include/vme/bc336.h Thu Jan 1 01:00:00 1970 +++ linux//include/vme/bc336.h Mon Nov 15 13:23:15 1999 @@ -0,0 +1,14 @@ +/* Header file for bc336 time code processor board. */ +#ifndef _VME_BC336_H +#define _VME_BC336_H +#include +typedef struct { + u_int yday; + u_int hour, min, sec; + u_int halfmicroseconds; + u_int state; +} bc336_timeval; + +#define BC336_READ _IOR(VME_MAGIC, 0x80, bc336_timeval) +#define BC336_WRITE _IOW(VME_MAGIC, 0x80, bc336_timeval) +#endif diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12//include/vme/correl92.h linux//include/vme/correl92.h --- linux-2.2.12//include/vme/correl92.h Thu Jan 1 01:00:00 1970 +++ linux//include/vme/correl92.h Mon Nov 15 13:23:21 1999 @@ -0,0 +1,68 @@ +/* Header file for IRAM correlator with dutch chips. */ + +#ifndef _VME_CORREL92_H +#define _VME_CORREL92_H +#include +#define CONST_1SECOND 1875000 /* 1 second on the timing channel */ +#define BOARDCHIPS 16 /* 16 chips per board */ +#define CHIPCHANS 16 /* 16 channels per chip */ +#define NBOARD 8 /* 8 boards per correlator */ +#define NIRM 4 +#define NSAMP (2*NIRM) +#define MAXSAMP 16 +typedef union { + unsigned short ctlstat; + u_char filler1[32]; +} chipctl; + +typedef union { + unsigned short voltage; + u_char __fill[256]; +} samplerdac; + +typedef union { + unsigned short uu; + u_char __fill[512]; +} samplerctl; + +/* Note that not all channels, samplers, etc, are present, but the + * addresses are fixed whatever the number of boards installed. 
+ */ + +typedef volatile struct { + /* 0x0000 */ + unsigned short data[4096]; + /* 0x2000 */ + chipctl control[256]; + /* 0x4000 */ + u_char __filler1[0xC00]; /* Not used here */ + /* 0x4c00 */ + unsigned short interrupt; + u_char __filler2[0x200-sizeof(short)]; + /* 0x4e00 */ + unsigned short display; + u_char __filler3[0x200-sizeof(short)]; + /* 0x5000 */ + u_char __filler4[0x1000]; + /* 0x6000 */ + samplerdac dacs[16]; + /* 0x7000 */ + samplerctl mode[8]; +} corr_hardware; + +typedef struct { + int nphase; + int reqtime; +} corr_mode; + +typedef struct { + unsigned time; + unsigned sequence; + unsigned tstamp; + int status; + int phase; + unsigned data[2048]; +} corr_raw_spectrum; + +#define CORR_SET_MODE _IOW(VME_MAGIC, 0x10, corr_mode) +#endif diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12//include/vme/correl99.h linux//include/vme/correl99.h --- linux-2.2.12//include/vme/correl99.h Thu Jan 1 01:00:00 1970 +++ linux//include/vme/correl99.h Thu Jul 15 14:05:59 1999 @@ -0,0 +1,45 @@ +/* Header file for IRAM correlator with IRAM chips. */ + +#ifndef _VME_CORREL99_H +#define _VME_CORREL99_H +#include +#define CONST_1SECOND 937500 /* 1 second on the timing channel */ +#define CHIPCHANS 64 /* 64 channels per chip */ +#define BOARDCHIPS 16 /* 16 chips per board */ +#define NBOARD 15 /* 15 boards per correlator */ +#define CHANNELS (NBOARD*BOARDCHIPS*CHIPCHANS) +#define NIRM 6 +#define NSAMP (2*NIRM) + +/* Note that not all channels, samplers, etc, are present, but the + * addresses are fixed whatever the number of boards installed. + */ + +typedef struct { + /* 0x0000 */ + unsigned short data[CHANNELS]; + /* 0x7800 */ + u_char __filler1[0x800]; + /* 0x8000 */ + struct { + unsigned short osrba; + unsigned short osrdc; + unsigned short tcr; + u_char __filler2[0x7a]; + } + control[NBOARD][BOARDCHIPS]; + /* 0xf800 */ + unsigned short misc[0x400]; +} corr_hw; + +typedef struct { + int acquire; +} corr_mode; + +typedef struct { + unsigned tstamp; + unsigned data[CHANNELS]; +} corr_spectrum; + +#define CORR_SET_MODE _IOW(VME_MAGIC, 0x10, corr_mode) +#endif diff -uN --exclude=.* --exclude=*.o --exclude=*~ --exclude=*.gz --exclude=*version*.h --exclude=compile.h --exclude=autoconf.h --exclude=pci.h linux-2.2.12//include/vme/vme.h linux//include/vme/vme.h --- linux-2.2.12//include/vme/vme.h Thu Jan 1 01:00:00 1970 +++ linux//include/vme/vme.h Mon Nov 15 10:52:51 1999 @@ -0,0 +1,409 @@ +/* Private kernel definitions for Linux VME support. + * G.Paubert 1997-1999 + */ +#ifndef _VME_VME_H +#define _VME_VME_H + +#define VME_MAGIC ('V'+'M'+'E') /* Should be 0xE8 */ + +#include +#include + +/* The VME address modifiers in numerical order. Some are still missing, + * namely A40, MD32, and 2eBLT. The official VME nomenclature is quite + * confusing, defining different names for address modifiers and corresponding + * capabilities, with the following non obvious equivalences: + * - A16 <-> SHORT + * - A24 <-> STANDARD + * - A32 <-> EXTENDED + * - A64 <-> LONG + * Only the Ann notation will be employed because it seems much clearer, + * and the default will be non-privileged data access. + */ +#define VME_DW_SHIFT 8 +#define VME_DW_MASK 0x00000f00 +#define VME_DW(x) (((x)<<(VME_DW_SHIFT-3)) & VME_DW_MASK) + +/* Since 2eBLT makes use of 8 bits of extended address modifier codes, + * we leave room in the flags to implement it in the future. 
+ */ + +#define VME_AM_MASK 0x3f000000 +#define VME_AM_SHIFT 24 +#define VME_AM(x) (((x)<vme_rxx, same for write + * and the ones with included barriers would be vme_rawxx, vme_rarxx, + * and vme_wawxx. + */ +#if defined(__powerpc__) +#define vme_rar_barrier() __asm__ __volatile__("eieio") +#define vme_raw_barrier() __asm__ __volatile__("eieio") +#define vme_waw_barrier() __asm__ __volatile__("eieio") +#elif defined(__mc68000__) || defined(__i386__) +#define vme_rar_barrier() do { } while(0) +#define vme_raw_barrier() do { } while(0) +#define vme_waw_barrier() do { } while(0) +#else +#error "VME bus is not supported under Linux for this architecture" +#endif + +#include + +struct vme_device { + struct vme_device *next; + struct file_operations * fops; + /* These 3 fields are necessary to perform automatic + * deallocation when unregistering a device. + */ + struct vme_region *regions; + struct vme_interrupt *interrupts; + struct vme_dma *dmalists; + char * name; + void * private; + u_int minor; +}; + +struct vme_interrupt { + struct vme_interrupt * next; + void (*handler)(struct vme_interrupt *); + struct vme_device *device; + void * handler_data; + char * name; + u_long count; + u_int level, vector; + u_int flags; /* interrupt attributes */ +}; + +struct vme_region { + struct vme_region * next; + struct vme_device * device; + volatile u_char *kvaddr; + u_long phyaddr; + u_long base; + u_long limit; + u_int flags; +}; + +struct vme_dma { + struct vme_dma * next; + struct vme_dma * queue; /* To queue DMA requests */ + struct vme_device * device; + void * private; /* Memory pointer to the head of the list */ + void (*handler)(struct vme_dma *); + void * handler_data; + size_t maxfrags; /* Maximum number of scatter/gather fragments */ + size_t remlen; + long timeout; /* timeout in jiffies */ + u_int flags; /* VME_DMA_BUSY and other internal flags */ + u32 error; /* Error status: 0, -EIO, -ETIME, ... */ +}; + +/* Set by queue_dmalist and cleared by release_dmalist */ +#define VME_DMA_BUSY 0 + +#define VME_DMA_READY 1 + +struct vme_dmavec { + u_long kvaddr; + u_long vme_addr; + size_t length; + u32 flags; +}; +#if defined(__powerpc__) +/* These functions have been optimized on PPC, a portable version should + * be written for other architectures and be exported as a general kernel + * service. + */ +extern long copy_user_io(volatile void *, volatile const void*, u_long); +#define copy_io_to_user(d,s,l) copy_user_io(d,s,l) +#define copy_user_to_io(d,s,l) copy_user_io(d,s,l) +#endif + + +int +vme_register_device(struct vme_device *); + +void +vme_unregister_device(struct vme_device *); + +int +vme_register_region(struct vme_device *, struct vme_region *); + +void +vme_unregister_region(struct vme_region *); + +int +vme_request_interrupt(struct vme_device *, struct vme_interrupt *); + +void +vme_free_interrupt(struct vme_interrupt *); + +/* This interface might still change: although it seems pretty stable now. */ +int +vme_alloc_dmalist(struct vme_device *, struct vme_dma *, size_t); + +void +vme_free_dmalist(struct vme_dma *); + +int +vme_queue_dmalist(struct vme_dma *, struct vme_dmavec *, size_t); + +/* This function has to be called in the dma termination handlers. */ +extern inline void +vme_release_dmalist(struct vme_dma * dma) { + clear_bit(VME_DMA_BUSY, &dma->flags); +} + +int +vme_safe_access(u_int, u32, u_long, u_long *); + +int +vme_modbits(u_int, u32, u_long, u_int *, u_int); + +#endif /*!_VME_VME_H*/ +